repo_name stringlengths 5 100 | path stringlengths 4 231 | language stringclasses 1 value | license stringclasses 15 values | size int64 6 947k | score float64 0 0.34 | prefix stringlengths 0 8.16k | middle stringlengths 3 512 | suffix stringlengths 0 8.17k |
|---|---|---|---|---|---|---|---|---|
mulonemartin/kaira | kaira/wrapper.py | Python | mit | 1,431 | 0.000699 |
__all__ = ['ContextManager', 'WrapWithContextManager']
class ContextManager(object):
name = 'ContextManager'
def on_start(self, request):
pass
def on_success(self, request):
pass
def on_failure(self, request):
pass
def wrap_call(self, func):
return func
class WrapWithContextManager(object):
def __init__(self, context=None, skip_list=None):
self.context = context
if skip_list is None:
skip_list = []
self.skip_list = skip_list
def __call__(self, f):
def wrap(fnc, ctm):
def g(*a, **b):
try:
ctm.on_star | t(a[0])
output = ctm.wrap_call(fnc)(*a, **b)
ctm.on_success(a[0])
return output
except:
ctm.on_failure(a[0])
raise
return g
def wrap_skip_context(fnc, ctm):
def g(*a, **b):
output = ctm.wrap_call(fnc)(*a, **b)
return output
return g
| if self.context:
for context in reversed(self.context):
if isinstance(context, ContextManager):
if context.name in self.skip_list:
f = wrap_skip_context(f, context)
else:
f = wrap(f, context)
return f
|
ityaptin/ceilometer | ceilometer/storage/sqlalchemy/migrate_repo/versions/006_counter_volume_is_float.py | Python | apache-2.0 | 888 | 0 | # -*- coding: utf-8 -*-
#
# Copyright 2013 eNovance SAS <licensing@enovance.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import Float
from sqlalchemy import MetaData
from sqlalchemy import Table
def upgrade(migrate_engine):
meta = MetaData(bind=migrate_engine)
meter = Table('meter', meta, | autoload=True)
meter.c.counter | _volume.alter(type=Float(53))
|
DarioGT/docker-carra | src/rqEirq/admin.py | Python | mit | 247 | 0.036437 | from django.c | ontrib import admin
# Register your models here.
from .models import Source
from .actions import doGraphMerveille
class MySource( admin.ModelAdmin ):
| actions = [ doGraphMerveille ]
admin.site.register( Source, MySource )
|
coreycb/horizon | openstack_dashboard/test/integration_tests/tests/test_security_groups.py | Python | apache-2.0 | 4,753 | 0 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import random
from openstack_dashboard.test.integration_tests import helpers
from openstack_dashboard.test.integration_tests.regions import messages
class TestSecuritygroup(helpers.TestCase):
SEC_GROUP_NAME = helpers.gen_random_resource_name("securitygroup")
RULE_PORT = str(random.randint(9000, 9999))
@property
def securitygroup_page(self):
return self.home_pg.\
go_to_compute_accessandsecurity_securitygroupspage()
def _create_securitygroup(self):
page = self.securitygroup_page
page.create_securitygroup(self.SEC_GROUP_NAME)
self.assertTrue(page.find_message_and_dismiss(messages.SUCCESS))
self.assertFalse(page.find_message_and_dismiss(messages.ERROR))
self.assertTrue(page.is_securitygroup_present(self.SEC_GROUP_NAME))
def _delete_securitygroup(self):
page = self.securitygroup_page
page.delete_securitygroup(self.SEC_GROUP_NAME)
self.assertTrue(page.find_message_and_dismiss(messages.SUCCESS))
self.assertFalse(page.find_message_and_dismiss(messages.ERROR))
self.assertFalse(page.is_securitygroup_present(self.SEC_GROUP_NAME))
def _add_rule(self):
page = self.securitygroup_page
page = page.go_to_manage_rules(self.SEC_GROUP_NAME)
page.create_rule(self.RULE_PORT)
self.assertTrue(page.find_message_and_dismiss(messages.SUCCESS))
self.assertTrue(page.is_port_present(self.RULE_PORT))
def _delete_rule_by_table_action(self):
page = self.securitygroup_page
page = page.go_to_manage_rules(self.SEC_GROUP_NAME)
page.delete_rules(self.RULE_PORT)
self.assertTrue(page.find_message_and_dismiss(messages.SUCCESS))
self.assertFalse(page.find_message_and_dismiss(messages.ERROR))
self.assertFalse(page.is_port_present(self.RULE_PORT))
def _delete_rule_by_row_action(self):
page = self.securitygroup_page
page = page.go_to_manage_rules(self.SEC_GROUP_NAME)
page.delete_rule(self.RULE_PORT)
self.assertTrue(page.find_message_and_dismiss(messages.SUCCESS))
self.assertFalse(page.find_message_and_dismiss(messages.ERROR))
self.assertFalse(page.is_port_present(self.RULE_PORT))
def test_securitygroup_create_delete(self):
"""tests the security group creation and deletion functionalities:
* creates a new security group
* verifies the security group appears in the security groups table
* deletes the newly created security group
* verifies the security group does not appear in the table after
deletion
"""
self._create_securitygroup()
self._delete_securitygroup()
def test_managerules_create_delete_by_row(self):
"""tests the manage rules creation and deletion functionalities:
* create a new security group
* verifies the security group appears in the security groups table
* creates a new rule
* verifies the rule appears in the rules table
* delete the newly created rule
* verifies the rule does not appear in the table after deletion
* deletes the newly created security group
* verifies the security group does not appear in the table after
deletion
"""
self._create_securitygroup()
self._add_rule()
self._delete_rule_by_row_action()
self._delete_securitygroup()
def test_managerules_create_delete_by_table(self):
"""tests the manage rules creation and deletion functionalities:
* create a new security group
* verifies the security group appears in the security groups table
* creates a new r | ule
* verifies the rule appears in the rules table
* delete the newly created rule
* verifies the rule does not appear in the table after deletion
* deletes the newly created security group
| * verifies the security group does not appear in the table after
deletion
"""
self._create_securitygroup()
self._add_rule()
self._delete_rule_by_table_action()
self._delete_securitygroup()
|
fstltna/PyImp | src/empPath.py | Python | gpl-2.0 | 20,956 | 0.031733 | # Copyright (C) 1998 Ulf Larsson
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import string
import empDb
import empSector
import math
move_directions = "ujnbgy"
move_reverse_directions ="bgyujn"
def norm_coords( coords, world_size ) :
""" normalize coordinates according to world size"""
x , y = coords
size_x, size_y = world_size
return ( ( x + size_x / 2 ) % size_x - size_x / 2,
( y + size_y / 2 ) % size_y - size_y / 2 )
def neighbours( coords ) :
""" neighbours ordered as directions """
world = empDb.megaDB['version']['worldsize']
x , y = coords
return [ norm_coords( ( x + 1, y - 1) , world ) ,
norm_coords( ( x + 2, y ) , world ) ,
norm_coords( ( x + 1, y + 1) , world ) ,
norm_coords( ( x - 1, y + 1) , world ) ,
norm_coords( ( x - 2, y ) , world ) ,
norm_coords( ( x - 1, y - 1) , world ) ]
def coords_to_str( coords ) :
x , y = coords
return `x` + ',' + `y`
## MobCost is used in the bestpath algorithm.
## Bestpath for navigating, marching and
## exploring needs their own version of
## this class
class MobCost:
""" Mobility cost using the move cmd """
def __init__( self ) :
pass
def cost( self, coords ) :
""" cost for moving into sector """
result = 2.0 * empSector.infinite_mob_cost
sect = empDb.megaDB['SECTOR'].get( coords , {} )
if sect and sect.get( 'owner' ) == empDb.CN_OWNED :
result = empSector.mob_cost( sect )
return result
class ExplMobCost:
""" Mobility cost using the expl cmd """
def __init__( self ) :
pass
def cost( self, coords ) :
""" cost for moving into sector """
result = 2.0 * empSector.infinite_mob_cost
sect = empDb.megaDB['SECTOR'].get( coords, {} )
if sect and ( sect.get( 'owner' ) == empDb.CN_OWNED
or empSector.is_explorable_into( sect ) ) :
result = empSector.mob_cost( sect )
return result
## Path is used in bestpath calculation to keep track of
## start point, end point , path string and path cost.
## These are public members right now but should be
## private.
class Path :
""" Empire path between sectors in a hex map """
def __init__( self, sect, mob_cost ) :
self.start = sect
self.end = sect
self.directions = ""
self.cost = mob_cost
def append( self, tail, dir ) :
""" concatinate two paths """
result = Path( self.start, self.cost + tail.cost )
result.directions = self.directions + dir + tail.directions
result.end = tail.end
return result
def post_extend( self, sect , mob_cost , dir ) :
""" add a step at the end of the path """
result = Path( self.start, self.cost + mob_cost )
result.directions = self.directions + dir
result.end = sect
return result
def pre_extend( self, sect , mob_cost , dir ) :
""" add a step at the beginning of the path """
result = Path( sect, self.cost + mob_cost )
result.directions = dir + self.directions
result.end = self.end;
return result
## Paths -- bestpath generator between sets of sectors.
##
##
## Paths has the following data members
## __mob : mobility cost object
## __visited : dictonary of sector we have calculated a path to
## __heads : list of paths starting at a source sector
## __tails : list of paths endinging at a destination sector
## __complete : list of paths starting at a source sector
## and ends at a destination sector.
##
## __heads, __tails and __complete are sorted wrt the path cost
##
## Paths has two main parts. One is to build | up paths
## and the second part deals with removing a source or
## destination sector.
##
## Building up paths is done by taking the best head ( or tail) path
## and crea | te new paths to the neighbours of the path's end point.
## If the neigbouring sector is *not* in __visited we add the new
## path, otherwise we try to create a __complete path. This ends
## when the total cost of the best head and tail path is higher
## then the best complete path.
##
## Removing source or destination sector is done by looping through
## __visited sectors and remove those origin from the removed sector.
## Same for the lists of paths.
##
##
class Paths :
""" Paths between two sets of sectors """
def __init__( self, from_sect, to_sect, mcost ):
self.__mob = mcost
self.__visited = {}
self.__starting_at = {}
self.__ending_at = {}
self.__heads = []
self.__tails = []
self.__complete = []
for sect in from_sect:
path = Path( sect, 0.0 )
self.__visited[ path.start ] = ( path , 1 )
self.__starting_at[ path.start ] = [ path ]
self.__insert_path( self.__heads , path )
for sect in to_sect :
path = Path( sect, self.__mob.cost( sect ) )
self.__visited[ path.end ] = ( path , 0 )
self.__ending_at[ path.end ] = [ path ]
self.__insert_path( self.__tails , path )
self.__make_paths()
def empty( self ) :
""" no path exits """
return ( len( self.__complete ) == 0
or self.__complete[ 0 ].cost >= empSector.infinite_mob_cost )
def best( self ) :
""" the best path ( lowest cost ) between any two sectors """
return self.__complete[ 0 ]
def __found_best_path( self ) :
""" have we found the best path """
done_search = not self.__heads or not self.__tails
if not done_search :
best_so_far = empSector.infinite_mob_cost
if self.__complete :
best_so_far = self.__complete[ 0 ].cost
best_possible = self.__heads[ 0 ].cost + self.__tails[ 0 ].cost
done_search = best_possible > best_so_far
return done_search
def __insert_path( self, path_list, path ) :
""" insert path in a sorted list """
index = 0
for elem in path_list :
if path.cost <= elem.cost :
break
else :
index = index + 1;
path_list.insert( index, path )
def __make_paths( self ):
""" expand tail and head paths """
expand_heads = not 0
while not self.__found_best_path():
if expand_heads:
self.__expand_heads()
else :
self.__expand_tails()
expand_heads = not expand_heads
def __expand_heads( self ) :
""" expand best head path """
path = self.__heads[ 0 ];
# print "expand head path " + path_str( path )
del self.__heads[ 0 ]
i = 0
for sect in neighbours( path.end ) :
dir = move_directions[ i ]
if not self.__visited.has_key( sect ) :
new_path = path.post_extend( sect ,
self.__mob.cost( sect ),
dir )
self.__insert_path( self.__heads, new_path )
self.__visited[ sect ] = ( new_path, 1 )
self.__starting_at[ path.start ].append( new_path )
else :
tail, is_head_path = self.__visited[ sect ]
if not is_head_path :
|
common-workflow-language/cwltool | cwltool/executors.py | Python | apache-2.0 | 18,408 | 0.000978 | """Single and multi-threaded executors."""
import datetime
import functools
import logging
import math
import os
import threading
from abc import ABCMeta, abstractmethod
from threading import Lock
from typing import (
Dict,
Iterable,
List,
MutableSequence,
Optional,
Set,
Tuple,
Union,
cast,
)
import psutil
from schema_salad.exceptions import ValidationException
from schema_salad.sourceline import SourceLine
from .command_line_tool import CallbackJob, ExpressionJob
from .context import RuntimeContext, getdefault
from .errors import WorkflowException
from .job import JobBase
from .loghandler import _logger
from .mutation import MutationManager
from .process import Process, cleanIntermediate, relocateOutputs
from .provenance_profile import ProvenanceProfile
from .task_queue import TaskQueue
from .update import ORIGINAL_CWLVERSION
from .utils import CWLObjectType, JobsType
from .workflow import Workflow
from .workflow_job import WorkflowJob, WorkflowJobStep
TMPDIR_LOCK = Lock()
class JobExecutor(metaclass=ABCMeta):
"""Abstract base job executor."""
def __init__(self) -> None:
"""Initialize."""
self.final_output = [] # type: MutableSequence[Optional[CWLObjectType]]
self.final_status = [] # type: List[str]
self.output_dirs = set() # type: Set[str]
def __call__(
self,
process: Process,
job_order_object: CWLObjectType,
runtime_context: RuntimeContext,
logger: logging.Logger = _logger,
) -> Tuple[Optional[CWLObjectType], str]:
return self.execute(process, job_order_object, runtime_context, logger)
def output_callback(
self, out: Optional[CWLObjectType], process_status: str
) -> None:
"""Collect the final status and outputs."""
self.final_status.append(process_status)
self.final_output.append(out)
@abstractmethod
def run_jobs(
self,
process: Process,
job_order_object: CWLObjectType,
logger: logging.Logger,
runtime_context: RuntimeContext,
) -> None:
"""Execute the jobs for the given Process."""
def execute(
self,
process: Process,
job_order_object: CWLObjectType,
runtime_context: RuntimeContext,
logger: logging.Logger = _logger,
) -> Tuple[Union[Optional[CWLObjectType]], str]:
"""Execute the process."""
if not runtime_context.basedir:
raise WorkflowException("Must provide 'basedir' in runtimeContext")
def check_for_abstract_op(tool: CWLObjectType) -> None:
if tool["class"] == "Operation":
raise SourceLine(
tool, "class", WorkflowException, runtime_context.debug
).makeError("Workflow has unrunnable abstract Operation")
process.visit(check_for_abstract_op)
finaloutdir = None # Type: Optional[str]
original_outdir = runtime_context.outdir
if isinstance(original_outdir, str):
finaloutdir = os.path.abspath(original_outdir)
runtime_context = runtime_context.copy()
outdir = runtime_context.create_outdir()
self.output_dirs.add(outdir)
runtime_context.outdir = outdir
runtime_context.mutation_manager = MutationManager()
runtime_context.toplevel = True
runtime_context.workflow_eval_lock = threading.Condition(threading.RLock())
job_reqs = None # type: Optional[List[CWLObjectType]]
if "https://w3id.org/cwl/cwl#requirements" in job_order_object:
if process.metadata.get(ORIGINAL_CWLVERSION) == "v1.0":
raise WorkflowException(
"`cwl:requirements` in the input object is not part of CWL "
"v1.0. You can adjust to use `cwltool:overrides` instead; or you "
"can set the cwlVersion to v1.1"
)
job_reqs = cast(
List[CWLObjectType],
job_order_object["https://w3id.org/cwl/cwl#requirements"],
)
elif (
"cwl:defaults" in process.metadata
and "https://w3id.org/cwl/cwl#requirements"
in cast(CWLObjectType, process.metadata["cwl:defaults"])
):
if process.metadata.get(ORIGINAL_CWLVERSION) == "v1.0":
raise WorkflowException(
"`cwl:requirements` in the input object is not part of CWL "
"v1.0. You can adjust to use `cwltool:overrides` instead; or you "
"can set the cwlVersion to v1.1"
)
job_reqs = cast(
Optional[List[CWLObjectType]],
cast(CWLObjectType, process.metadata["cwl:defaults"])[
"https://w3id.org/cwl/cwl#requirements"
],
)
if job_reqs is not None:
for req in job_reqs:
process.requir | ements.append(req)
self.run_jobs(process, job_order_object, logger, runtime_context)
if (
self.final_output
and self.final_output[0] is not | None
and finaloutdir is not None
):
self.final_output[0] = relocateOutputs(
self.final_output[0],
finaloutdir,
self.output_dirs,
runtime_context.move_outputs,
runtime_context.make_fs_access(""),
getdefault(runtime_context.compute_checksum, True),
path_mapper=runtime_context.path_mapper,
)
if runtime_context.rm_tmpdir:
if not runtime_context.cachedir:
output_dirs = self.output_dirs # type: Iterable[str]
else:
output_dirs = filter(
lambda x: not x.startswith(runtime_context.cachedir), # type: ignore
self.output_dirs,
)
cleanIntermediate(output_dirs)
if self.final_output and self.final_status:
if (
runtime_context.research_obj is not None
and isinstance(
process, (JobBase, Process, WorkflowJobStep, WorkflowJob)
)
and process.parent_wf
):
process_run_id = None # type: Optional[str]
name = "primary"
process.parent_wf.generate_output_prov(
self.final_output[0], process_run_id, name
)
process.parent_wf.document.wasEndedBy(
process.parent_wf.workflow_run_uri,
None,
process.parent_wf.engine_uuid,
datetime.datetime.now(),
)
process.parent_wf.finalize_prov_profile(name=None)
return (self.final_output[0], self.final_status[0])
return (None, "permanentFail")
class SingleJobExecutor(JobExecutor):
"""Default single-threaded CWL reference executor."""
def run_jobs(
self,
process: Process,
job_order_object: CWLObjectType,
logger: logging.Logger,
runtime_context: RuntimeContext,
) -> None:
process_run_id = None # type: Optional[str]
# define provenance profile for single commandline tool
if (
not isinstance(process, Workflow)
and runtime_context.research_obj is not None
):
process.provenance_object = ProvenanceProfile(
runtime_context.research_obj,
full_name=runtime_context.cwl_full_name,
host_provenance=False,
user_provenance=False,
orcid=runtime_context.orcid,
# single tool execution, so RO UUID = wf UUID = tool UUID
run_uuid=runtime_context.research_obj.ro_uuid,
fsaccess=runtime_context.make_fs_access(""),
)
process.parent_wf = process.provenance_object
jobiter = process.job(job_order_object, self.output_callback, runtime_context)
try:
for job in jo |
weqopy/blog_instance | app/api_1_0/comments.py | Python | mit | 2,341 | 0 | from flask import jsonify, request, g, url_for, current_app
from .. import db
from ..models import Post, Permission, Comment
from . import api
from .decorators import permission_required
@api.route('/comments/')
def get_comments():
page = request.args.get('page', 1, type=int)
pagination = Comment.query.order_by(Comment.timestamp.desc()).paginate(
page, per_page=current_app.config['FLASK_COMMENTS_PER_PAGE'],
error_out=False)
comments = pagination.items
prev = None
if pagination.has_prev:
prev = url_for('api.get_comments', page=page - 1, _external=True)
next = None
if pagination.has_next:
next = url_for('api.get_comments', page=page + 1, _external=True)
return jsonify({
'comments': [comment.to_json() for comment in comments],
'prev': prev,
'next': next,
'count': pagination.total
})
@api.route('/comments/<int:id>')
def get_comment(id):
comment = Comment.query.get_or_404(id)
return jsonify(comment.to_json())
@api.route('/posts/<int:id>/comments/')
def get_post_comments(id):
post = Post.query.get_or_404(id)
page = request.args.get('page', 1, type=int)
pagination = post.comments.order_by(Comment.timestamp.asc()).paginate(
page, per_page=current_app.config['FLASK_COMMENTS_PER_PAGE'],
error_out=False)
comments = pagination.items
prev = None
if pagination.has_prev:
prev = url_for('api.get_post_comments', id=id, page=page - 1,
_external=True)
next = None
if pagination.has_next:
next = url_for('api.get_post_comments', id=id, page=page + 1,
_external=True)
return jsonify({
'comments': [comment.to_json() for comment in comments],
'prev': prev,
'next': next,
'count': pagination.total
})
@api.route('/posts/<int:id>/comments/', methods=['POST'])
@permission_required(Permission.COMMENT)
def new_post_comment(id):
post = P | ost.query.get_or_404(id)
comment = Comment.from_json(request.json)
comment.author = g.current_user
comment.post = post
db.session.add(comment)
db.session.commit()
return j | sonify(comment.to_json()), 201, \
{'Location': url_for('api.get_comment', id=comment.id,
_external=True)}
|
pbanaszkiewicz/amy | amy/workshops/migrations/0239_organization_affiliated_organizations.py | Python | mit | 441 | 0.002268 | # Generated by Django 2.2.18 on 2021-03-27 18:32
from django.db import migrations, models |
class Migration(migrations.Migration):
dependencies = [
('workshops', '0238_task_seat_public'),
]
operations = [
migrations.AddField(
model_name='organization',
name='affiliated_organizations',
field=models.ManyToManyField(to='workshops.Organization', blank=True),
) | ,
]
|
XiaosongWei/chromium-crosswalk | tools/telemetry/telemetry/internal/results/story_run_unittest.py | Python | bsd-3-clause | 2,653 | 0.003769 | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from telemetry.internal.results import story_run
from telemetry.story import shared_state
from telemetry.story import story_set
from telemetry import story as story_module
from telemetry.value import failure
from telemetry.value import improvement_direction
from telemetry.value import scalar
from telemetry.value import skip
# pylint: disable=abstract-method
class SharedStateBar(shared_state.SharedState):
pass
class StoryFoo(story_module.Story):
def __init__(self, name='', labels=None):
super(StoryFoo, self).__init__(
SharedStateBar, name, labels)
class StoryRunTest(unittest.TestCase):
def setUp(self):
self.story_set = story_set.StorySet()
self.story_set.AddStory(StoryFoo())
@property
def stories(self):
return self.story_set.stories
def testStoryRunFailed(self):
run = story_run.StoryRun(self.stories[0])
run.AddValue(failure.FailureValue.FromMessage(self.stories[0], 'test'))
self.assertFalse(run.ok)
self.assertTrue(run.failed)
self.assertFalse(run.skipped)
run = story_run.StoryRun(self.stories[0])
run.AddValue(scalar.ScalarValue(
self.stories[0], 'a', 's', 1,
improvement_direction=improvement_direction.UP))
run.AddValue(failure.FailureValue.FromMessage(self.stories[0], 'test'))
self.assertFalse(run.ok)
self.assertTrue(run.failed)
self.assertFalse(run.skipped)
def testStoryRunSkipped(self):
run = story | _run.StoryRun(self.stories[0])
run.AddValue(failure.FailureValue.FromMessage(self.stories[0], 'test'))
run.AddValue(skip.SkipValue(self.stories[0], 'test'))
self.assertFalse(run.ok)
self.assertFalse(run.failed)
self.assertTrue(run.skipped)
run = story_run.StoryRun(self.stories[0])
run.AddValue(scalar.ScalarValue(
self.stories[0], 'a', 's', 1,
improvement_direction=improvement_direction.UP))
run.AddValue(skip.SkipValue(self.stories[0], 'test'))
| self.assertFalse(run.ok)
self.assertFalse(run.failed)
self.assertTrue(run.skipped)
def testStoryRunSucceeded(self):
run = story_run.StoryRun(self.stories[0])
self.assertTrue(run.ok)
self.assertFalse(run.failed)
self.assertFalse(run.skipped)
run = story_run.StoryRun(self.stories[0])
run.AddValue(scalar.ScalarValue(
self.stories[0], 'a', 's', 1,
improvement_direction=improvement_direction.UP))
self.assertTrue(run.ok)
self.assertFalse(run.failed)
self.assertFalse(run.skipped)
|
khchine5/lino | lino/modlib/users/desktop.py | Python | bsd-2-clause | 4,228 | 0.006623 | # -*- coding: UTF-8 -*-
# Copyright 2011-2017 Luc Saffre
# License: BSD (see file COPYING for details)
"""Desktop UI for this plugin.
Documentation is in :doc:`/specs/users` and :doc:`/dev/users`
"""
from django.conf import settings
from lino.api import dd, rt, _
from lino.core import actions
from lino.core.roles import SiteAdmin, SiteUser
from .choicelists import UserTypes
from .actions import SendWelcomeMail, SignIn, SignInWithSocialAuth
class UserDetail(dd.DetailLayout):
box1 = """
username user_type:20 partner
first_name last_name initials
email language time_zone
id created modified
"""
main = """
box1 #MembershipsByUser:20
remarks:40 AuthoritiesGiven:20 SocialAuthsByUser:30
"""
main_m = """
username
user_type
partner
first_name last_name
initials
email language time_zone
id created modified
remarks
AuthoritiesGiven
"""
class UserInsertLayout(dd.InsertLayout):
window_size = (60, 'auto')
main = """
username email
first_name last_name
partner
language user_type
"""
class Users(dd.Table):
#~ debug_actions = True
model = 'users.User'
#~ order_by = "last_name first_name".split()
order_by = ["username"]
active_fields = 'partner'
parameters = dict(
user_type=UserTypes.field(blank=True))
simple_parameters = ['user_type']
#~ column_names = 'username first_name last_name is_active is_staff is_expert is_superuser *'
column_names = 'username user_type first_name last_name *'
detail_layout = 'users.UserDetail'
insert_l | ayout = UserInsertLayout()
column_names_m = 'mobile_item *' |
@classmethod
def render_list_item(cls, obj, ar):
return "<p>{}</p>".format(obj.username)
#~ @classmethod
#~ def get_row_permission(cls,action,user,obj):
#~ """
#~ Only system managers may edit other users.
#~ See also :meth:`User.disabled_fields`.
#~ """
#~ if not super(Users,cls).get_row_permission(action,user,obj):
#~ return False
#~ if user.level >= UserLevel.manager: return True
#~ if action.readonly: return True
#~ if user is not None and user == obj: return True
#~ return False
class AllUsers(Users):
required_roles = dd.login_required(SiteAdmin)
send_welcome_email = SendWelcomeMail()
class UsersOverview(Users):
required_roles = set([])
column_names = 'username user_type language'
exclude = dict(user_type='')
sign_in = SignIn()
# if settings.SITE.social_auth_backends is None:
# sign_in = SignIn()
# else:
# sign_in = SignInWithSocialAuth()
class MySettings(Users):
# use_as_default_table = False
# hide_top_toolbar = True
required_roles = dd.login_required()
default_list_action_name = 'detail'
# detail_layout = 'users.UserDetail'
@classmethod
def get_default_action(cls):
return actions.ShowDetail(cls.detail_layout, hide_navigator=True)
class Authorities(dd.Table):
required_roles = dd.login_required(SiteAdmin)
model = 'users.Authority'
class AuthoritiesGiven(Authorities):
required_roles = dd.login_required()
master_key = 'user'
label = _("Authorities given")
column_names = 'authorized'
auto_fit_column_widths = True
class AuthoritiesTaken(Authorities):
required_roles = dd.login_required()
master_key = 'authorized'
label = _("Authorities taken")
column_names = 'user'
auto_fit_column_widths = True
if settings.SITE.social_auth_backends:
try:
import social_django
except ImportError:
raise Exception(
"Sites with social_auth_backends must also install PSA "
"into their environment: "
"$ pip install social-auth-app-django")
class SocialAuths(dd.Table):
label = _("Third-party authorizations")
required_roles = dd.login_required(SiteAdmin)
model = 'social_django.UserSocialAuth'
class SocialAuthsByUser(SocialAuths):
required_roles = dd.login_required(SiteUser)
master_key = 'user'
else:
class SocialAuthsByUser(dd.Dummy):
pass
|
jocelynmass/nrf51 | toolchain/arm_cm0/arm-none-eabi/lib/thumb/v7-ar/fpv3/hard/libstdc++.a-gdb.py | Python | gpl-2.0 | 2,493 | 0.006418 | # -*- python -*-
# Copyright (C) 2009-2017 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys
import gdb
import os
import os.path
pythondir = '/Users/build/work/GCC-7-build/install-native/share/gcc-arm-none-eabi'
libdir = '/Users/build/work/GCC-7-build/install-native/arm-none-eabi/lib/thumb/v7-ar/fpv3/hard'
# This file might be loaded when there is no current objfile. This
# can happen if the user loads it manually. In this case we don't
# update sys.path; instead we just hope the user managed to do that
# beforehand.
if gdb.current_objfile () is not None:
# Update module path. We want to find the relative path from libdir
# to pythondir, and then we want to apply that relative path to the
# directory holding the objfile with which this file is associated.
# This preserves relocatability of the gcc tree.
# Do a simple normalization that removes duplicate separators.
pythondir = os.path.normpath (pythondir)
libdir = os.path.normpath (libdir)
prefix = os.path.commonprefix ([libdir, pythondir])
# In some bizarre configuration we might have found a match in the
# middle of a | directory name.
if prefix[-1] != '/':
prefix = os.path.dirname (prefix) + '/'
# Strip off the prefix.
pythondir = pythondir[len (pr | efix):]
libdir = libdir[len (prefix):]
# Compute the ".."s needed to get from libdir to the prefix.
dotdots = ('..' + os.sep) * len (libdir.split (os.sep))
objfile = gdb.current_objfile ().filename
dir_ = os.path.join (os.path.dirname (objfile), dotdots, pythondir)
if not dir_ in sys.path:
sys.path.insert(0, dir_)
# Call a function as a plain import would not execute body of the included file
# on repeated reloads of this object file.
from libstdcxx.v6 import register_libstdcxx_printers
register_libstdcxx_printers(gdb.current_objfile())
|
veltzer/demos-python | src/examples/short/environment_variables/simple.py | Python | gpl-3.0 | 458 | 0.002183 | #!/usr/ | bin/env python
"""
A simple example of how to get or set environment variables from python
"""
import os
print(os.environ['USER'])
if 'HOSTNAME' in os.environ:
print(os.environ['HOSTNAME'])
else:
print(
'you dont have a HOSTNAME in your environment, it is probably just a shell variable')
# lets delete an environment variable
del os.environ['USER']
assert 'USER' not in os.environ
for k, v in os.environ.items():
print | (k, v)
|
Fantomas42/django-emoticons | emoticons/tests/settings.py | Python | bsd-3-clause | 392 | 0 | """Settings for testing em | oticons"""
DATABASES = {
'default': {'NAME': 'emoticons.db',
'ENGINE': 'django.db.backends.sqlite3'}
}
TEMP | LATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'APP_DIRS': True,
}
]
INSTALLED_APPS = [
'emoticons',
'django.contrib.staticfiles'
]
SECRET_KEY = 'secret-key'
STATIC_URL = '/'
|
alxgu/ansible | lib/ansible/modules/network/netvisor/pn_access_list_ip.py | Python | gpl-3.0 | 4,598 | 0.00087 | #!/usr/bin/python
# Copyright: (c) 2018, Pluribus Networks
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: pn_access_list_ip
author: "Pluribus Networks (@rajaspachipulusu17)"
version_added: "2.8"
short_description: CLI command to add/remove access-list-ip
description:
- This modules can be used to add and remove IPs associated with access list.
options:
pn_cliswitch:
description:
- Target switch to run the CLI on.
required: False
type: str
state:
description:
- State the action to perform. Use 'present' to add access-list-ip and
'absent' to remove acce | ss-list-ip.
required: True
choices: ["present", "absent"]
pn_ip:
description:
- IP associated with the access list.
required: False
default: '::'
type: str
pn_name:
| description:
- Access List Name.
required: False
type: str
"""
EXAMPLES = """
- name: access list ip functionality
pn_access_list_ip:
pn_cliswitch: "sw01"
pn_name: "foo"
pn_ip: "172.16.3.1"
state: "present"
- name: access list ip functionality
pn_access_list_ip:
pn_cliswitch: "sw01"
pn_name: "foo"
pn_ip: "172.16.3.1"
state: "absent"
"""
RETURN = """
command:
description: the CLI command run on the target node.
returned: always
type: str
stdout:
description: set of responses from the access-list-ip command.
returned: always
type: list
stderr:
description: set of error responses from the access-list-ip command.
returned: on error
type: list
changed:
description: indicates whether the CLI caused changes on the target.
returned: always
type: bool
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.netvisor.pn_nvos import pn_cli, run_cli
from ansible.module_utils.network.netvisor.netvisor import run_commands
def check_cli(module, cli):
"""
This method checks for idempotency using the access-list-ip-show command.
If ip exists, return True else False.
:param module: The Ansible module to fetch input parameters
:param cli: The CLI string
"""
name = module.params['pn_name']
ip = module.params['pn_ip']
clicopy = cli
cli += ' access-list-show name %s no-show-headers ' % name
out = run_commands(module, cli)[1]
if name not in out:
module.fail_json(
failed=True,
msg='access-list with name %s does not exist' % name
)
cli = clicopy
cli += ' access-list-ip-show name %s format ip no-show-headers' % name
out = run_commands(module, cli)[1]
out = out.split()
return True if ip in out else False
def main():
""" This section is for arguments parsing """
state_map = dict(
present='access-list-ip-add',
absent='access-list-ip-remove',
)
module = AnsibleModule(
argument_spec=dict(
pn_cliswitch=dict(required=False, type='str'),
state=dict(required=True, type='str',
choices=state_map.keys()),
pn_ip=dict(required=False, type='str', default='::'),
pn_name=dict(required=False, type='str'),
),
required_if=(
["state", "present", ["pn_name"]],
["state", "absent", ["pn_name", "pn_ip"]],
),
)
# Accessing the arguments
cliswitch = module.params['pn_cliswitch']
state = module.params['state']
ip = module.params['pn_ip']
name = module.params['pn_name']
command = state_map[state]
# Building the CLI command string
cli = pn_cli(module, cliswitch)
IP_EXISTS = check_cli(module, cli)
cli += ' %s name %s ' % (command, name)
if command == 'access-list-ip-remove':
if IP_EXISTS is False:
module.exit_json(
skipped=True,
msg='access-list with ip %s does not exist' % ip
)
if ip:
cli += ' ip ' + ip
else:
if command == 'access-list-ip-add':
if IP_EXISTS is True:
module.exit_json(
skipped=True,
msg='access list with ip %s already exists' % ip
)
if ip:
cli += ' ip ' + ip
run_cli(module, cli, state_map)
if __name__ == '__main__':
main()
|
mschlaipfer/z3 | scripts/mk_mem_initializer_cpp.py | Python | mit | 1,138 | 0.002636 | #!/usr/bin/env python
"""
Scans the source directories for
memory initializers and finalizers and
emits and implementation of
``void mem_initialize()`` and
``void mem_finalize()`` into ``mem_initializer.cpp``
in the destination directory.
"""
import mk_genfile_common
import argparse
import logging
import os
import sys
def main(args):
logging.basicConfig(level=logging.INFO)
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("destination_dir" | , help="destination direct | ory")
parser.add_argument("source_dirs", nargs="+",
help="One or more source directories to search")
pargs = parser.parse_args(args)
if not mk_genfile_common.check_dir_exists(pargs.destination_dir):
return 1
for source_dir in pargs.source_dirs:
if not mk_genfile_common.check_dir_exists(source_dir):
return 1
output = mk_genfile_common.mk_mem_initializer_cpp_internal(
pargs.source_dirs,
pargs.destination_dir
)
logging.info('Generated "{}"'.format(output))
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
|
hakancelik96/coogger | coogger/utils.py | Python | mit | 208 | 0 | f | rom django.shortcuts import render
from django.urls import resolve
def just_redirect_by_name(request):
url_name = resolve(request.path_info).url_name
return render(request, f"{url_name}.html", {}) | |
Alshain-Oy/Cloudsnake-Application-Server | clients/client_analytics_clear.py | Python | apache-2.0 | 451 | 0.019956 |
#!/usr/bin/env python
# Cloudsnake Application server
# Licensed under Apache Licen | se, see license.txt
# Author: Markus Gronholm <markus@alshain.fi> Alshain Oy
import libCloudSnakeClient as SnakeClient
import pprint, sys, time
client = SnakeClient.CloudSnakeClient( 'http://localhost:8500', sys.argv[ 1 ] )
mapped_object = SnakeClient.CloudSnakeMapper( client )
#pprint.p | print( mapped_object.dump_analytics() )
mapped_object.clear_analytics()
|
jorisvandenbossche/numpy | numpy/doc/constants.py | Python | bsd-3-clause | 9,291 | 0.001938 | # -*- coding: utf-8 -*-
"""
=========
Constants
=========
.. currentmodule:: numpy
NumPy includes several constants:
%(constant_list)s
"""
#
# Note: the docstring is autogenerated.
#
from __future__ import division, absolute_import, print_function
import textwrap, re
# Maintain same format as in numpy.add_newdocs
constants = []
def add_newdoc(module, name, doc):
constants.append((name, doc))
add_newdoc('numpy', 'pi',
"""
``pi = 3.1415926535897932384626433...``
References
----------
https://en.wikipedia.org/wiki/Pi
""")
add_newdoc('numpy', 'e',
"""
Euler's constant, base of natural logarithms, Napier's constant.
``e = 2.71828182845904523536028747135266249775724709369995...``
See Also
--------
exp : Exponential function
log : Natural logarithm
References
----------
https://en.wikipedia.org/wiki/E_%28mathematical_constant%29
""")
add_newdoc('numpy', 'euler_gamma',
"""
``γ = 0.5772156649015328606065120900824024310421...``
References
----------
https://en.wikipedia.org/wiki/Euler-Mascheroni_constant
""")
add_newdoc('numpy', 'inf',
"""
IEEE 754 floating point representation of (positive) infinity.
Returns
-------
y : float
A floating point representation of positive infinity.
See Also
--------
isinf : Shows which elements are positive or negative infinity
isposinf : Shows which elements are positive infinity
isneginf : Shows which elements are negative infinity
isnan : Shows which elements are Not a Number
isfinite : Shows which el | ements are finite (not one of Not a Number,
positive infinity and negative infinity)
Notes
-----
NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic
(IEEE 754). This means that Not a Number is not equivalent to infinity.
Also that positive infinity is not equivalent to negative infinity. But
infinity is equivalent to positive in | finity.
`Inf`, `Infinity`, `PINF` and `infty` are aliases for `inf`.
Examples
--------
>>> np.inf
inf
>>> np.array([1]) / 0.
array([ Inf])
""")
add_newdoc('numpy', 'nan',
"""
IEEE 754 floating point representation of Not a Number (NaN).
Returns
-------
y : A floating point representation of Not a Number.
See Also
--------
isnan : Shows which elements are Not a Number.
isfinite : Shows which elements are finite (not one of
Not a Number, positive infinity and negative infinity)
Notes
-----
NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic
(IEEE 754). This means that Not a Number is not equivalent to infinity.
`NaN` and `NAN` are aliases of `nan`.
Examples
--------
>>> np.nan
nan
>>> np.log(-1)
nan
>>> np.log([-1, 1, 2])
array([ NaN, 0. , 0.69314718])
""")
add_newdoc('numpy', 'newaxis',
"""
A convenient alias for None, useful for indexing arrays.
See Also
--------
`numpy.doc.indexing`
Examples
--------
>>> newaxis is None
True
>>> x = np.arange(3)
>>> x
array([0, 1, 2])
>>> x[:, newaxis]
array([[0],
[1],
[2]])
>>> x[:, newaxis, newaxis]
array([[[0]],
[[1]],
[[2]]])
>>> x[:, newaxis] * x
array([[0, 0, 0],
[0, 1, 2],
[0, 2, 4]])
Outer product, same as ``outer(x, y)``:
>>> y = np.arange(3, 6)
>>> x[:, newaxis] * y
array([[ 0, 0, 0],
[ 3, 4, 5],
[ 6, 8, 10]])
``x[newaxis, :]`` is equivalent to ``x[newaxis]`` and ``x[None]``:
>>> x[newaxis, :].shape
(1, 3)
>>> x[newaxis].shape
(1, 3)
>>> x[None].shape
(1, 3)
>>> x[:, newaxis].shape
(3, 1)
""")
add_newdoc('numpy', 'NZERO',
"""
IEEE 754 floating point representation of negative zero.
Returns
-------
y : float
A floating point representation of negative zero.
See Also
--------
PZERO : Defines positive zero.
isinf : Shows which elements are positive or negative infinity.
isposinf : Shows which elements are positive infinity.
isneginf : Shows which elements are negative infinity.
isnan : Shows which elements are Not a Number.
isfinite : Shows which elements are finite - not one of
Not a Number, positive infinity and negative infinity.
Notes
-----
NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic
(IEEE 754). Negative zero is considered to be a finite number.
Examples
--------
>>> np.NZERO
-0.0
>>> np.PZERO
0.0
>>> np.isfinite([np.NZERO])
array([ True])
>>> np.isnan([np.NZERO])
array([False])
>>> np.isinf([np.NZERO])
array([False])
""")
add_newdoc('numpy', 'PZERO',
"""
IEEE 754 floating point representation of positive zero.
Returns
-------
y : float
A floating point representation of positive zero.
See Also
--------
NZERO : Defines negative zero.
isinf : Shows which elements are positive or negative infinity.
isposinf : Shows which elements are positive infinity.
isneginf : Shows which elements are negative infinity.
isnan : Shows which elements are Not a Number.
isfinite : Shows which elements are finite - not one of
Not a Number, positive infinity and negative infinity.
Notes
-----
NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic
(IEEE 754). Positive zero is considered to be a finite number.
Examples
--------
>>> np.PZERO
0.0
>>> np.NZERO
-0.0
>>> np.isfinite([np.PZERO])
array([ True])
>>> np.isnan([np.PZERO])
array([False])
>>> np.isinf([np.PZERO])
array([False])
""")
add_newdoc('numpy', 'NAN',
"""
IEEE 754 floating point representation of Not a Number (NaN).
`NaN` and `NAN` are equivalent definitions of `nan`. Please use
`nan` instead of `NAN`.
See Also
--------
nan
""")
add_newdoc('numpy', 'NaN',
"""
IEEE 754 floating point representation of Not a Number (NaN).
`NaN` and `NAN` are equivalent definitions of `nan`. Please use
`nan` instead of `NaN`.
See Also
--------
nan
""")
add_newdoc('numpy', 'NINF',
"""
IEEE 754 floating point representation of negative infinity.
Returns
-------
y : float
A floating point representation of negative infinity.
See Also
--------
isinf : Shows which elements are positive or negative infinity
isposinf : Shows which elements are positive infinity
isneginf : Shows which elements are negative infinity
isnan : Shows which elements are Not a Number
isfinite : Shows which elements are finite (not one of Not a Number,
positive infinity and negative infinity)
Notes
-----
NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic
(IEEE 754). This means that Not a Number is not equivalent to infinity.
Also that positive infinity is not equivalent to negative infinity. But
infinity is equivalent to positive infinity.
Examples
--------
>>> np.NINF
-inf
>>> np.log(0)
-inf
""")
add_newdoc('numpy', 'PINF',
"""
IEEE 754 floating point representation of (positive) infinity.
Use `inf` because `Inf`, `Infinity`, `PINF` and `infty` are aliases for
`inf`. For more details, see `inf`.
See Also
--------
inf
""")
add_newdoc('numpy', 'infty',
"""
IEEE 754 floating point representation of (positive) infinity.
Use `inf` because `Inf`, `Infinity`, `PINF` and `infty` are aliases for
`inf`. For more details, see `inf`.
See Also
--------
inf
""")
add_newdoc('numpy', 'Inf',
"""
IEEE 754 floating point representation of (positive) infinity.
Use `inf` because `Inf`, `Infinity`, `PINF` and `infty` are aliases for
`inf`. For more details, see `inf`.
See Also
--------
inf
""")
add_newdoc('numpy', 'Infinity',
"""
IEEE 754 float |
jtyuan/racetrack | tests/configs/tgen-dram-ctrl.py | Python | bsd-3-clause | 3,365 | 0.005944 | # Copyright (c) 2012 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT | OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Andreas Hansson
import m5
from m5.objects import *
# both traffic generator and communication monitor are only available
# if we have p | rotobuf support, so potentially skip this test
require_sim_object("TrafficGen")
require_sim_object("CommMonitor")
# even if this is only a traffic generator, call it cpu to make sure
# the scripts are happy
cpu = TrafficGen(config_file = "tests/quick/se/70.tgen/tgen-dram-ctrl.cfg")
# system simulated
system = System(cpu = cpu, physmem = DDR3_1600_x64(),
membus = NoncoherentBus(width = 16),
clk_domain = SrcClockDomain(clock = '1GHz',
voltage_domain =
VoltageDomain()))
# add a communication monitor
system.monitor = CommMonitor()
# connect the traffic generator to the bus via a communication monitor
system.cpu.port = system.monitor.slave
system.monitor.master = system.membus.slave
# connect the system port even if it is not used in this example
system.system_port = system.membus.slave
# connect memory to the membus
system.physmem.port = system.membus.master
# -----------------------
# run simulation
# -----------------------
root = Root(full_system = False, system = system)
root.system.mem_mode = 'timing'
|
max1k/cbs | p311/migrations/0004_auto_20150325_1306.py | Python | gpl-2.0 | 781 | 0 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('p311', '0003_auto_20150323_1217'),
]
operations = [
migrations.RenameField(
model_name='commoninfo',
| old_name='date',
new_name='mod_date',
),
migrations.AddField(
model_name='commoninfo',
name='doc_date',
field=models.DateField(null=True),
preserve_default=True,
),
migrations.AlterField(
model_name='result',
name='description',
field=models.CharField(null=True, max_l | ength=250),
preserve_default=True,
),
]
|
kaslusimoes/SummerSchool2016 | data/Multiple Variation/Scale Free/heatmap.py | Python | apache-2.0 | 712 | 0.030899 | from pickle import load
import os
import matplotlib.pyplot as plt
import numpy as np
class Data:
def __init__(self):
self.m_list1 = []
self.m_l | ist2 = []
p = './data/'
m1 = []
m2 = []
for _,_,c in os.walk(p):
for x in c:
print('\n',x,'\n')
f = open(os.path.join(p,x), 'r')
d = load(f)
lm = [x for _,_,_,_,x in d.m_list1]
hm = np.sum(lm,0)/10.
m1.append(hm)
lm = [x for _,_,_,_,x in d.m_list2]
hm = np.sum(lm,0)/10.
m2.append(hm)
sm1 = np.mean(m1,0)
sm2 = np.mean(m2,0)
plt.imshow(sm1, extent=[1, 2, -1, 0], aspect="auto", origin="lower")
plt.colorbar | ()
plt.show()
plt.imshow(sm2, extent=[1, 2, -1, 0], aspect="auto", origin="lower")
plt.colorbar()
plt.show()
|
neilb14/featurewise | scripts/generate_usage.py | Python | mit | 2,293 | 0.027911 | import simpy
import datetime
import random
from datetime import timedelta
end_of_time = 60*60*24*365
def working_hours(current_time):
return current_time.hour < 17 and current_time.hour > 8
def linear_increase(now, min_duration, max_duration):
time_left = end_of_time - now
return min_duration + max_duration * time_left / end_of_time
def linear_decrease(now, min_duration, max_duration):
return min_duration + max_duration * now / end_of | _time
random_dura | tion = lambda x,min_duration,max_duration : random.gauss((max_duration-min_duration)/2, (max_duration-min_duration)/(6))
increasing_duration = lambda x,min_duration,max_duration : linear_increase(x,min_duration,max_duration)
decreasing_duration = lambda x,min_duration,max_duration : linear_decrease(x,min_duration,max_duration)
start = datetime.datetime.now() - timedelta(seconds=end_of_time)
end_time = datetime.datetime.now()
events = []
def user(env, name, duration, min_duration, max_duration):
yield env.timeout(30)
current_time = start
while current_time < end_time:
current_time = start + timedelta(seconds=env.now)
if(working_hours(current_time)):
events.append({'name':name,'at':current_time, 'type':'tick'})
duration_in_s = duration(env.now, min_duration, max_duration)
while(duration_in_s < 0):
duration_in_s = duration(env.now, min_duration, max_duration)
yield env.timeout(duration_in_s)
else:
yield env.timeout(15*60)
env = simpy.Environment()
env.process(user(env, 'rhino', decreasing_duration, 60*10, 60*60*2))
env.process(user(env, 'cheetah', increasing_duration, 60*2, 60*45))
env.process(user(env, 'moose', random_duration, 50, 6000))
env.process(user(env, 'lion', increasing_duration, 60*5, 60*60*1))
env.process(user(env, 'mouse', decreasing_duration, 60*15, 60*60*2))
env.process(user(env, 'hippo', increasing_duration, 60*60*1, 60*60*6))
env.process(user(env, 'giraffe', random_duration, 1000,10000))
env.run()
str = ","
fieldNames = ['feature', 'type','at']
with open('data.csv', 'w') as f:
f.write(str.join(fieldNames) + "\n")
for row in events:
fields = []
fields.append(row['name'])
fields.append(row['type'])
fields.append(row['at'].strftime("%Y-%m-%dT%H:%M:%S"))
f.write(str.join(fields) + "\n")
print("Data has been written to: data.csv") |
vicky2135/lucious | src/oscar/templatetags/basket_tags.py | Python | bsd-3-clause | 829 | 0 | from django import template
from oscar.core.compat import assignment_tag
from oscar.core.loa | ding import get_class, get_model
AddToBasketForm = get_class('basket.forms', 'AddToBasketForm')
SimpleAddToBasketForm = get_class('basket.forms', 'SimpleAddToBasketForm')
Product = get_model('catalogue', 'product')
register = template.Library()
QNT_SINGLE, QNT_MULTIPLE = 'single', 'multiple'
@assignment_tag(register)
def basket_form(request, product, quantity_ | type='single'):
if not isinstance(product, Product):
return ''
initial = {}
if not product.is_parent:
initial['product_id'] = product.id
form_class = AddToBasketForm
if quantity_type == QNT_SINGLE:
form_class = SimpleAddToBasketForm
form = form_class(request.basket, product=product, initial=initial)
return form
|
zhantyzgz/polaris | plugins/lastfm.py | Python | gpl-2.0 | 2,882 | 0.002434 | # Made by zhantyzgz and fixed by me (luksireiku)
from core.utils import *
commands = [
('/nowplaying', ['user'])
]
description = 'Returns what you are or were last listening to. If you specify a username, info will be returned for that username.'
shortcut = '/np'
def run(m):
username = get_input(m)
if not username:
if m.sender.username:
username = m.sender.username
else:
return send_message(m, lang.errors.input)
url = 'http://ws.audioscrobbler.com/2.0/'
params = {
'method': 'user.getrecenttracks',
'format': 'json',
'limit': '1',
'api_key': config.keys.lastfm,
'user': username
}
res = requests.get(url, params=params, timeout=config.timeout)
if res.status_code != 200:
send_alert('%s\n%s' % (lang.errors.connection, res.text))
return send_message(m, lang.errors.connection, markup='Markdown')
lastfm = json.loads(res.text)
if not len(lastfm['recenttracks']['track']) > 0:
return send_message(m, lang.errors.results)
artist = lastfm['recenttracks']['track'][0]['artist']['#text'].title()
track = lastfm['recenttracks']['track'][0]['name'].title()
album = lastfm['recenttracks']['track'][0]['album']['#text'].title()
track_url = lastfm['recenttracks']['track'][0]['url'].title()
try:
nowplaying = lastfm['recenttracks']['track'][0]['@attr']['nowplaying']
if nowplaying == 'true':
nowplaying = True
else:
nowplaying == False
except:
date = lastfm['recenttracks']['track'][0]['date']['#text']
nowplaying = False
result = ''
if nowplaying:
result += '<b>%s</b> is now playing:\n' % username
else:
result += '<b>%s</b> last played:\n' % username
result += '🎵 <i>%s</i>\n💽 %s' % (track, artist)
if album:
result += ' - %s' % album
url_yt = 'https://www.googleapis.com/youtube/v3/search'
params_yt = {
'type': 'video',
'part': 'snippet',
'maxResults': '1',
'q': '%s - %s - %s' % (track, artist, album),
'key': config.keys.google_developer_console
}
res_yt = requests.get(url_yt, params=params_yt)
if res_yt.s | tatus_code != 200:
send_alert('<i>%s</i>\n%s' % (lang.errors.connection, res_yt.text))
return send_message(m, lang.errors.connection)
youtube = json.loads(res_yt.text)
keyboard = {}
if len(youtube['items']) > 0:
keyboard['inline_keyboard'] = [
[
{
'text': 'Watch "%s"' % youtube['items'][0]['snippet']['title'],
'url': 'http://youtu.be/%s | ' % youtube['items'][0]['id']['videoId']
}
]
]
send_message(m, result, markup='HTML', preview=False, keyboard=keyboard)
|
abramhindle/UnnaturalCodeFork | python/testdata/launchpad/lib/lp/translations/tests/test_translationimportqueue.py | Python | agpl-3.0 | 30,965 | 0.000904 | # Copyright 2009-2011 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).
__metaclass__ = type
from operator import attrgetter
import os.path
import transaction
from zope.component import getUtility
from zope.security.proxy import removeSecurityProxy
from lp.app.enums import InformationType
from lp.app.interfaces.launchpad import ILaunchpadCelebrities
from lp.services.database.interfaces import (
ISlaveStore,
IStore,
)
from lp.services.librarianserver.testing.fake import FakeLibrarian
from lp.services.tarfile_helpers import LaunchpadWriteTarFile
from lp.services.worlddata.interfaces.language import ILanguageSet
from lp.testing import (
person_logged_in,
TestCaseWithFactory,
)
from lp.testing.dbuser import switch_dbuser
from lp.testing.factory import LaunchpadObjectFactory
from lp.testing.fakemethod import FakeMethod
from lp.testing.layers import (
LaunchpadFunctionalLayer,
LaunchpadZopelessLayer,
ZopelessDatabaseLayer,
)
from lp.translations.enums import RosettaImportStatus
from lp.translations.interfaces.translationimportqueue import (
ITranslationImportQueue,
)
from lp.translations.model.translationimportqueue import (
compose_approval_conflict_notice,
list_distroseries_request_targets,
list_product_request_targets,
TranslationImportQueueEntry,
)
class TestCanSetStatusBase:
"""Base for tests that check that canSetStatus works ."""
layer = LaunchpadZopelessLayer
dbuser = None
entry = None
def setUp(self):
"""Set up context to test in."""
super(TestCanSetStatusBase, self).setUp()
self.queue = getUtility(ITranslationImportQueue)
self.rosetta_experts = (
getUtility(ILaunchpadCelebrities).rosetta_experts)
self.productseries = self.factory.makeProductSeries()
self.productseries.driver = self.factory.makePerson()
self.productseries.product.driver = self.factory.makePerson()
self.uploaderperson = self.factory.makePerson()
def _switch_dbuser(self):
if self.dbuser != None:
switch_dbuser(self.dbuser)
def _assertCanSetStatus(self, user, entry, expected_list):
# Helper to check for all statuses.
# Could iterate RosettaImportStatus.items but listing them here
# explicitly is better to read. They are sorted alphabetically.
possible_statuses = [
RosettaImportStatus.APPROVED,
RosettaImportStatus.BLOCKED,
| RosettaImportStatus.DELETED,
RosettaImportStatus.FAILED,
RosettaImportStatus.IMPORTED,
RosettaImportStatus.NEEDS_INFORMATION,
RosettaImportStatus.NEEDS_REVIEW,
]
self._switch_dbuser()
# Do *not* use assertContentEqual here, as the order matters.
self.assertEqual(expected_list,
[entry.canSetStatus(status, user)
| for status in possible_statuses])
def test_canSetStatus_non_admin(self):
# A non-privileged users cannot set any status.
some_user = self.factory.makePerson()
self._assertCanSetStatus(some_user, self.entry,
# A B D F I NI NR
[False, False, False, False, False, False, False])
def test_canSetStatus_rosetta_expert(self):
# Rosetta experts are all-powerful, didn't you know that?
self._assertCanSetStatus(self.rosetta_experts, self.entry,
# A B D F I NI NR
[True, True, True, True, True, True, True])
def test_canSetStatus_rosetta_expert_no_target(self):
# If the entry has no import target set, even Rosetta experts
# cannot set it to approved or imported.
self.entry.potemplate = None
self.entry.pofile = None
self._assertCanSetStatus(self.rosetta_experts, self.entry,
# A B D F I NI NR
[False, True, True, True, False, True, True])
def test_canSetStatus_uploader(self):
# The uploader can set some statuses.
self._assertCanSetStatus(self.uploaderperson, self.entry,
# A B D F I NI NR
[False, False, True, False, False, False, True])
def test_canSetStatus_product_owner(self):
# The owner (maintainer) of the product gets to set Blocked as well.
owner = self.productseries.product.owner
self._assertCanSetStatus(owner, self.entry,
# A B D F I NI NR
[True, True, True, False, False, True, True])
def test_canSetStatus_owner_and_uploader(self):
# Corner case: Nothing changes if the maintainer is also the uploader.
self.productseries.product.owner = self.uploaderperson
self._assertCanSetStatus(self.uploaderperson, self.entry,
# A B D F I NI NR
[True, True, True, False, False, True, True])
def test_canSetStatus_driver(self):
# The driver gets the same permissions as the maintainer.
driver = self.productseries.driver
self._assertCanSetStatus(driver, self.entry,
# A B D F I NI NR
[True, True, True, False, False, True, True])
def test_canSetStatus_driver_and_uploader(self):
# Corner case: Nothing changes if the driver is also the uploader.
self.productseries.driver = self.uploaderperson
self._assertCanSetStatus(self.uploaderperson, self.entry,
# A B D F I NI NR
[True, True, True, False, False, True, True])
def test_canSetStatus_product_driver(self):
# The driver of the product, too.
driver = self.productseries.product.driver
self._assertCanSetStatus(driver, self.entry,
# A B D F I NI NR
[True, True, True, False, False, True, True])
def test_canSetStatus_product_driver_and_uploader(self):
# Corner case: Nothing changes if the driver is also the uploader.
self.productseries.product.driver = self.uploaderperson
self._assertCanSetStatus(self.uploaderperson, self.entry,
# A B D F I NI NR
[True, True, True, False, False, True, True])
def _setUpUbuntu(self):
self.ubuntu = getUtility(ILaunchpadCelebrities).ubuntu
self.ubuntu_group_owner = self.factory.makePerson()
self.ubuntu.translationgroup = (
self.factory.makeTranslationGroup(self.ubuntu_group_owner))
def test_canSetStatus_ubuntu_translation_group(self):
# Owners of the Ubuntu translation Groups can set entries to approved
# that are targeted to Ubuntu.
self._setUpUbuntu()
ubuntu_entry = self.queue.addOrUpdateEntry(
'demo.pot', '#demo', False, self.uploaderperson,
distroseries=self.factory.makeDistroSeries(self.ubuntu),
sourcepackagename=self.factory.makeSourcePackageName(),
potemplate=self.potemplate)
self._assertCanSetStatus(self.ubuntu_group_owner, ubuntu_entry,
# A B D F I NI NR
[True, True, True, False, False, True, True])
def test_canSetStatus_ubuntu_translation_group_not_ubuntu(self):
# Outside of Ubuntu, owners of the Ubuntu translation Groups have no
# powers.
self._setUpUbuntu()
self._assertCanSetStatus(self.ubuntu_group_owner, self.entry,
# A B D F I NI NR
[False, False, False, False, False, False, False])
class TestCanSetStatusPOTemplate(TestCanSetStatusBase, TestCaseWithFactory):
"""Test canStatus applied to an entry with a POTemplate."""
def setUp(self):
"""Create the entry to test on."""
super(TestCanSetStatusPOTemplate, self).setUp()
self.potemplate = self.factory.makePOTemplate(
productseries=self.productseries)
self.entry = self.queue.addOrUpdateEntry(
' |
google-research/google-research | simulation_research/traffic/file_util_test.py | Python | apache-2.0 | 5,630 | 0.002131 | # coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import itertools
import os
import sys
import tempfile
from absl.testing import absltest
import numpy as np
from six.moves import cPickle
from simulation_research.traffic import file_util
class UtilTest(absltest.TestCase):
  """Unit tests for the `file_util` helper module."""

  def setUp(self):
    super(UtilTest, self).setUp()
    # Scratch directory under the absltest tmpdir; absltest owns cleanup.
    self._output_dir = tempfile.mkdtemp(dir=absltest.get_default_test_tmpdir())

  def test_append_line_to_file(self):
    r"""Tests the output file.

    The output file contains the following.
        hello world
        (hello) "world"
        (hello) !!!!!!!!!!! @~#$%^&*()_+"world"
        aaaaaaaa
        bbbbbbbbbb
        backslash\ backslash
        backslash\ backslash
        backslash\\ backslash
        backslash\\\ backslash
        backslash\\ backslash
    """
    input_lines = ['hello world',
                   '(hello) "world"',
                   '(hello) !!!!!!!!!!! @~#$%^&*()_+"world"',
                   'aaaaaaaa\nbbbbbbbbbb',
                   r'backslash\ backslash',
                   'backslash\\ backslash',
                   r'backslash\\ backslash',
                   r'backslash\\\ backslash',
                   'backslash\\\\ backslash']
    file_path = os.path.join(self._output_dir, 'test_append_line_to_file.txt')
    for line in input_lines:
      file_util.append_line_to_file(file_path, line)
    self.assertTrue(file_util.f_exists(file_path))
    # Note the linebreak in input_lines[3]: it is written as two lines.
    target_lines = ['hello world',
                    '(hello) "world"',
                    '(hello) !!!!!!!!!!! @~#$%^&*()_+"world"',
                    'aaaaaaaa',
                    'bbbbbbbbbb',
                    r'backslash\ backslash',
                    'backslash\\ backslash',
                    r'backslash\\ backslash',
                    r'backslash\\\ backslash',
                    'backslash\\\\ backslash']
    with file_util.f_open(file_path, 'r') as actual_file:
      line_counter = 0
      read_lines = actual_file.readlines()
      for line in read_lines:
        # Linebreak is appended to the target string.
        self.assertEqual(line, target_lines[line_counter] + '\n')
        line_counter += 1
      target_line_number = len(target_lines)
      self.assertEqual(target_line_number, line_counter)

  def test_save_load_variable(self):
    file_path = os.path.join(self._output_dir, 'test_output_data.pkl')
    # Case 1: Nested dictionary.
    data = {'zz': 1, 'b': 234, 123: 'asdfa', 'dict': {'a': 123, 't': 123}}
    file_util.save_variable(file_path, data)
    actual_variable = file_util.load_variable(file_path)
    self.assertEqual(data, actual_variable)
    self.assertIsInstance(actual_variable, dict)
    # Case 2: 2-level nested dictionary.
    data = collections.defaultdict(
        lambda: collections.defaultdict(list))
    data['first']['A'] = [1, 2, 3]
    data['first']['B'] = [1, 2, 3]
    data['second']['B'] = [1, 2, 3]
    data['second']['C'] = [1, 2, 3]
    data['third']['C'] = [1, 2, 3]
    data['third']['D'] = [1, 2, 3]
    data['path'] = 'asdfas/asdf/asdfasdf/'
    file_util.save_variable(file_path, data)
    actual_variable = file_util.load_variable(file_path)
    self.assertEqual(data, actual_variable)
    self.assertIsInstance(actual_variable, dict)
    # Case 3: Large array. If the size is too large, the test will timeout.
    data = [1, 2, 3, 4, 5, 6, 7, 8, 9, 0] * 10000
    file_util.save_variable(file_path, data)
    actual_variable = file_util.load_variable(file_path)
    self.assertListEqual(data, actual_variable)
    self.assertIsInstance(actual_variable, list)
    # Case 4: numpy array.
    data = [1, 2, 3, 4, 5, 6, 7, 8, 9, 0] * 10
    data = np.array(data)
    file_util.save_variable(file_path, data)
    actual_variable = file_util.load_variable(file_path)
    np.testing.assert_array_equal(data, actual_variable)
    self.assertIsInstance(actual_variable, np.ndarray)
    # Case 5: A list of tuples.
    x = [1, 2, 3]
    y = ['a', 'b', 'c']
    data = zip(x, y)
    # Saving zip variable does not affect the iterative variable.
    file_util.save_variable(file_path, data)
    actual_variable = file_util.load_variable(file_path)
    # python2 treats `actual_variable` as a list, however, python3 treats it as
    # an iterative object.
    self.assertListEqual(list(actual_variable), list(data))
    # Case 6: In python2, the itertools.tee cannot be saved by cPickle. However,
    # in python3, it can be saved.
    x = [1, 2, 3]
    y = ['a', 'b', 'c']
    data = zip(x, y)
    data_tee, _ = itertools.tee(data)
    python_version = sys.version_info[0]
    try:
      file_util.save_variable(file_path, data_tee)
      pickle_save_correctly = True
    except cPickle.PicklingError:
      pickle_save_correctly = False
    self.assertTrue((pickle_save_correctly and python_version == 3) or
                    (not pickle_save_correctly and python_version == 2))
if __name__ == '__main__':
absltest.main()
|
rohitranjan1991/home-assistant | homeassistant/components/saj/sensor.py | Python | mit | 8,049 | 0.000621 | """SAJ solar inverter interface."""
from __future__ import annotations
from datetime import date
import logging
import pysaj
import voluptuous as vol
from homeassistant.components.sensor import (
PLATFORM_SCHEMA,
SensorDeviceClass,
SensorEntity,
SensorStateClass,
)
from homeassistant.const import (
CONF_HOST,
CONF_NAME,
CONF_PASSWORD,
CONF_TYPE,
CONF_USERNAME,
ENERGY_KILO_WATT_HOUR,
EVENT_HOMEASSISTANT_STOP,
MASS_KILOGRAMS,
POWER_WATT,
TEMP_CELSIUS,
TEMP_FAHRENHEIT,
TIME_HOURS,
)
from homeassistant.core import CALLBACK_TYPE, HomeAssistant, callback
from homeassistant.exceptions import PlatformNotReady
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.event import async_call_later
from homeassistant.helpers.start import async_at_start
from homeassistant.helpers.typing import ConfigType, DiscoveryInfoType
_LOGGER = logging.getLogger(__name__)

# Polling backoff bounds in seconds: polling starts at MIN_INTERVAL and the
# caller doubles it up to MAX_INTERVAL while updates fail (e.g. overnight,
# when the DC-powered inverter is offline).
MIN_INTERVAL = 5
MAX_INTERVAL = 300

INVERTER_TYPES = ["ethernet", "wifi"]

# Map the raw unit strings reported by pysaj onto Home Assistant constants.
# An empty unit maps to None (unitless sensor).
SAJ_UNIT_MAPPINGS = {
    "": None,
    "h": TIME_HOURS,
    "kg": MASS_KILOGRAMS,
    "kWh": ENERGY_KILO_WATT_HOUR,
    "W": POWER_WATT,
    "°C": TEMP_CELSIUS,
}

PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {
        vol.Required(CONF_HOST): cv.string,
        vol.Optional(CONF_NAME): cv.string,
        vol.Optional(CONF_TYPE, default=INVERTER_TYPES[0]): vol.In(INVERTER_TYPES),
        # Username and password must be supplied together (used by the
        # wifi-type inverter only — see async_setup_platform).
        vol.Inclusive(CONF_USERNAME, "credentials"): cv.string,
        vol.Inclusive(CONF_PASSWORD, "credentials"): cv.string,
    }
)
async def async_setup_platform(
    hass: HomeAssistant,
    config: ConfigType,
    async_add_entities: AddEntitiesCallback,
    discovery_info: DiscoveryInfoType | None = None,
) -> None:
    """Set up the SAJ sensors.

    Connects to the inverter once to discover its sensors, registers one
    entity per enabled sensor, and schedules a backoff-driven poll loop.
    """
    remove_interval_update = None
    wifi = config[CONF_TYPE] == INVERTER_TYPES[1]
    # Init all sensors
    sensor_def = pysaj.Sensors(wifi)
    # Use all sensors by default
    hass_sensors = []
    kwargs = {}
    if wifi:
        kwargs["wifi"] = True
        # Credentials are only forwarded for the wifi inverter type.
        if config.get(CONF_USERNAME) and config.get(CONF_PASSWORD):
            kwargs["username"] = config[CONF_USERNAME]
            kwargs["password"] = config[CONF_PASSWORD]
    try:
        saj = pysaj.SAJ(config[CONF_HOST], **kwargs)
        # First read doubles as a connectivity check and sensor discovery.
        done = await saj.read(sensor_def)
    except pysaj.UnauthorizedException:
        _LOGGER.error("Username and/or password is wrong")
        return
    except pysaj.UnexpectedResponseException as err:
        _LOGGER.error(
            "Error in SAJ, please check host/ip address. Original error: %s", err
        )
        return
    if not done:
        # Inverter reachable but no data yet — let HA retry the platform setup.
        raise PlatformNotReady
    for sensor in sensor_def:
        if sensor.enabled:
            hass_sensors.append(
                SAJsensor(saj.serialnumber, sensor, inverter_name=config.get(CONF_NAME))
            )
    async_add_entities(hass_sensors)

    async def async_saj():
        """Update all the SAJ sensors."""
        values = await saj.read(sensor_def)
        for sensor in hass_sensors:
            state_unknown = False
            # SAJ inverters are powered by DC via solar panels and thus are
            # offline after the sun has set. If a sensor resets on a daily
            # basis like "today_yield", this reset won't happen automatically.
            # Code below checks if today > day when sensor was last updated
            # and if so: set state to None.
            # Sensors with live values like "temperature" or "current_power"
            # will also be reset to None.
            if not values and (
                (sensor.per_day_basis and date.today() > sensor.date_updated)
                or (not sensor.per_day_basis and not sensor.per_total_basis)
            ):
                state_unknown = True
            sensor.async_update_values(unknown_state=state_unknown)
        # Truthy return feeds the backoff scheduler: success resets the
        # interval, failure doubles it.
        return values

    @callback
    def start_update_interval(event):
        """Start the update interval scheduling."""
        nonlocal remove_interval_update
        remove_interval_update = async_track_time_interval_backoff(hass, async_saj)

    @callback
    def stop_update_interval(event):
        """Properly cancel the scheduled update."""
        remove_interval_update()  # pylint: disable=not-callable

    hass.bus.async_listen(EVENT_HOMEASSISTANT_STOP, stop_update_interval)
    async_at_start(hass, start_update_interval)
@callback
def async_track_time_interval_backoff(hass, action) -> CALLBACK_TYPE:
    """Add a listener that fires repetitively and increases the interval when failed.

    `action` is an async callable; a truthy result resets the interval to
    MIN_INTERVAL, a falsy result doubles it (capped at MAX_INTERVAL).
    Returns a callback that cancels the scheduled listener.
    """
    remove = None
    interval = MIN_INTERVAL

    async def interval_listener(now=None):
        """Handle elapsed interval with backoff."""
        nonlocal interval, remove
        try:
            if await action():
                interval = MIN_INTERVAL
            else:
                interval = min(interval * 2, MAX_INTERVAL)
        finally:
            # Always reschedule — even if `action` raised — so polling
            # never silently stops.
            remove = async_call_later(hass, interval, interval_listener)

    # Kick off the first poll immediately.
    hass.async_create_task(interval_listener())

    def remove_listener():
        """Remove interval listener."""
        # `remove` is None until the first rescheduling has happened.
        if remove:
            remove()  # pylint: disable=not-callable

    return remove_listener
class SAJsensor(SensorEntity):
    """Representation of a SAJ sensor."""

    def __init__(self, serialnumber, pysaj_sensor, inverter_name=None):
        """Initialize the SAJ sensor."""
        # Wrapped pysaj sensor object; holds value, unit and update dates.
        self._sensor = pysaj_sensor
        # Optional user-configured name used as a prefix in `name`.
        self._inverter_name = inverter_name
        # Inverter serial number; combined with the sensor name in `unique_id`.
        self._serialnumber = serialnumber
        self._state = self._sensor.value

        if pysaj_sensor.name in ("current_power", "temperature"):
            self._attr_state_class = SensorStateClass.MEASUREMENT
        if pysaj_sensor.name == "total_yield":
            # Lifetime yield only ever grows (long-term statistics support).
            self._attr_state_class = SensorStateClass.TOTAL_INCREASING

    @property
    def name(self):
        """Return the name of the sensor."""
        if self._inverter_name:
            return f"saj_{self._inverter_name}_{self._sensor.name}"

        return f"saj_{self._sensor.name}"

    @property
    def native_value(self):
        """Return the state of the sensor."""
        return self._state

    @property
    def native_unit_of_measurement(self):
        """Return the unit the value is expressed in."""
        return SAJ_UNIT_MAPPINGS[self._sensor.unit]

    @property
    def device_class(self):
        """Return the device class the sensor belongs to."""
        if self.unit_of_measurement == POWER_WATT:
            return SensorDeviceClass.POWER
        if self.unit_of_measurement == ENERGY_KILO_WATT_HOUR:
            return SensorDeviceClass.ENERGY
        # Fahrenheit is checked on the raw pysaj unit because
        # SAJ_UNIT_MAPPINGS has no "°F" entry.
        if (
            self.unit_of_measurement == TEMP_CELSIUS
            or self._sensor.unit == TEMP_FAHRENHEIT
        ):
            return SensorDeviceClass.TEMPERATURE

    @property
    def should_poll(self) -> bool:
        """SAJ sensors are updated & don't poll."""
        # Updates are pushed by the platform's backoff poll loop.
        return False

    @property
    def per_day_basis(self) -> bool:
        """Return if the sensors value is on daily basis or not."""
        return self._sensor.per_day_basis

    @property
    def per_total_basis(self) -> bool:
        """Return if the sensors value is cumulative or not."""
        return self._sensor.per_total_basis

    @property
    def date_updated(self) -> date:
        """Return the date when the sensor was last updated."""
        return self._sensor.date

    @callback
    def async_update_values(self, unknown_state=False):
        """Update this sensor.

        Writes HA state only when the value actually changed, or when the
        state must be reset to unknown (inverter offline).
        """
        update = False

        if self._sensor.value != self._state:
            update = True
            self._state = self._sensor.value

        if unknown_state and self._state is not None:
            update = True
            self._state = None

        if update:
            self.async_write_ha_state()

    @property
    def unique_id(self):
        """Return a unique identifier for this sensor."""
        return f"{self._serialnumber}_{self._sensor.name}"
|
flaviovdf/vodlibs | vod/learn/cluster.py | Python | mit | 2,393 | 0.00794 | # -*- coding: utf8
'''
Common code for clustering tasks
'''
from __future__ import division, print_function
from sklearn.cluster import KMeans
from sklearn.cluster import MiniBatchKMeans
from sklearn.metrics import pairwise
from vod.stats.ci import half_confidence_interval_size
import numpy as np
def kmeans_betacv(data, num_cluster, batch_kmeans=False, n_runs=10,
                  confidence=0.90):
    '''
    Computes the BetaCV for running Kmeans on the dataset. This method
    returns the BetaCV value and half of the size of the confidence interval
    for the same value (BetaCV is an average over the number of runs given).

    Arguments
    ---------
    data: matrix
        A matrix of observations. If this is sparse, `batch_kmeans` must
        be True
    num_cluster: int
        number of clusters to run k-means for
    batch_kmeans: bool (defaults to False)
        if `sklearn.cluster.MiniBatchKMeans` should be used. This is faster
        and suitable for sparse datasets, but less accurate.
    n_runs: int (default = 10)
        Number of runs to compute the BetaCV
    confidence: double [0, 1) (default = 0.9)
        The confidence used to compute half the confidence interval size

    Returns
    -------
    The betacv and half of the confidence interval size
    '''
    if not batch_kmeans:
        algorithm = KMeans(num_cluster)
    else:
        algorithm = MiniBatchKMeans(num_cluster)

    inter_array = np.zeros(n_runs)
    intra_array = np.zeros(n_runs)
    # `range` (not the Python-2-only `xrange`) keeps this portable.
    for i in range(n_runs):
        # Run K-Means
        algorithm.fit(data)

        centers = algorithm.cluster_centers_
        labels = algorithm.labels_

        # KMeans in sklearn uses euclidean
        dist_centers = pairwise.euclidean_distances(centers)

        # Inter distance: mean pairwise distance between cluster centers
        inter_array[i] = np.mean(dist_centers)

        # Intra distance: mean distance of each point to its own center
        dist_all_centers = algorithm.transform(data)
        intra_dists = []
        for doc_id, cluster in enumerate(labels):
            intra_dists.append(dist_all_centers[doc_id, cluster])
        intra_array[i] = np.mean(intra_dists)

    # Element-wise ratio per run; the CI is computed over these n_runs values.
    betacv = intra_array / inter_array
    cinterval = half_confidence_interval_size(betacv, confidence)
    return np.mean(betacv), cinterval
bopowers/MikenetGUI | lib/tabs.py | Python | gpl-3.0 | 63,418 | 0.006339 | '''
Copyright (C) 2013-2014 Robert Powers
This file is part of MikeNetGUI.
MikeNetGUI is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
MikeNetGUI is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with MikeNetGUI. If not, see <http://www.gnu.org/licenses/>.
'''
from PySide import QtGui,QtCore
from custom_widgets import CustomListWidget,CustomWiringWidget
from custom_widgets import CustomTreeWidget,CustomPhaseWidget
from custom_widgets import CustomRecordingWidget,CustomComponentSelectionWidget
from custom_widgets import CustomInteractiveParamWidget,CustomTestSetSelectionWidget
from custom_widgets import CustomWeightNoiseWidget,CustomActivationNoiseWidget
from custom_widgets import CustomInputNoiseWidget,CustomApplyIterationWidget
from editor_windows import DefaultsEditor
from multiproc import ScriptThread
import psutil
import sys
import pydot
# test pydot to find out if Graphviz is installed; this only warns at import
# time — the "Visualize" feature is expected to be disabled elsewhere when
# the Graphviz executables are missing. (Python 2 print statement.)
if pydot.find_graphviz():
    pass
else:
    print 'Graphviz executables not found. "Visualize" feature will be disabled.'
from matplotlib import pyplot
from scipy import misc
import gen_utils as guts
import os
import dialogs
from time import time
class ScriptTab(QtGui.QWidget):
'''Creates a tab with tools for script-level editing.
The widget is arranged in two columns. Each column is arranged using
a vertical layout.
'''
def __init__(self,script):
super(ScriptTab, self).__init__()
self.script = script
self.setSizePolicy(QtGui.QSizePolicy.Expanding,
QtGui.QSizePolicy.Expanding)
#..............................................................
# LAYOUTS
# create main horizontal layout
h_layout = QtGui.QHBoxLayout()
# left and right column layouts
left_v_layout = QtGui.QVBoxLayout()
right_v_layout = QtGui.QVBoxLayout()
# sublayouts for timeline control, script property editing, start button
time_layout = QtGui.QGridLayout()
props_layout = QtGui.QFormLayout()
start_layout = QtGui.QHBoxLayout()
#..............................................................
# HIERARCHICAL TREE VIEW OBJECT (ENTIRE LEFT COLUMN)
# see custom_widgets module for CustomTreeWidget definitions
self.tree_view = CustomTreeWidget(self,script)
#..............................................................
# TIMELINE EDITING CONTROLS (ADD RUN/ITERATION, REMOVE, ETC...)
# create timeline editing buttons
self.add_run_btn = QtGui.QPushButton('Add Run')
self.add_iter_btn = QtGui.QPushButton('Add Iterator')
self.del_btn = QtGui.QPushButton('Remove Selected')
self.del_btn.setEnabled(False)
self.dup_btn = QtGui.QPushButton('Duplicate Selected')
self.dup_btn.setEnabled(False)
# create timeline editing group box
timeline_container = QtGui.QHBoxLayout() # to shift everything over
timeline = QtGui.QGroupBox('Edit Script Timeline')
timeline.setAlignment(QtCore.Qt.AlignHCenter)
time_layout.setSpacing(10)
time_layout.addWidget(self.add_run_btn,0,0,1,1)
time_layout.addWidget(self.add_iter_btn,1,0,1,1)
time_layout.addWidget(self.del_btn,0,1,1,1)
time_layout.addWidget(self.dup_btn,1,1,1,1)
timeline.setLayout(time_layout)
timeline_container.addWidget(timeline)
#timeline_container.addStretch(1)
# connect button signals
self.add_run_btn.clicked.connect(self.tree_view.newRun)
self.add_iter_btn.clicked.connect(self.tree_view.newIterator)
self.dup_btn.clicked.connect(self.tree_view.duplicateCurrentObject)
self.del_btn.clicked.connect(self.tree_view.removeCurrentObject)
#..............................................................
# SCRIPT PROPERTIES BOX
# create script properties panel
props = QtGui.QGroupBox('Script Properties')
props.setAlignment(QtCore.Qt.AlignHCenter)
script_name,script_box = self.script.getParameter('script_name').getWidget()
#defaults_btn = QtGui.QPushButton('Edit global parameter defaults')
#defaults_btn.clicked.connect(self.editDefaults)
props_layout.addRow(script_name,script_ | box)
#props_layout.addRow('',defau | lts_btn)
props.setLayout(props_layout)
# connect signals
script_box.editingFinished.connect(self.updateTabName)
#..............................................................
# START BUTTON
self.start_btn = StartButton(self)
self.start_btn.clicked.connect(self.scanScript)
#..............................................................
# putting it all together
self.setLayout(h_layout)
h_layout.addLayout(left_v_layout)
left_v_layout.addWidget(QtGui.QLabel('Script Timeline'))
left_v_layout.addWidget(self.tree_view)
h_layout.addLayout(right_v_layout)
right_v_layout.addLayout(timeline_container)
right_v_layout.addWidget(props)
right_v_layout.addLayout(start_layout)
start_layout.addStretch(1)
start_layout.addWidget(self.start_btn)
start_layout.addStretch(1)
right_v_layout.addStretch(1)
# initialize
self.tree_view.syncToModel()
    def scanScript(self):
        """Validate the script on a background thread before running it.

        `reportScriptIssues` runs when the scan finishes and either shows
        the issues or starts the actual run.
        """
        self.start_btn.setScanning()
        self.scan_thread = dialogs.ScanningThread(self.script)
        self.scan_thread.finished.connect(self.reportScriptIssues)
        self.scan_thread.start()
    def startScript(self):
        """Launch the script run and show the progress window."""
        self.start_btn.setInProgress()
        # start script run
        self.prog = dialogs.ProgressWindow(self.script.getGUI())
        self.prog.show()
        self.prog.raise_()
        self.prog.activateWindow()
        self.script_thread = ScriptThread(self.script)
        self.script_thread.finished.connect(self.notifyScriptEnded)
        # time the entire script
        self.tic = time()
        self.script_thread.start()
def abortScript(self):
early_abort = True
self.script_thread.quit()
# kill all processes
for proc in psutil.process_iter():
if 'mikenet_master' in proc.name:
proc.kill()
#if 'mikenet_master' in proc.name():
# print 'killed a process'
# proc.kill()
#try:
# print proc.name()
# if 'mikenet_master' in proc.name():
# proc.kill()
#except:
# print 'excepted process search'
    @QtCore.Slot()
    def reportScriptIssues(self):
        """Show scan results; start the run only when no issues were found."""
        if self.scan_thread.issues:
            screener = dialogs.ScriptScreener(self.script.getGUI(),
                                              self.scan_thread.issues)
            screener.exec_()
            self.start_btn.setFree()
        else:
            self.startScript()
    @QtCore.Slot(int)
    def updateCores(self,i):
        """Forward the active core count to the progress window."""
        self.prog.updateCores(i)
    @QtCore.Slot(int,int)
    def updateTotalProgress(self,complete,total):
        """Forward overall progress, plus elapsed wall time, to the window."""
        toc = time()
        self.prog.updateTotalProgress(complete,total,toc-self.tic)
    @QtCore.Slot(int,int)
    def updateSuccessRatio(self,good,total):
        """Forward the good/total run ratio to the progress window."""
        self.prog.updateSuccessRatio(good,total)
    def notifyScriptEnded(self):
        """Finalize UI state and send the optional e-mail notification."""
        # gets activated after script runs and database is finished updating
        toc = time()
        self.prog.simulationOver(toc-self.tic)
        self.script.getGUI().emailNotify(toc-self.tic)
        self.start_btn.setFree()
    def getNewRunNames(self):
        """Delegate to the tree view, which owns run naming."""
        return self.tree_view.getNewRunNames()
def getIteratorNam |
lmazuel/azure-sdk-for-python | azure-mgmt-relay/tests/test_azure_mgmt_wcfrelay.py | Python | mit | 7,326 | 0.006144 | # coding: utf-8
#-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#--------------------------------------------------------------------------
import unittest
import time
from msrestazure.azure_exceptions import CloudError
import azure.mgmt.relay.models
from azure.mgmt.relay.models import RelayNamespace, Sku, SkuTier, Relaytype, AuthorizationRule, AccessRights, AccessKeys, WcfRelay, ErrorResponseException, ErrorResponse
from azure.common.credentials import ServicePrincipalCredentials
from devtools_testutils import AzureMgmtTestCase, ResourceGroupPreparer
class MgmtWcfRelayTest(AzureMgmtTestCase):
    """Live CRUD test for Azure Relay WCF relays ("curd" kept for history)."""

    def setUp(self):
        super(MgmtWcfRelayTest, self).setUp()
        self.relay_client = self.create_mgmt_client(
            azure.mgmt.relay.RelayManagementClient
        )

    @ResourceGroupPreparer()
    def test_wcfrelay_curd(self, resource_group, location):
        """Exercises create/get/list/update/delete for a WCF relay and its
        authorization rules inside a throw-away resource group."""
        resource_group_name = resource_group.name

        # Create a Namespace
        namespace_name = "testingpythontestcaseeventhubnamespaceEventhub"
        namespaceparameter = RelayNamespace(location, {'tag1': 'value1', 'tag2': 'value2'}, Sku(SkuTier.standard))
        creatednamespace = self.relay_client.namespaces.create_or_update(resource_group_name, namespace_name, namespaceparameter).result()
        self.assertEqual(creatednamespace.name, namespace_name)
        #
        # # Get created Namespace
        #
        getnamespaceresponse = self.relay_client.namespaces.get(resource_group_name, namespace_name)
        self.assertEqual(getnamespaceresponse.name, namespace_name)

        # Create a WcfRelay
        wcfrelay_name = "testingpythontestcasewcfrelay"
        wcfrelayparameter = WcfRelay(
            relay_type=Relaytype.net_tcp,
            requires_client_authorization=True,
            requires_transport_security=True,
            user_metadata="User data for WcfRelay"
        )
        createdwcfrelayresponse = self.relay_client.wcf_relays.create_or_update(resource_group_name, namespace_name, wcfrelay_name, wcfrelayparameter)
        self.assertEqual(createdwcfrelayresponse.name, wcfrelay_name)
        self.assertEqual(createdwcfrelayresponse.requires_client_authorization, True)

        # Get the created wcfRelay
        geteventhubresponse = self.relay_client.wcf_relays.get(resource_group_name, namespace_name, wcfrelay_name)
        self.assertEqual(geteventhubresponse.name, wcfrelay_name)
        self.assertEqual(geteventhubresponse.requires_transport_security, True)
        self.assertEqual(geteventhubresponse.user_metadata, "User data for WcfRelay")

        # Get the List of wcfRelay by namespace
        getlistbynamespacewcfrelayresponse = list(self.relay_client.wcf_relays.list_by_namespace(resource_group_name, namespace_name))
        self.assertGreater(len(getlistbynamespacewcfrelayresponse), 0)

        # update the created wcfRelay (create_or_update acts as update here)
        wcfrelayupdateparameter = WcfRelay(
            relay_type=Relaytype.net_tcp,
            user_metadata="User data for WcfRelay updated"
        )
        updatewcfrelayresponse = self.relay_client.wcf_relays.create_or_update(resource_group_name, namespace_name,
                                                                              wcfrelay_name, wcfrelayupdateparameter)
        self.assertEqual(updatewcfrelayresponse.name, wcfrelay_name)
        self.assertEqual(updatewcfrelayresponse.requires_transport_security, True)
        self.assertEqual(updatewcfrelayresponse.requires_client_authorization, True)
        self.assertEqual(updatewcfrelayresponse.user_metadata, "User data for WcfRelay updated")

        # Create a new authorization rule
        authoRule_name = "testingauthrulepy"
        createwcfrelayauthorule = self.relay_client.wcf_relays.create_or_update_authorization_rule(resource_group_name, namespace_name, wcfrelay_name, authoRule_name,[AccessRights('Send'),AccessRights('Listen')])
        self.assertEqual(createwcfrelayauthorule.name, authoRule_name, "Authorization rule name not as created - create_or_update_authorization_rule ")
        self.assertEqual(len(createwcfrelayauthorule.rights), 2)

        # Get the created authorization rule
        getwcfrelayauthorule = self.relay_client.wcf_relays.get_authorization_rule(resource_group_name, namespace_name, wcfrelay_name, authoRule_name)
        self.assertEqual(getwcfrelayauthorule.name, authoRule_name, "Authorization rule name not as passed as parameter - get_authorization_rule ")
        self.assertEqual(len(getwcfrelayauthorule.rights), 2, "Access rights mis match as created - get_authorization_rule ")

        # update the rights of the authorization rule
        getwcfrelayauthorule.rights.append('Manage')
        updatewcfrelayauthorule = self.relay_client.wcf_relays.create_or_update_authorization_rule(resource_group_name, namespace_name, wcfrelay_name, authoRule_name, getwcfrelayauthorule.rights)
        self.assertEqual(updatewcfrelayauthorule.name, authoRule_name, "Authorization rule name not as passed as parameter for update call - create_or_update_authorization_rule ")
        self.assertEqual(len(updatewcfrelayauthorule.rights), 3, "Access rights mis match as updated - create_or_update_authorization_rule ")

        # list all the authorization rules for the given wcfRelay
        wcfrelayauthorulelist = list(self.relay_client.wcf_relays.list_authorization_rules(resource_group_name, namespace_name, wcfrelay_name))
        self.assertEqual(len(wcfrelayauthorulelist), 1, "number of authorization rule mismatch with the created + default = 2 - list_authorization_rules")

        # List keys for the authorization rule
        listkeysauthorizationrule = self.relay_client.wcf_relays.list_keys(resource_group_name, namespace_name, wcfrelay_name, authoRule_name)
        self.assertIsNotNone(listkeysauthorizationrule)

        # regenerate keys for the authorization rule - PrimaryKey
        regenratePrimarykeyauthorizationrule = self.relay_client.wcf_relays.regenerate_keys(resource_group_name, namespace_name, wcfrelay_name, authoRule_name, 'PrimaryKey')
        self.assertNotEqual(listkeysauthorizationrule.primary_key,regenratePrimarykeyauthorizationrule.primary_key)

        # regenerate keys for the authorization rule - SecondaryKey
        regenrateSecondarykeyauthorizationrule = self.relay_client.wcf_relays.regenerate_keys(resource_group_name,namespace_name, wcfrelay_name, authoRule_name, 'SecondaryKey')
        self.assertNotEqual(listkeysauthorizationrule.secondary_key, regenrateSecondarykeyauthorizationrule.secondary_key)

        # delete the authorization rule
        self.relay_client.wcf_relays.delete_authorization_rule(resource_group_name, namespace_name, wcfrelay_name, authoRule_name)

        # Delete the created WcfRelay
        getwcfrelayresponse = self.relay_client.wcf_relays.delete(resource_group_name, namespace_name, wcfrelay_name)

        # Delete the created namespace
        self.relay_client.namespaces.delete(resource_group_name, namespace_name).result()
# ------------------------------------------------------------------------------
if __name__ == '__main__':
unittest.main() |
LumPenPacK/NetworkExtractionFromImages | win_build/nefi2_win_amd64_msvc_2015/site-packages/numpy/core/function_base.py | Python | bsd-2-clause | 6,518 | 0 | from __future__ import division, absolute_import, print_function
__all__ = ['logspace', 'linspace']
from . import numeric as _nx
from .numeric import result_type, NaN
def linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None):
    """
    Return evenly spaced numbers over a specified interval.

    Generates `num` samples evenly spaced over ``[start, stop]``. When
    `endpoint` is False the right endpoint is excluded and the spacing
    shrinks accordingly (``num + 1`` points are laid out, the last dropped).

    Parameters
    ----------
    start : scalar
        First value of the sequence.
    stop : scalar
        Final value of the sequence, unless `endpoint` is False.
    num : int, optional
        Number of samples to generate; must be non-negative. Default 50.
    endpoint : bool, optional
        If True (default), `stop` is the last sample.
    retstep : bool, optional
        If True, also return the spacing between samples.
    dtype : dtype, optional
        Output dtype; inferred from `start` and `stop` when omitted.

        .. versionadded:: 1.9.0

    Returns
    -------
    samples : ndarray
        `num` equally spaced samples in the closed interval
        ``[start, stop]`` (or the half-open ``[start, stop)``).
    step : float
        Spacing between samples; only returned when `retstep` is True.
        NaN when fewer than two samples are requested.

    See Also
    --------
    arange : Similar, but specifies a step size instead of a count.
    logspace : Samples uniformly distributed in log space.

    Examples
    --------
    >>> np.linspace(2.0, 3.0, num=5)
    array([ 2.  ,  2.25,  2.5 ,  2.75,  3.  ])
    >>> np.linspace(2.0, 3.0, num=5, endpoint=False)
    array([ 2. ,  2.2,  2.4,  2.6,  2.8])
    >>> np.linspace(2.0, 3.0, num=5, retstep=True)
    (array([ 2.  ,  2.25,  2.5 ,  2.75,  3.  ]), 0.25)
    """
    num = int(num)
    if num < 0:
        raise ValueError("Number of samples, %s, must be non-negative." % num)
    denom = num - 1 if endpoint else num

    # Promote float/complex array scalars to plain floats (gh-3504).
    start = start * 1.
    stop = stop * 1.

    calc_dtype = result_type(start, stop, float(num))
    out_dtype = calc_dtype if dtype is None else dtype

    samples = _nx.arange(0, num, dtype=calc_dtype)

    if num <= 1:
        # Spacing is undefined for 0- and 1-element sequences.
        spacing = NaN
    else:
        span = stop - start
        spacing = span / denom
        if spacing == 0:
            # Denormal-safe ordering (gh-5437): divide first, then scale.
            samples /= denom
            samples *= span
        else:
            samples *= spacing

    samples += start
    if endpoint and num > 1:
        # Pin the last sample exactly, avoiding rounding drift.
        samples[-1] = stop

    result = samples.astype(out_dtype, copy=False)
    return (result, spacing) if retstep else result
def logspace(start, stop, num=50, endpoint=True, base=10.0, dtype=None):
    """
    Return numbers spaced evenly on a log scale.

    In linear space the sequence starts at ``base ** start`` and ends with
    ``base ** stop`` (the latter excluded when `endpoint` is False, in which
    case ``num + 1`` exponents are laid out and the last dropped).

    Parameters
    ----------
    start : float
        ``base ** start`` is the starting value of the sequence.
    stop : float
        ``base ** stop`` is the final value, unless `endpoint` is False.
    num : integer, optional
        Number of samples to generate. Default is 50.
    endpoint : boolean, optional
        If True (default), `stop` is the last sample.
    base : float, optional
        Base of the log space; the step of ``log_base(samples)`` is
        uniform. Default is 10.0.
    dtype : dtype
        Output dtype; inferred from the other arguments when omitted.

    Returns
    -------
    samples : ndarray
        `num` samples, equally spaced on a log scale.

    See Also
    --------
    arange, linspace

    Notes
    -----
    Equivalent to::

        y = np.linspace(start, stop, num=num, endpoint=endpoint)
        np.power(base, y).astype(dtype)

    Examples
    --------
    >>> np.logspace(2.0, 3.0, num=4)
    array([  100.        ,   215.443469  ,   464.15888336,  1000.        ])
    >>> np.logspace(2.0, 3.0, num=4, base=2.0)
    array([ 4.        ,  5.0396842 ,  6.34960421,  8.        ])
    """
    # Exponents are uniform in linear space; exponentiation maps them onto
    # the requested log scale.
    exponents = linspace(start, stop, num=num, endpoint=endpoint)
    scaled = _nx.power(base, exponents)
    if dtype is None:
        return scaled
    return scaled.astype(dtype)
|
chemelnucfin/tensorflow | tensorflow/compiler/tests/binary_ops_test.py | Python | apache-2.0 | 57,668 | 0.004526 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test cases for binary operators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.python.compat import compat
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import bitwise_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.platform import googletest
class BinaryOpsTest(xla_test.XLATestCase):
"""Test cases for binary operators."""
def _testBinary(self, op, a, b, expected, equality_test=None):
with self.session() as session:
with self.test_scope():
pa = array_ops.placeholder(dtypes.as_dtype(a.dtype), a.shape, name="a")
pb = array_ops.placeholder(dtypes.as_dtype(b.dtype), b.shape, name="b")
output = op(pa, pb)
result = session.run(output, {pa: a, pb: b})
if equality_test is None:
equality_test = self.assertAllCloseAccordingToType
equality_test(result, expected, rtol=1e-3)
def _testSymmetricBinary(self, op, a, b, expected, equality_test=None):
self._testBinary(op, a, b, expected, equality_test)
self._testBinary(op, b, a, expected, equality_test)
def ListsAreClose(self, result, expected, rtol):
"""Tests closeness of two lists of floats."""
self.assertEqual(len(result), len(expected))
for i in range(len(result)):
self.assertAllCloseAccordingToType(result[i], expected[i], rtol)
def testFloatOps(self):
for dtype in self.float_types:
if dtype == dtypes.bfloat16.as_numpy_dtype:
a = -1.01
b = 4.1
else:
a = -1.001
b = 4.01
self._testBinary(
lambda x, y: math_ops.approximate_equal(x, y, tolerance=0.0001),
np.array([[[[-1, 2.00009999], [-3, b]]]], dtype=dtype),
np.array([[[[a, 2], [-3.00009, 4]]]], dtype=dtype),
expected=np.array([[[[False, True], [True, False]]]], dtype=dtype))
self._testBinary(
gen_math_ops.real_div,
np.array([3, 3, -1.5, -8, 44], dtype=dtype),
np.array([2, -2, 7, -4, 0], dtype=dtype),
expected=np.array(
[1.5, -1.5, -0.2142857, 2, float("inf")], dtype=dtype))
self._testBinary(math_ops.pow, dtype(3), dtype(4), expected=dtype(81))
self._testBinary(
math_ops.pow,
np.array([1, 2], dtype=dtype),
np.zeros(shape=[0, 2], dtype=dtype),
expected=np.zeros(shape=[0, 2], dtype=dtype))
self._testBinary(
math_ops.pow,
np.array([10, 4], dtype=dtype),
np.array([2, 3], dtype=dtype),
expected=np.array([100, 64], dtype=dtype))
self._testBinary(
math_ops.pow,
dtype(2),
np.array([3, 4], dtype=dtype),
expected=np.array([8, 16], dtype=dtype))
self._testBinary(
math_ops.pow,
np.array([[2], [3]], dtype=dtype),
dtype(4),
expected=np.array([[16], [81]], dtype=dtype))
self._testBinary(
math_ops.atan2,
np.array([0, np.sqrt(2), 1, np.sqrt(2), 0], dtype),
np.array([1, np.sqrt(2), 0, -np.sqrt(2), -1], dtype),
expected=np.array(
[0, np.pi / 4, np.pi / 2, np.pi * 3 / 4, np.pi], dtype=dtype))
self._testBinary(
gen_math_ops.reciprocal_grad,
np.array([4, -3, -2, 1], dtype=dtype),
np.array([5, -6, 7, -8], dtype=dtype),
expected=np.array([-80, 54, -28, 8], dtype=dtype))
self._testBinary(
gen_math_ops.sigmoid_grad,
np.array([4, 3, 2, 1], dtype=dtype),
np.array([5, 6, 7, 8], dtype=dtype),
expected=np.array([-60, -36, -14, 0], dtype=dtype))
self._testBinary(
gen_math_ops.rsqrt_grad,
np.array([4, 3, 2, 1], dtype=dtype),
np.array([5, 6, 7, 8], dtype=dtype),
expected=np.array([-160, -81, -28, -4], dtype=dtype))
self._testBinary(
gen_math_ops.sqrt_grad,
np.array([4, 3, 2, 1], dtype=dtype),
np.array([5, 6, 7, 8], dtype=dtype),
expected=np.array([0.625, 1, 1.75, 4], dtype=dtype))
self._testBinary(
gen_nn_ops.softplus_grad,
np.array([4, 3, 2, 1], dtype=dtype),
np.array([5, 6, 7, 8], dtype=dtype),
expected=np.array(
[3.97322869, 2.99258232, 1.99817801, 0.99966466], dtype=dtype))
self._testBinary(
gen_nn_ops.softsign_grad,
np.array([4, 3, 2, 1], dtype=dtype),
np.array([5, 6, 7, 8], dtype=dtype),
expected=np.array(
[0.11111111, 0.06122449, 0.03125, 0.01234568], dtype=dtype))
self._testBinary(
gen_math_ops.tanh_grad,
np.array([4, 3, 2, 1], dtype=dtype),
np.array([5, 6, 7, 8], dtype=dtype),
expected=np.array([-75, -48, -21, 0], dtype=dtype))
self._testBinary(
gen_nn_ops.elu_grad,
np.array([1, 2, 3, 4, 5, 6], dtype=dtype),
np.array([-.6, -.4, -.2, 0, .2, .4], dtype=dtype),
expected=np.array([0.4, 1.2, 2.4, 4, 5, 6], dtype=dtype))
self._testBinary(
gen_nn_ops.selu_grad,
np.array([1, 2, 3, 4, 5, 6], dtype=dtype),
np.array([-.6, -.4, -.2, .2, .4, .6], dtype=dtype),
expected=np.array(
[1.158099340847, 2.7161986816948, 4.67429802254,
4.202803949422, 5.2535049367774, 6.30420592413], dtype=dtype))
self._testBinary(
gen_nn_ops.relu_grad,
np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10], dtype=dtype),
np.array([0, 0, 0, 0, 0, 0.1, 0.3, 0.5, 0.7, 0.9], dtype=dtype),
expected=np.array([0, 0, 0, 0, 0, 6, 7, 8, 9, 10], dtype=dtype))
self._testBinary(
gen_nn_ops.relu6_grad,
np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12], dtype=dtype),
np.array(
[0, 0, 0, 0, 0, 0.1, 0.3, 0.5, 0.7, 0.9, 6.1, 10.0], dtype=dtype),
expected=np.array([0, 0 | , 0, 0, 0, 6, 7, 8, 9, 10, 0, 0], dtype=dtype))
self._testBinary(
gen_nn_ops.leaky_relu_grad,
np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10], dtype=dtype),
np.array([0, 0, 0, 0, 0, 0.1, 0.3, 0.5, 0.7, 0.9], dtype=dtype),
expected=np.array([0.2, 0.4, 0.6, 0.8, 1, 6, 7, 8, 9, 10],
dtype=dtype))
self._testBinary(
gen_nn_ops.softmax_cross_entropy_with_logits,
np.array([[1, 2, 3, 4], [5, 6, 7, 8]] | , dtype=dtype),
np.array([[0.1, 0.2, 0.3, 0.4], [0.4, 0.3, 0.2, 0.1]], dtype=dtype),
expected=[
np.array([1.44019, 2.44019], dtype=dtype),
np.array([[-0.067941, -0.112856, -0.063117, 0.243914],
[-0.367941, -0.212856, 0.036883, 0.543914]],
dtype=dtype),
],
equality_test=self.ListsAreClose)
# TODO(b/68813416): Fails with bfloat16.
if dtype != dtypes.bfloat16.as_numpy_dtype:
self._testBinary(
gen_nn_ops.sparse_softmax_cross_entropy_with_logits,
np.array(
[[0.1, 0.2, 0.3, 0.4], [0.5, 0.6, 0.7, 0.8],
[0.9, 1.0, 1.1, 1 |
quantmind/pulsar-agile | agile/plugins/docs.py | Python | bsd-3-clause | 2,024 | 0 | import os
from importlib import import_module
from pulsar import ImproperlyConfigured
from cloud import aws
from .. import core
content_types = {'fjson': 'application/json',
'inv': 't | ext/plain'}
class Docs(core.AgileCommand):
    """Compile sphinx documentation and optionally upload it to AWS S3.

    Requires a valid sphinx installation.
    """
    description = 'Compile sphinx docs and upload them to aws'

    async def run(self, name, config, options):
        """Build the docs with ``make`` and push them when ``cfg.push`` is set.

        Raises:
            ImproperlyConfigured: if the repository has no ``docs`` directory.
        """
        path = os.path.join(self.repo_path, 'docs')
        if not os.path.isdir(path):
            raise ImproperlyConfigured('path "%s" missing' % path)
        os.chdir(path)
        try:
            text = await self.execute('make', self.cfg.docs)
        finally:
            # Always restore the working directory, even if the build fails.
            os.chdir(self.repo_path)
        self.logger.info(text)
        if self.cfg.push:
            await self.upload()

    async def upload(self):
        """Upload documentation to amazon s3.

        The destination key is ``[<docs>/]<package-name>/<version>`` inside
        ``cfg.docs_bucket``.

        Raises:
            ImproperlyConfigured: if ``docs_bucket`` is unset or the build
                output directory is missing.
        """
        if not self.cfg.docs_bucket:
            raise ImproperlyConfigured('Please specify the "docs_bucket" '
                                       'in your config file')
        docs = self.cfg.docs
        path = os.path.join(self.repo_path, 'docs', '_build', docs)
        if not os.path.isdir(path):
            raise ImproperlyConfigured('path "%s" missing' % path)
        self.logger.info('Docs at "%s"', path)
        # Version and package name come from the application module itself.
        mod = import_module(self.cfg.app_module)
        version = mod.__version__
        name = mod.__name__
        url = '%s/%s' % (name, version)
        if docs != 'html':
            url = '%s/%s' % (docs, url)
        self.logger.info('Preparing to upload to "%s/%s"',
                         self.cfg.docs_bucket, url)
        aws_config = self.config['docs'].get('aws_config', {})
        s3 = aws.AsyncioBotocore('s3', http_session=self.gitapi.http,
                                 **aws_config)
        await s3.upload_folder(self.cfg.docs_bucket, path, url,
                               skip=['environment.pickle', 'last_build'],
                               content_types=content_types)
|
avalentino/PyTables | examples/vlarray4.py | Python | bsd-3-clause | 626 | 0.001597 | #!/usr/bin/env python3
"""Example that shows how to easily save a variable number of atoms w | ith a
VLArray."""
import numpy as np
import tables as | tb
# Build a ragged (variable-length) array: each of the N rows holds a random
# number (0..N-1) of 3x3 float64 atoms.
N = 100
shape = (3, 3)
np.random.seed(10) # For reproductible results
f = tb.open_file("vlarray4.h5", mode="w")
vlarray = f.create_vlarray(f.root, 'vlarray1',
                           tb.Float64Atom(shape=shape),
                           "ragged array of arrays")
k = 0  # running count of atoms written across all rows
for i in range(N):
    l = []
    for j in range(np.random.randint(N)):
        l.append(np.random.randn(*shape))
        k += 1
    # One append per row; the row length varies, hence the VLArray.
    vlarray.append(l)
print("Total number of atoms:", k)
f.close()
|
cfh294/WawaGeoScraper | utils/scraping/__init__.py | Python | gpl-3.0 | 5,149 | 0.000583 | """
scraping
the utility functions for the actual web scraping
"""
import ssl
import datetime
import requests
import re
# this is the endpoint that my new version of this program will
# abuse with possible store ids. this is a much more reliable "darts at the wall"
# technique than the previous location-based one
QUERY_URL = "https://www.wawa.com/Handlers/LocationByStoreNumber.ashx"
# fr | om testing, I have confirmed certain "series" of store IDs
# 0000 | series are all old stores in PA, NJ, MD, DE, and VA
# 5000 series are all stores in FL
# 8000 series are all new stores in PA, NJ, MD, DE, and VA
POSSIBLE_STORE_NUMS = list(range(5000, 6000))
POSSIBLE_STORE_NUMS.extend(list(range(0, 1000)))
POSSIBLE_STORE_NUMS.extend(list(range(8000, 9000)))
# currently only tracking these gas types to keep a consistent csv schema.
# other types are not consistent across all wawas
GAS_TYPES = ["diesel", "plus", "unleaded", "premium"]
def parse_gas_prices(in_location):
    """
    Breaks open the json for the gas prices
    :param in_location: The Wawa location we are looking at (dict)
    :return: The gas price info (dict)
    """
    prices = {}
    try:
        for fuel_entry in in_location["fuelTypes"]:
            description = fuel_entry["description"].lower()
            # Only the gas types tracked in GAS_TYPES keep the CSV schema
            # consistent across all stores; ignore anything else.
            if description in GAS_TYPES:
                prices[description + "_price"] = fuel_entry["price"]
    except KeyError:
        # No gas sold at this Wawa: emit empty prices for every tracked type.
        for fuel in GAS_TYPES:
            prices[fuel + "_price"] = ""
    return prices
def camel_to_underscore(in_string):
    """
    Basic function that converts a camel-cased word to use underscores
    :param in_string: The camel-cased string (str)
    :return: The underscore'd string (str)
    """
    # First pass: split before an upper-case letter that begins a capitalised
    # word (e.g. "open24Hours" -> "open24_Hours").
    partially_split = re.sub(r'(.)([A-Z][a-z]+)', r'\1_\2', in_string)
    # Second pass: separate a lower-case letter or digit from a following
    # upper-case letter, then normalise the whole thing to lower case.
    fully_split = re.sub(r'([a-z0-9])([A-Z])', r'\1_\2', partially_split)
    return fully_split.lower()
def parse_amenities(in_location):
    """
    Breaks open the json for the amenities offered at the Wawa location
    :param in_location: The Wawa location (dict)
    :return: The amenity info (dict), one "has_<amenity>" key per flag
    """
    # Amenity keys arrive camel-cased from the API; normalise each one into
    # a snake_case "has_*" column name for the CSV schema.
    return {
        "has_" + camel_to_underscore(amenity).lower(): offered
        for amenity, offered in in_location["amenities"].items()
    }
def get_addresses(in_location):
    """
    Parses info for the Wawa address and coordinates
    :param in_location: The Wawa location (dict)
    :return: The address and coordinate info (dict) under the keys
        "address" and "coordinates"
    """
    # The API reports two address records: index 0 is the human-friendly
    # street address, index 1 carries the physical lat/long pair.
    friendly = in_location["addresses"][0]
    physical = in_location["addresses"][1]
    street_address = {
        key: friendly[key] for key in ("address", "city", "state", "zip")
    }
    # "loc" is ordered [latitude, longitude].
    coordinates = {
        "longitude": physical["loc"][1],
        "latitude": physical["loc"][0],
    }
    return {"address": street_address, "coordinates": coordinates}
def get_wawa_data(limit=None):
    """
    Hits the store number url endpoint to pull down Wawa locations and
    parse each one's information. We don't know the store numbers as there
    is not list of store numbers. Through testing I was able to narrow down
    "series" of store numbers, so we iterate through ranges of possible
    store numbers, skipping any 404 errors (invalid store id responses
    returned by url calls).
    :param limit: A cap on the number of Wawa results returned (int) (optional)
    :return: Parsed Wawa information (list<dict>)
    """
    # NOTE(review): this disables TLS certificate verification for the whole
    # process, not just these requests -- confirm that is intended.
    ssl._create_default_https_context = ssl._create_unverified_context
    output = []
    for i in POSSIBLE_STORE_NUMS:
        response = requests.get(QUERY_URL, params={"storeNumber": i})
        # A 404 means the candidate store number does not exist; skip it.
        if response.status_code != 404:
            location = response.json()
            geographic_data = get_addresses(location)
            address = geographic_data["address"]
            coordinates = geographic_data["coordinates"]
            gas_prices = parse_gas_prices(location)
            amenities = parse_amenities(location)
            # Flat top-level fields for this store's CSV row.
            this_location_output = {
                "has_menu": location["hasMenu"],
                "last_updated": datetime.datetime.strptime(location["lastUpdated"], "%m/%d/%Y %I:%M %p"),
                "location_id": location["locationID"],
                "open_24_hours": location["open24Hours"],
                "regional_director": location["regionalDirector"],
                "store_close": location["storeClose"],
                "store_name": location["storeName"],
                "store_number": location["storeNumber"],
                "store_open": location["storeOpen"],
                "telephone": location["telephone"]
            }
            # Merge the parsed sub-dicts so each row carries the full schema.
            this_location_output = {**this_location_output, **address}
            this_location_output = {**this_location_output, **coordinates}
            this_location_output = {**this_location_output, **gas_prices}
            this_location_output = {**this_location_output, **amenities}
            output.append(this_location_output)
            if limit and len(output) == limit:
                break
    return output
|
filiperodriguez/sylvanian | sylvanian_family/config.py | Python | mit | 262 | 0.003817 | # config
#configure our database
class Configuration(object):
    """Flask/peewee configuration constants for the sylvanian database."""
    DATABASE = {
        'name': 'sylvanian',
        'engine': 'peewee.MySQLDatabase',
        'user': 'sylvanian',
        'passwd': '68zL7VeS0W',
    }
    DEBUG = True
    SECRET_KEY = 'ssshhhh'
teddywing/pubnub-python | python/examples/subscribe_group.py | Python | mit | 1,925 | 0.007273 | ## www.pubnub.com - PubNub Real-time push service in the cloud.
# coding=utf8
## PubNub Real-time Push APIs and Notifications Framework
## Copyright (c) 2010 Stephen Blum
## http://www.pubnub.com/
import sys
from pubnub import Pubnub as Pubnub
publish_key = len(sys.argv) > 1 and sys.argv[1] or 'demo'
subscribe_key = len(sys.argv) > 2 and sys.argv[2] or 'demo'
secret_key = len(sys.argv) > 3 and sys.argv[3] or 'demo'
cipher_key = len(sys.argv) > 4 and sys.argv[4] or 'abcd'
ssl_on = len(sys.argv) > 5 and bool(sys.argv[5]) or False
## -----------------------------------------------------------------------
## Initiate Pubnub State
## -----------------------------------------------------------------------
pubnub = Pubnub(publish_key=publish_key, subscribe_key=subscribe_key,
secret_key=secret_key, cipher_key=cipher_key, ssl_on=ssl_on)
channel = 'ab'
# Asynchronous usage
def callback_abc(message, channel, real_channel):
    # Fired for messages on any channel of group 'abc'; after the first
    # message, tear down the whole group subscription.
    print(str(message) + ' , ' + channel + ', ' + real_channel)
    pubnub.unsubscribe_group(channel_group='abc')
    #pubnub.stop()
def callback_d(message, channel):
    # Message handler for the plain 'd' channel subscription.
    print(str(message) + ' , ' + channel)
def error(message):
    # Shared error handler for both subscriptions.
    print("ERROR : " + str(message))
def connect_abc(message):
    # Connection notification for the 'abc' channel-group subscription.
    print("CONNECTED " + str(message))
def connect_d(message):
    # On connect, immediately unsubscribe from 'd' again (demo behaviour).
    print("CONNECTED " + str(message))
    pubnub.unsubscribe(channel='d')
def reconnect(message):
    print("RECONNECTED " + str(message))
def disconnect(message):
    print("DISCONNECTED " + str(message))
print pubnub.channel_group_add_channel(channel_group='abc', channel="b")
pubnub.subscribe_group(channel_groups='a | bc', callback=callback_abc, error=error,
connect=connect_abc, reconnect=reconnect, disconnect=disconnect)
pubnub.subscribe(channels='d', callb | ack=callback_d, error=error,
connect=connect_d, reconnect=reconnect, disconnect=disconnect)
pubnub.start()
|
CloudVLab/professional-services | tools/asset-inventory/asset_inventory/export.py | Python | apache-2.0 | 7,690 | 0.00117 | #!/usr/bin/env python
#
# Copyright 2019 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Invokes Asset Inventory API to export resources, and IAM policies.
For more information on the Cloud Asset Inventory API see:
https://cloud.google.com/resource-manager/docs/cloud-asset-inventory/overview
"""
from __future__ import print_function
import argparse
import logging
import pprint
from concurrent import futures
from google.cloud.exceptions import GoogleCloudError
from google.cloud import asset_v1
class Clients(object):
    """Holds lazily-constructed, process-wide API client objects."""

    # Cached AssetServiceClient; created on first use.
    _cloudasset = None

    @classmethod
    def cloudasset(cls):
        """Return the shared AssetServiceClient, creating it on first call."""
        if not cls._cloudasset:
            cls._cloudasset = asset_v1.AssetServiceClient()
        return cls._cloudasset
def export_to_gcs(parent, gcs_destination, content_type, asset_types):
    """Exports assets to GCS destination.

    Invoke either the cloudasset.organizations.exportAssets or
    cloudasset.projects.exportAssets method depending on if parent is a project
    or organization.

    Args:
        parent: Either `project/<project-id>` or `organization/<organization#>`.
        gcs_destination: GCS uri to export to.
        content_type: Either `RESOURCE` or `IAM_POLICY` or
            None/`CONTENT_TYPE_UNSPECIFIED` for just asset names.
        asset_types: None for all asset types or a list of asset names to
            export.

    Returns:
        The result of the successfully completed export operation.
    """
    output_config = asset_v1.types.OutputConfig()
    output_config.gcs_destination.uri = gcs_destination
    # export_assets returns a long-running operation; block until it finishes.
    operation = Clients.cloudasset().export_assets(
        parent,
        output_config,
        content_type=content_type,
        asset_types=asset_types)
    return operation.result()
def export_to_gcs_content_types(parent, gcs_destination, content_types,
                                asset_types):
    """Export each asset type into a GCS object with the GCS prefix.

    Will call `export_to_gcs` concurrently to perform an export, once for each
    content_type.

    Args:
        parent: Project id or organization number.
        gcs_destination: GCS object prefix to export to (gs://bucket/prefix)
        content_types: List of [RESOURCE, NAME, IAM_POLICY, NAME] to export.
            Defaults to [RESOURCE, IAM_POLICY]
        asset_types: List of asset_types to export. Supply `None` to get
            everything.

    Returns:
        A dict of content_types and export result objects.
    """
    logging.info('performing export from %s to %s of content_types %s',
                 parent, gcs_destination, str(content_types))
    # '*' and None both mean "all asset types" to the API.
    if asset_types == ['*']:
        asset_types = None
    if content_types is None:
        content_types = ['RESOURCE', 'IAM_POLICY']
    # Run one export per content type in parallel; each writes its own
    # <prefix>/<content_type>.json object.
    with futures.ThreadPoolExecutor(max_workers=3) as executor:
        export_futures = {
            executor.submit(export_to_gcs, parent, '{}/{}.json'.format(
                gcs_destination, content_type), content_type, asset_types):
            content_type
            for content_type in content_types
        }
        operation_results = {}
        for future in futures.as_completed(export_futures):
            try:
                content_type = export_futures[future]
                operation_results[content_type] = future.result()
            except GoogleCloudError:
                content_type = export_futures[future]
                logging.exception('Error exporting %s', content_type)
                raise
    logging.info('export results: %s', pprint.pformat(operation_results))
    return operation_results
def add_argparse_args(ap, required=False):
    """Configure the `argparse.ArgumentParser`.

    Args:
        ap: argparse.ArgumentParser to extend with the export flags.
        required: Whether --parent and --gcs-destination must be supplied.
    """
    ap.formatter_class = argparse.RawTextHelpFormatter
    # pylint: disable=line-too-long
    ap.description = (
        'Exports google cloud organization or project assets '
        'to a gcs bucket or bigquery. See:\n'
        'https://cloud.google.com/resource-manager/docs/cloud-asset-inventory/overview\n\n'
        'This MUST be run with a service account owned by a project with the '
        'Cloud Asset API enabled. The gcloud generated user credentials'
        ' do not work. This requires:\n\n'
        ' 1. Enable the Cloud Asset Inventory API on a project (https://console.cloud.google.com/apis/api/cloudasset.googleapis.com/overview)\n'
        ' 2. Create a service account owned by this project\n'
        ' 3. Give the service account roles/cloudasset.viewer at the organization layer\n'
        ' 4. Run on a GCE instance started with this service account,\n'
        '    or download the private key and set GOOGLE_APPLICATION_CREDENTIALS to the file name\n'
        ' 5. Run this command.\n\n'
        'If the GCS bucket being written to is owned by a different project than'
        ' the project that you enabled the API on, then you must also grant the'
        ' "service-<project-id>@gcp-sa-cloudasset.iam.gserviceaccount.com" account'
        ' objectAdmin privileges to the bucket:\n'
        ' gsutil iam ch serviceAccount:service-<project-id>@gcp-sa-cloudasset.iam.gserviceaccount.com:objectAdmin gs://<bucket>\n'
        '\n\n')
    ap.add_argument(
        '--parent',
        required=required,
        help=('Organization number (organizations/123) '
              'or project id (projects/id) or number (projects/123).'))
    ap.add_argument(
        '--gcs-destination', help='URL of the gcs file to write to.',
        required=required)

    def content_types_argument(string):
        """argparse `type` hook: validate a comma separated content-type list."""
        valid_content_types = [
            'CONTENT_TYPE_UNSPECIFIED', 'RESOURCE', 'IAM_POLICY'
        ]
        content_types = [x.strip() for x in string.split(',')]
        for content_type in content_types:
            if content_type not in valid_content_types:
                raise argparse.ArgumentTypeError(
                    'invalid content_type {}'.format(content_type))
        return content_types

    ap.add_argument(
        '--content-types',
        help=('Type content to output for each asset: a comma separated list '
              'of `CONTENT_TYPE_UNSPECIFIED`, `RESOURCE`, `IAM_POLICY`; '
              'defaults to `RESOURCE, IAM_POLICY`.'),
        type=content_types_argument,
        default='RESOURCE, IAM_POLICY',
        nargs='?')
    ap.add_argument(
        '--asset-types',
        help=('Comma separated list of asset types to export such as '
              '"google.compute.Firewall,google.compute.HealthCheck"; '
              'default is `*` for everything'),
        type=lambda x: [y.strip() for y in x.split(',')],
        nargs='?')
def main():
    """Command-line entry point: parse arguments and run the export."""
    logging.basicConfig()
    logging.getLogger().setLevel(logging.INFO)
    ap = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter)
    add_argparse_args(ap, required=True)
    args = ap.parse_args()
    logging.info('Exporting assets.')
    # BUG FIX: args.asset_types is already a list (the `type=` callable in
    # add_argparse_args splits the comma separated string), so calling
    # .split(',') on it raised AttributeError whenever the flag was supplied.
    export_result = export_to_gcs_content_types(
        args.parent,
        args.gcs_destination,
        args.content_types,
        asset_types=args.asset_types if args.asset_types else None)
    logging.info('Export results %s.', pprint.pformat(export_result))
if __name__ == '__main__':
main()
|
subho007/androguard | tests/test_ins.py | Python | apache-2.0 | 5,377 | 0.015064 | #!/usr/bin/env python
# This file is part of Androguard.
#
# Copyright (C) 2012, Anthony Desnos <desnos at t0t0.fr>
# All rights reserved.
#
# Androguard is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Androguard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Androguard. If not, see <http://www.gnu.org/licenses/>.
import sys, re
PATH_INSTALL = "./"
sys.path.append(PATH_INSTALL)
from androguard.core.androgen import AndroguardS
from androguard.core.analysis import analysis
TESTS_CASES = [ #'examples/android/TC/bin/classes.dex',
'examples/android/TestsAndroguard/bin/classes.dex',
]
VALUES = {
'examples/android/TestsAndroguard/bin/classes.dex' : {
"Ltests/androguard/TestInvoke; <init> ()V" : {
0x0 : ("invoke-direct" , [['v',1] , ['meth@', 4, 'Ljava/lang/Object;', '()', 'V', '<init>']]),
0xa : ("invoke-virtual", [['v',1], ['v',0] , ['meth@', 49, 'Ltests/androguard/TestInvoke;', '(I)', 'I', 'TestInvoke1']]),
},
"Ltests/androguard/TestInvoke; TestInvoke1 (I)I" : {
0x4 : ("invoke-virtual", [['v',1] , ['v',2] , ['v',0] , ['meth@', 50,'Ltests/androguard/TestInvoke;' ,'(I I)', 'I', 'TestInvoke2']]),
},
"Ltests/androguard/TestInvoke; TestInvoke2 (I I)I" : {
0x4 : ("invoke-virtual", [['v',1] , ['v',2] , ['v',3] , ['v',0] , ['meth@', 51, 'Ltests/androguard/TestInvoke;', '(I I I)', 'I', 'TestInvoke3']]),
},
"Ltests/androguard/TestInvoke; TestInvoke3 (I I I)I" : {
0x4 : ("invoke-virtual", [['v', 1], ['v', 2], ['v', 3], ['v', 4], ['v', 0], ['meth@', 52, 'Ltests/androguard/TestInvoke;', '(I I I I)', 'I', 'TestInvoke4']]),
},
"Ltests/androguard/TestInvoke; TestInvoke4 (I I I I)I" | : {
0xe : ("invoke-virtual/range", [['v', 0], ['v', 1], ['v', 2], ['v', 3], ['v', 4], ['v', 5], ['meth@', 53, | 'Ltests/androguard/TestInvoke;', '(I I I I I)', 'I', 'TestInvoke5']]),
},
"Ltests/androguard/TestInvoke; TestInvoke5 (I I I I I)I" : {
0x10 : ("invoke-virtual/range", [['v', 0], ['v', 1], ['v', 2], ['v', 3], ['v', 4], ['v', 5], ['v', 6], ['meth@', 54, 'Ltests/androguard/TestInvoke;', '(I I I I I I)', 'I', 'TestInvoke6']]),
},
"Ltests/androguard/TestInvoke; TestInvoke6 (I I I I I I)I" : {
0x12 : ("invoke-virtual/range", [['v', 0], ['v', 1], ['v', 2], ['v', 3], ['v', 4], ['v', 5], ['v', 6], ['v', 7], ['meth@', 55, 'Ltests/androguard/TestInvoke;', '(I I I I I I I)', 'I', 'TestInvoke7']]),
},
"Ltests/androguard/TestInvoke; TestInvoke7 (I I I I I I I)I" : {
0x16 : ("invoke-virtual/range", [['v', 0], ['v', 1], ['v', 2], ['v', 3], ['v', 4], ['v', 5], ['v', 6], ['v', 7], ['v', 8], ['meth@', 56, 'Ltests/androguard/TestInvoke;', '(I I I I I I I I)', 'I', 'TestInvoke8']]),
},
"Ltests/androguard/TestInvoke; TestInvoke8 (I I I I I I I I)I" : {
0x0 : ("mul-int", [['v', 0], ['v', 2], ['v', 3]]),
0x4 : ("mul-int/2addr", [['v', 0], ['v', 4]]),
0x10 : ("return", [['v', 0]]),
}
},
}
def test(got, expected):
    """Print an OK/X line comparing an actual value against the expected one.

    Purely informational: a mismatch is reported on stdout, not raised.
    """
    if got == expected:
        prefix = ' OK '
    else:
        prefix = ' X '
    print('\t%s got: %s expected: %s' % (prefix, repr(got), repr(expected)))
def getVal(i):
    """Return the interesting operand value(s) of an instruction as a list.

    A plain integer operand comes back as a one-element list; a
    ``lookupswitch`` yields its default offset followed by every pair
    offset; anything else maps to ``[-1]``.
    """
    operands = i.get_operands()
    if isinstance(operands, int):
        return [operands]
    if i.get_name() == "lookupswitch":
        # Default target first, then one offset per switch pair.
        offsets = [i.get_operands().default]
        for pair_index in range(i.get_operands().npairs):
            offsets.append(getattr(i.get_operands(), "offset%d" % pair_index))
        return offsets
    return [-1]
def check(a, values):
    """Walk every method of *a* and compare selected instructions to *values*.

    :param a: an AndroguardS-wrapped dex file
    :param values: dict mapping "<class> <method> <descriptor>" keys to
        {bytecode-offset: (mnemonic, operands)} expectations; matched
        entries are deleted from it as a side effect
    """
    for method in a.get_methods():
        key = method.get_class_name() + " " + method.get_name() + " " + method.get_descriptor()
        if key not in values:
            continue
        print("CHECKING ...", method.get_class_name(), method.get_name(), method.get_descriptor())
        code = method.get_code()
        bc = code.get_bc()
        # idx tracks the byte offset of the current instruction.
        idx = 0
        for i in bc.get():
            # print "\t", "%x(%d)" % (idx, idx), i.get_name(), i.get_operands()
            if idx in values[key]:
                elem = values[key][idx]
                # Compare textual "name + operands" forms; remove matched
                # entries so leftovers reveal offsets that were never hit.
                val1 = i.get_name() + "%s" % i.get_operands()
                val2 = elem[0] + "%s" % elem[1]
                test(val1, val2)
                del values[key][idx]
            idx += i.get_length()
for i in TESTS_CASES:
a = AndroguardS( i )
check( a, VALUES[i] )
x = analysis.VMAnalysis( a.get_vm() )
print(x)
|
interfax/interfax-python | tests/test_files.py | Python | mit | 3,313 | 0.000302 | from mimetypes import guess_extension
from uuid import UUID
from interfax.files import File, Files
from interfax.response import Document
try:
from unittest.mock import Mock, patch, call
except ImportError:
from mock import Mock, patch, call
class TestFiles(object):
    """Unit tests for the Files factory wrapper."""
    def setup_method(self, method):
        # Fresh mocked client and Files wrapper for every test.
        self.client = Mock()
        self.files = Files(self.client)
    def teardown_method(self, method):
        del self.client
        del self.files
    def test___init__(self):
        # The wrapper must keep a reference to the client it was built with.
        assert self.files.client == self.client
    def test_create(self, fake):
        # create() should delegate straight to the File constructor,
        # forwarding the client, the data and any keyword arguments.
        data = fake.pystr()
        kwargs = fake.pydict()
        with patch('interfax.files.File') as f:
            self.files.create(data, **kwargs)
            f.assert_called_with(self.client, data, **kwargs)
class TestFile(object):
def setup_method(self, method):
self.client = Mock()
def teardown_method(self, method):
del self.client
def test_with_binary(self, fake):
data = fake.binary(512 * 1024)
mime_type = fake.mime_type()
f = File(self.client, data, mime_type=mime_type)
assert 'Content-Location' not in f.headers
| assert f.mime_type == mime_type
assert f.body == data
assert f.file_tuple() == (None, data, mime_type, None)
def test_with_uri(self, fake):
data = fake.uri()
f = File(self.client, data)
assert f.headers == {'Content-Location': data}
| assert f.mime_type is None
assert f.body is None
assert f.file_tuple() == (None, '', None, {'Content-Location': data})
def test_with_path(self, fake):
data = './tests/test.pdf'
f = File(self.client, data)
with open(data, 'rb') as fp:
content = fp.read()
assert 'Content-Location' not in f.headers
assert f.mime_type == 'application/pdf'
assert f.body == content
assert f.file_tuple() == (None, content, 'application/pdf', None)
def test_with_large_file(self, fake):
data = fake.binary()
mime_type = fake.mime_type()
chunk_size = fake.random_int(len(data) // 20, len(data) // 2)
document = Document(self.client, {
'uri': 'https://rest.interfax.net/outbound/documents/{0}'.format(
fake.random_number())
})
self.client.documents.create.return_value = document
with patch('interfax.files.uuid4') as m:
m.return_value = UUID('8fbaaaaf-87bb-4bd0-9d82-823c3eb38e49')
f = File(self.client, data, mime_type=mime_type,
chunk_size=chunk_size)
assert f.headers == {'Content-Location': document.uri}
assert f.file_tuple() == (None, '', None, {
'Content-Location': document.uri
})
filename = 'upload-8fbaaaaf-87bb-4bd0-9d82-823c3eb38e49{0}'.format(
guess_extension(mime_type)
)
calls = [call.create(filename, len(data))]
cursor = 0
while cursor < len(data):
chunk = data[cursor:cursor + chunk_size]
calls.append(call.upload(document.id, cursor,
cursor + len(chunk) - 1, chunk))
cursor += len(chunk)
self.client.documents.assert_has_calls(calls)
|
unnikrishnankgs/va | venv/lib/python3.5/site-packages/nbformat/v4/tests/test_json.py | Python | bsd-2-clause | 3,764 | 0.00186 | from base64 import decodestring
import json
from unittest import TestCase
from ipython_genutils.py3compat import unicode_type
from ..nbjson import reads, writes
from .. import nbjson
from .nbexamples import nb0
from . import formattest
class TestJSON(formattest.NBFormatTest, TestCase):
nb0_ref = None
ext = 'ipynb'
mod = nbjson
def test_roundtrip_nosplit(self):
"""Ensure that multiline blobs are still readable"""
# ensures that notebooks written prior to splitlines change
# are still readable.
s = writes(nb0, split_lines=False)
self.assertEqual(nbjson.reads(s),nb0)
def test_roundtrip_split(self):
"""Ensure that splitting multiline blocks is safe"""
# This won't differ from test_roundtrip unless the default changes
s = writes(nb0, split_lines=True)
self.assertEqual(nbjson.reads(s),nb0)
def test_splitlines(self):
"""Test splitlines in mime-bundles"""
s = writes(nb0, split_lines=True)
raw_nb = json.loads(s)
for i, ref_cell in enumerate(nb0.cells):
if ref_cell.source.strip() == 'Cell with attachments':
attach_ref = ref_cell['attachments']['attachment1']
attach_json = raw_nb['cells'][i]['attachments']['attachment1']
if ref_cell.source.strip() == 'json_outputs()':
output_ref = ref_cell['outputs'][0]['data']
output_json = raw_nb['cells'][i]['outputs'][0]['data']
for key, json_value in attach_json.items():
if key == 'text/plain':
# text should be split
assert json_value == attach_ref['text/plain'].splitlines(True)
else:
# JSON attachments
assert json_value == attach_ref[key]
# check that JSON outputs are left alone:
for key, json_value in output_json.items():
if key == 'text/plain':
# text should be split
assert json_value == output_ref['text/plain'].splitlines(True)
else:
| # JSON outputs should be left alone
assert json_value == output_ref[key]
def test_read_png(self):
"""PNG output data is b64 unicode"""
s = writes(nb0)
nb1 = nbjson.reads(s)
found_png = False
for cell in nb1.cells:
| if not 'outputs' in cell:
continue
for output in cell.outputs:
if not 'data' in output:
continue
if 'image/png' in output.data:
found_png = True
pngdata = output.data['image/png']
self.assertEqual(type(pngdata), unicode_type)
# test that it is valid b64 data
b64bytes = pngdata.encode('ascii')
raw_bytes = decodestring(b64bytes)
assert found_png, "never found png output"
def test_read_jpeg(self):
"""JPEG output data is b64 unicode"""
s = writes(nb0)
nb1 = nbjson.reads(s)
found_jpeg = False
for cell in nb1.cells:
if not 'outputs' in cell:
continue
for output in cell.outputs:
if not 'data' in output:
continue
if 'image/jpeg' in output.data:
found_jpeg = True
jpegdata = output.data['image/jpeg']
self.assertEqual(type(jpegdata), unicode_type)
# test that it is valid b64 data
b64bytes = jpegdata.encode('ascii')
raw_bytes = decodestring(b64bytes)
assert found_jpeg, "never found jpeg output"
|
vilemnovak/blendercam | scripts/addons/cam/utils.py | Python | gpl-2.0 | 65,143 | 0.003684 | # blender CAM utils.py (c) 2012 Vilem Novak
#
# ***** BEGIN GPL LICENSE BLOCK *****
#
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ***** END GPL LICENCE BLOCK *****
# here is the main functionality of Blender CAM. The functions here are called with operators defined in ops.py.
import bpy
import time
import mathutils
import math
from math import *
from mathutils import *
from bpy.props import *
from bpy_extras import object_utils
import sys, numpy,pickle
from cam.chunk import *
from cam.collision import *
from cam.simple import *
from cam.pattern import *
from cam.polygon_utils_cam import *
from cam.image_utils import *
from cam.opencamlib.opencamlib import oclSample, oclSamplePoints, oclResampleChunks, oclGetWaterline
from shapely.geometry import polygon as spolygon
from shapely import ops as sops
from shapely import geometry as sgeometry
# from shapely.geometry import * not possible until Polygon libs gets out finally..
SHAPELY = True
def positionObject(operation):
    """Move the operation's object so that the machining origin matches the
    operation's material placement settings (X/Y centering and Z reference),
    then bake the translation into mesh objects."""
    ob = bpy.data.objects[operation.object_name]
    bpy.ops.object.origin_set(type='ORIGIN_GEOMETRY', center='BOUNDS')
    ob.select_set(True)
    bpy.context.view_layer.objects.active = ob
    minx, miny, minz, maxx, maxy, maxz = getBoundsWorldspace([ob], operation.use_modifiers)
    size_x = maxx - minx
    size_y = maxy - miny
    size_z = maxz - minz
    # X/Y: either center the material on the origin or butt its minimum
    # corner against it.
    ob.location.x -= (minx + size_x / 2) if operation.material_center_x else minx
    ob.location.y -= (miny + size_y / 2) if operation.material_center_y else miny
    # Z reference: stock entirely below Z0, above Z0, or centered on it.
    if operation.material_Z == 'BELOW':
        ob.location.z -= maxz
    elif operation.material_Z == 'ABOVE':
        ob.location.z -= minz
    elif operation.material_Z == 'CENTERED':
        ob.location.z -= minz + size_z / 2
    # Curves keep their transform; meshes get the translation applied.
    if ob.type != 'CURVE':
        bpy.ops.object.transform_apply(location=True, rotation=False, scale=False)
def getBoundsWorldspace(obs, use_modifiers=False):
    """Return ``(minx, miny, minz, maxx, maxy, maxz)``: the world-space
    axis-aligned bounding box of all objects in *obs*.

    Mesh objects are measured vertex by vertex (optionally on the
    modifier-evaluated mesh), FONT objects are measured on a temporary mesh
    conversion of a duplicate, and anything else is assumed to be a curve
    and measured on its spline control points.

    Note: with an empty *obs* the sentinel values (+/-10000000) are
    returned unchanged, matching the historical behaviour.
    """
    maxx = maxy = maxz = -10000000
    minx = miny = minz = 10000000

    def _expand(matrix, coord):
        # Fold one local-space point, transformed to world space, into the box.
        nonlocal minx, miny, minz, maxx, maxy, maxz
        world = matrix @ Vector((coord[0], coord[1], coord[2]))
        minx = min(minx, world.x)
        miny = min(miny, world.y)
        minz = min(minz, world.z)
        maxx = max(maxx, world.x)
        maxy = max(maxy, world.y)
        maxz = max(maxz, world.z)

    for ob in obs:
        mw = ob.matrix_world
        if ob.type == 'MESH':
            if use_modifiers:
                depsgraph = bpy.context.evaluated_depsgraph_get()
                mesh_owner = ob.evaluated_get(depsgraph)
                mesh = mesh_owner.to_mesh()
            else:
                mesh = ob.data
            for v in mesh.vertices:
                _expand(mw, v.co)
            if use_modifiers:
                mesh_owner.to_mesh_clear()
        elif ob.type == "FONT":
            # Text has no vertices; convert a temporary duplicate to a mesh,
            # measure it, then delete the duplicate again.
            activate(ob)
            bpy.ops.object.duplicate()
            co = bpy.context.active_object
            bpy.ops.object.parent_clear(type='CLEAR_KEEP_TRANSFORM')
            bpy.ops.object.convert(target='MESH', keep_original=False)
            for v in co.data.vertices:
                _expand(mw, v.co)
            bpy.ops.object.delete()
            bpy.ops.outliner.orphans_purge()
        else:
            # Curves: measure both bezier and poly/nurbs control points.
            # NOTE(review): control points ignore bevel/extrude geometry;
            # this matches the original behaviour.
            # This can work badly with some imported curves for scaled
            # objects (see upstream comment) -- matrix_world is used as-is.
            for spline in ob.data.splines:
                for p in spline.bezier_points:
                    _expand(mw, p.co)
                for p in spline.points:
                    _expand(mw, p.co)
    return minx, miny, minz, maxx, maxy, maxz
def getSplineBounds(ob, curve):
    """Return ``(minx, miny, minz, maxx, maxy, maxz)`` of one spline of
    *ob*, measured on its bezier and poly/nurbs control points transformed
    into world space.

    Note: with a spline that has no control points the sentinel values
    (+/-10000000) are returned unchanged, matching the historical
    behaviour.
    """
    maxx = maxy = maxz = -10000000
    minx = miny = minz = 10000000
    mw = ob.matrix_world

    def _expand(coord):
        # Fold one control point, transformed to world space, into the box.
        # This can work badly with some imported curves for scaled objects
        # (see upstream comment) -- matrix_world is used as-is.
        nonlocal minx, miny, minz, maxx, maxy, maxz
        world = mw @ Vector((coord[0], coord[1], coord[2]))
        minx = min(minx, world.x)
        miny = min(miny, world.y)
        minz = min(minz, world.z)
        maxx = max(maxx, world.x)
        maxy = max(maxy, world.y)
        maxz = max(maxz, world.z)

    for p in curve.bezier_points:
        _expand(p.co)
    for p in curve.points:
        _expand(p.co)
    return minx, miny, minz, maxx, maxy, maxz
def getOperationSources(o):
if o.geometry_source == 'OBJECT':
# bpy.ops.object.select_all(action='DESELECT')
ob = bpy.data.objects[o.object_name]
o.objects = [ob]
ob.select_set(True)
bpy.context.view_layer.objects.active = ob
if o.enable_B or o.enable_A:
if o.old_rotation_A != o.rotation_A or o.old_rotation_B != o.rotation_B:
o.old_rotation_A = o.rotation_A
o.old_rotation_B = o.rotation_B
ob=bpy.data.objects[o.object_name]
ob.select_set(True)
bpy.context.view_layer.objects.active = ob
if o.A_along_x : #A parallel with X
if o.enable_A:
bpy.context.active_object.rotation_euler.x = o.rotation_A
if o.enable_B:
bpy.context.active_object.rotation_euler.y = o.rotation_B
else : #A p |
shakamunyi/sahara | sahara/db/migration/alembic_migrations/versions/026_add_is_public_is_protected.py | Python | apache-2.0 | 2,335 | 0.000857 | # Copyright 2015 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""add is_public and is_protected flags
Revision ID: 026
Revises: 025
Create Date: 2015-06-24 12:41:52.571258
"""
# revision identifiers, used by Alembic.
revision = '026'
down_revision = '025'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Add nullable ``is_public`` and ``is_protected`` boolean columns to
    every ACL-aware table."""
    tables = ('clusters', 'cluster_templates', 'node_group_templates',
              'data_sources', 'job_executions', 'jobs',
              'job_binary_internal', 'job_binaries')
    # Same DDL order as before: all is_public columns first, then all
    # is_protected columns.
    for column_name in ('is_public', 'is_protected'):
        for table_name in tables:
            op.add_column(table_name,
                          sa.Column(column_name, sa.Boolean()))
|
JackDanger/sentry | src/sentry/web/forms/edit_organization_member.py | Python | bsd-3-clause | 741 | 0.00135 | from __future__ import absolute_import
from sentry.models import (
AuditLogEntry,
AuditLo | gEntryEvent,
)
from sentry.web.forms.base_organization_member import BaseOrganizationMemberForm
class EditOrganizationMemberForm(BaseOrganizationMemberForm):
    """Form for editing an existing organization member.

    On save it persists the member, applies the team assignments, and
    records a MEMBER_EDIT audit-log entry.
    """

    def save(self, actor, organization, ip_address=None):
        member = super(EditOrganizationMemberForm, self).save()
        self.save_team_assignments(member)
        # Leave an audit trail of who changed the membership and from where.
        AuditLogEntry.objects.create(
            organization=organization,
            actor=actor,
            ip_address=ip_address,
            target_object=member.id,
            target_user=member.user,
            event=AuditLogEntryEvent.MEMBER_EDIT,
            data=member.get_audit_log_data(),
        )
        return member
|
xyfeng/average_portrait | face_swap.py | Python | mit | 6,296 | 0.002065 | """
This is the code behind the Switching Eds blog post:
http://matthewearl.github.io/2015/07/28/switching-eds-with-python/
See the above for an explanation of the code below.
To run the script you'll need to install dlib (http://dlib.net) including its
Python bindings, and OpenCV. You'll also need to obtain the trained model from
sourceforge:
http://sourceforge.net/projects/dclib/files/dlib/v18.10/shape_predictor_68_face_landmarks.dat.bz2
Unzip with `bunzip2` and change `PREDICTOR_PATH` to refer to this file. The
script is run like so:
./faceswap.py <head image> <face image>
If successful, a file `output.jpg` will be produced with the facial features
from `<head image>` replaced with the facial features from `<face image>`.
"""
import cv2
import dlib
import numpy
import sys
PREDICTOR_PATH = "landmarks/shape_predictor_68_face_landmarks.dat"
SCALE_FACTOR = 1
FEATHER_AMOUNT = 11
FACE_POINTS = list(range(17, 68))
MOUTH_POINTS = list(range(48, 61))
RIGHT_BROW_POINTS = list(range(17, 22))
LEFT_BROW_POINTS = list(range(22, 27))
RIGHT_EYE_POINTS = list(range(36, 42))
LEFT_EYE_POINTS = list(range(42, 48))
NOSE_POINTS = list(range(27, 35))
JAW_POINTS = list(range(0, 17))
# Points used to line up the images.
ALIGN_POINTS = (LEFT_BROW_POINTS + RIGHT_EYE_POINTS + LEFT_EYE_POINTS +
RIGHT_BROW_POINTS + NOSE_POINTS + MOUTH_POINTS)
# Points from the second image to overlay on the first. The convex hull of each
# element will be overlaid.
OVERLAY_POINTS = [
LEFT_EYE_POINTS + RIGHT_EYE_POINTS + LEFT_BROW_POINTS + RIGHT_BROW_POINTS,
NOSE_POINTS + MOUTH_POINTS,
]
# Amount of blur to use during colour correction, as a fraction of the
# pupillary distance.
COLOUR_CORRECT_BLUR_FRAC = 0.6
detecto | r = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(PREDICTOR_PATH)
class TooManyFaces(Exception):
    """Raised when more than one face is detected in an image."""
class NoFaces(Exception):
    """Raised when no face at all is detected in an image."""
def get_landmarks(im):
    """Detect exactly one face in *im* and return its 68 landmark points
    as a 68x2 numpy matrix.

    Raises NoFaces when no face is found and TooManyFaces when more than
    one is found.
    """
    faces = detector(im, 1)
    if len(faces) == 0:
        raise NoFaces
    if len(faces) > 1:
        raise TooManyFaces
    return numpy.matrix([[pt.x, pt.y] for pt in predictor(im, faces[0]).parts()])
d | ef annotate_landmarks(im, landmarks):
im = im.copy()
for idx, point in enumerate(landmarks):
pos = (point[0, 0], point[0, 1])
cv2.putText(im, str(idx), pos,
fontFace=cv2.FONT_HERSHEY_SCRIPT_SIMPLEX,
fontScale=0.4,
color=(0, 0, 255))
cv2.circle(im, pos, 3, color=(0, 255, 255))
return im
def draw_convex_hull(im, points, color):
    """Fill the convex hull of *points* into *im* with *color* (in place)."""
    hull = cv2.convexHull(points)
    cv2.fillConvexPoly(im, hull, color=color)
def get_face_mask(im, landmarks):
    """Build a feathered float mask (same height/width as *im*, 3 channels)
    covering the eye/brow and nose/mouth regions of *landmarks*."""
    mask = numpy.zeros(im.shape[:2], dtype=numpy.float64)
    for group in OVERLAY_POINTS:
        draw_convex_hull(mask, landmarks[group], color=1)
    mask = numpy.array([mask, mask, mask]).transpose((1, 2, 0))
    # Binarise a first blur to grow the region slightly, then blur again
    # to feather the mask edge.
    mask = (cv2.GaussianBlur(mask, (FEATHER_AMOUNT, FEATHER_AMOUNT), 0) > 0) * 1.0
    mask = cv2.GaussianBlur(mask, (FEATHER_AMOUNT, FEATHER_AMOUNT), 0)
    return mask
def transformation_from_points(points1, points2):
    """
    Return an affine transformation [s * R | T] such that:

        sum ||s*R*p1,i + T - p2,i||^2

    is minimized (the orthogonal Procrustes problem). Inputs are Nx2
    numpy matrices; the result is a 3x3 homogeneous matrix.
    """
    # Subtract centroids, normalize by the standard deviation, then use the
    # SVD to find the rotation. See:
    # https://en.wikipedia.org/wiki/Orthogonal_Procrustes_problem
    src = points1.astype(numpy.float64)
    dst = points2.astype(numpy.float64)
    src_mean = numpy.mean(src, axis=0)
    dst_mean = numpy.mean(dst, axis=0)
    src = src - src_mean
    dst = dst - dst_mean
    src_scale = numpy.std(src)
    dst_scale = numpy.std(dst)
    src = src / src_scale
    dst = dst / dst_scale
    U, S, Vt = numpy.linalg.svd(src.T * dst)
    # The rotation we need is the transpose of U * Vt, because the standard
    # formulation puts the matrix on the right (row vectors) while this
    # code applies it on the left (column vectors).
    R = (U * Vt).T
    scale = dst_scale / src_scale
    affine = numpy.hstack((scale * R, dst_mean.T - scale * R * src_mean.T))
    return numpy.vstack([affine, numpy.matrix([0., 0., 1.])])
def read_im_and_landmarks(fname):
    """Load *fname* as a colour image, rescale it by SCALE_FACTOR, and
    return the pair (image, landmarks)."""
    im = cv2.imread(fname, cv2.IMREAD_COLOR)
    scaled_size = (im.shape[1] * SCALE_FACTOR, im.shape[0] * SCALE_FACTOR)
    im = cv2.resize(im, scaled_size)
    return im, get_landmarks(im)
def warp_im(im, M, dshape):
    """Warp *im* by the affine part of the 3x3 matrix *M* (applied in the
    inverse direction) into a new image of shape *dshape*."""
    warped = numpy.zeros(dshape, dtype=im.dtype)
    cv2.warpAffine(im,
                   M[:2],
                   (dshape[1], dshape[0]),
                   dst=warped,
                   borderMode=cv2.BORDER_TRANSPARENT,
                   flags=cv2.WARP_INVERSE_MAP)
    return warped
def correct_colours(im1, im2, landmarks1):
    """Return *im2* with its colour balance matched to *im1*.

    Divides im2 by its own Gaussian blur and multiplies by im1's blur, so
    the low-frequency colour comes from im1 while im2 keeps its detail.
    The blur kernel is proportional to the pupillary distance measured on
    *landmarks1*. Returns a float64 image (not clipped to [0, 255]).
    """
    blur_amount = COLOUR_CORRECT_BLUR_FRAC * numpy.linalg.norm(
                              numpy.mean(landmarks1[LEFT_EYE_POINTS], axis=0) -
                              numpy.mean(landmarks1[RIGHT_EYE_POINTS], axis=0))
    blur_amount = int(blur_amount)
    # GaussianBlur requires an odd kernel size.
    if blur_amount % 2 == 0:
        blur_amount += 1
    im1_blur = cv2.GaussianBlur(im1, (blur_amount, blur_amount), 0)
    im2_blur = cv2.GaussianBlur(im2, (blur_amount, blur_amount), 0)
    # Avoid divide-by-zero errors.
    # NOTE(review): if im2_blur is uint8 (the usual cv2.imread dtype), this
    # in-place += of an int array relies on wrap-around/unsafe casting --
    # confirm numpy version tolerates it.
    im2_blur += 128 * (im2_blur <= 1.0)
    return (im2.astype(numpy.float64) * im1_blur.astype(numpy.float64) /
                                                im2_blur.astype(numpy.float64))
# --- Script entry: swap the face from image 2 onto the head in image 1. ---
im1, landmarks1 = read_im_and_landmarks(sys.argv[1])
im2, landmarks2 = read_im_and_landmarks(sys.argv[2])
# Affine transform aligning image 2's face onto image 1, computed on the
# stable landmark subset (eyes, brows, nose, mouth).
M = transformation_from_points(landmarks1[ALIGN_POINTS],
                               landmarks2[ALIGN_POINTS])
mask = get_face_mask(im2, landmarks2)
warped_mask = warp_im(mask, M, im1.shape)
# Union of both masks so features from either face are fully covered.
combined_mask = numpy.max([get_face_mask(im1, landmarks1), warped_mask],
                          axis=0)
warped_im2 = warp_im(im2, M, im1.shape)
warped_corrected_im2 = correct_colours(im1, warped_im2, landmarks1)
# Blend: keep im1 outside the mask, colour-corrected im2 inside it.
output_im = im1 * (1.0 - combined_mask) + warped_corrected_im2 * combined_mask
cv2.imwrite('result_swap.jpg', output_im)
|
potatolondon/djangoappengine-1-4 | db/compiler.py | Python | bsd-3-clause | 22,103 | 0.001086 | from functools import wraps
import sys
import logging
from django.db.models.fields import AutoField
from django.db.models.sql import aggregates as sqlaggregates
from django.db.models.sql.constants import LOOKUP_SEP, MULTI, SINGLE
from django.db.models.sql.where import AND, OR
from django.db.utils import DatabaseError, IntegrityError
from django.utils.tree import Node
from django.db.models.sql.compiler import MULTI, empty_iter
from google.appengine.api.datastore import Entity, Query, MultiQuery, \
Put, Get, Delete
from google.appengine.api.datastore_errors import Error as GAEError
from google.appengine.api.datastore_types import Key, Text
from google.appengine.ext import db
from djangotoolbox.db.basecompiler import (
NonrelQuery,
NonrelCompiler,
NonrelInsertCompiler,
NonrelUpdateCompiler,
NonrelDeleteCompiler)
from .base import InvalidGaeKey
from .db_settings import get_model_indexes
from .expressions import ExpressionEvaluator
from .utils import commit_locked
from ..fields import AncestorKey
# Valid query types (a dictionary is used for speedy lookups).
# Valid query types (a dictionary is used for speedy lookups).
# A value of None marks lookups that have no single native datastore
# operator and are handled by special-case code in GAEQuery.add_filter.
OPERATORS_MAP = {
    'exact': '=',
    'gt': '>',
    'gte': '>=',
    'lt': '<',
    'lte': '<=',
    # The following operators are supported with special code below.
    'isnull': None,
    'in': None,
    'startswith': None,
    'range': None,
    'year': None,
}
# GAE filters used for negated Django lookups.
# Note: inequalities can be negated natively; equality cannot.
NEGATION_MAP = {
    'gt': '<=',
    'gte': '<',
    'lt': '>=',
    'lte': '>',
    # TODO: Support: "'exact': '!='" (it might actually become
    # individual '<' and '>' queries).
}
# In some places None is an allowed value, and we need to distinguish
# it from the lack of value.
NOT_PROVIDED = object()
def safe_call(func):
    """
    Causes the decorated function to reraise GAE datastore errors as
    Django DatabaseErrors.
    """
    @wraps(func)
    def _func(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except GAEError, e:
            # Re-raise as DatabaseError while preserving the original
            # traceback (Python 2 three-argument raise).
            raise DatabaseError, DatabaseError(str(e)), sys.exc_info()[2]
    return _func
class GAEQuery(NonrelQuery):
"""
A simple App Engine query: no joins, no distinct, etc.
"""
# ----------------------------------------------
# Public API
# ----------------------------------------------
    def __init__(self, compiler, fields):
        # Set up a single native datastore Query for this Django query;
        # filters and ordering are added later via add_filter / order_by.
        super(GAEQuery, self).__init__(compiler, fields)
        self.inequality_field = None  # GAE allows inequality on one property only
        self.included_pks = None  # keys for a batch Get (pk exact / pk__in)
        self.ancestor_key = None  # set by the special 'ancestor' lookup
        self.excluded_pks = ()  # keys filtered out client-side (negated pk lookups)
        self.has_negated_exact_filter = False
        self.ordering = []
        self.db_table = self.query.get_meta().db_table
        # When only the primary key is requested, a keys-only query suffices.
        self.pks_only = (len(fields) == 1 and fields[0].primary_key)
        # Pagination cursors may have been attached to the Django query.
        start_cursor = getattr(self.query, '_gae_start_cursor', None)
        end_cursor = getattr(self.query, '_gae_end_cursor', None)
        self.gae_query = [Query(self.db_table, keys_only=self.pks_only,
                                cursor=start_cursor, end_cursor=end_cursor)]
    # This is needed for debugging.
    def __repr__(self):
        # Shows the native query/queries plus the pending sort orders.
        return '<GAEQuery: %r ORDER %r>' % (self.gae_query, self.ordering)
    @safe_call
    def fetch(self, low_mark=0, high_mark=None):
        """Run the query and yield entities in the [low_mark, high_mark) slice.

        Keys in self.excluded_pks are skipped client-side; high_mark is
        widened by their count up front so the slice still yields enough
        results after the skips.
        """
        query = self._build_query()
        executed = False
        if self.excluded_pks and high_mark is not None:
            high_mark += len(self.excluded_pks)
        if self.included_pks is not None:
            # pk exact / pk__in filters are served by a batch Get, not a query.
            results = self.get_matching_pk(low_mark, high_mark)
        else:
            if high_mark is None:
                kw = {}
                if low_mark:
                    kw['offset'] = low_mark
                results = query.Run(**kw)
                executed = True
            elif high_mark > low_mark:
                results = query.Get(high_mark - low_mark, low_mark)
                executed = True
            else:
                # Empty slice: nothing to fetch.
                results = ()
        for entity in results:
            # Keys-only queries yield Key objects instead of full entities.
            if isinstance(entity, Key):
                key = entity
            else:
                key = entity.key()
            if key in self.excluded_pks:
                continue
            yield self._make_entity(entity)
        if executed and not isinstance(query, MultiQuery):
            # Remember the compiled cursor so callers can paginate further;
            # best-effort only (not all queries produce one).
            try:
                self.query._gae_cursor = query.GetCompiledCursor()
            except:
                pass
    @safe_call
    def count(self, limit=NOT_PROVIDED):
        """Return the number of matching results, honouring pk batch
        filters and client-side pk exclusions."""
        if self.included_pks is not None:
            return len(self.get_matching_pk(0, limit))
        if self.excluded_pks:
            # Excluded keys are filtered client-side, so fetch and count.
            return len(list(self.fetch(0, 2000)))
        # The datastore's Count() method has a 'limit' kwarg, which has
        # a default value (obviously). This value can be overridden to
        # anything you like, and importantly can be overridden to
        # unlimited by passing a value of None. Hence *this* method
        # has a default value of NOT_PROVIDED, rather than a default
        # value of None
        kw = {}
        if limit is not NOT_PROVIDED:
            kw['limit'] = limit
        return self._build_query().Count(**kw)
    @safe_call
    def delete(self):
        """Delete every entity matched by this query in one batch call."""
        if self.included_pks is not None:
            # Batch-get path: the keys are already known.
            keys = [key for key in self.included_pks if key is not None]
        else:
            keys = self.fetch()
        keys = list(keys)
        if keys:
            Delete(keys)
    @safe_call
    def order_by(self, ordering):
        """Translate Django (field, ascending) pairs into native datastore
        sort orders; primary-key sorts map to the special '__key__' column."""
        # GAE doesn't have any kind of natural ordering?
        if not isinstance(ordering, bool):
            for field, ascending in ordering:
                column = '__key__' if field.primary_key else field.column
                direction = Query.ASCENDING if ascending else Query.DESCENDING
                self.ordering.append((column, direction))
    def _decode_child(self, child):
        """Decode one WHERE-tree child into a (column, lookup_type, value)
        triple, special-casing the synthetic '__ancestor' lookup."""
        #HACKY: If this is an ancestor lookup, then just special case
        #to return the ID, a special ancestor lookup, and the ancestor instance
        constraint, lookup_type, annotation, value = child
        if constraint.col == '__ancestor':
            return ('id', 'ancestor', value)
        try:
            return super(GAEQuery, self)._decode_child(child)
        except InvalidGaeKey:
            # A malformed key can never match; but under negation we refuse
            # to silently match everything and raise instead.
            if not self._negated:
                raise
            else:
                raise DatabaseError("Invalid value for a key lookup on GAE.")
@safe_call
def add_filter(self, field, lookup_type, negated, value):
"""
This function is used by the default add_filters()
implementation.
"""
if lookup_type == 'ancestor':
self.ancestor_key = Key.from_path(value._meta.db_table, value.pk)
return
if lookup_type not in OPERATORS_MAP:
raise DatabaseError("Lookup type %r isn't supported." %
lookup_type)
# GAE does not let you store empty lists, so we can tell
# upfront that queriying for one will return nothing.
if value in ([], ()) and not negated:
self.included_pks = []
return
# Optimization: batch-get by key; this is only suitable for
# primary keys, not for anything that uses the key type.
if field.primary_key and lookup_type in ('exact', 'in'):
if self.included_pks is not None:
raise DatabaseError("You can't apply multiple AND "
"filters on the primary key. "
"Did you mean __in=[...]?")
| if not isinstance(value, (tuple, list)):
value = [value]
pks = [pk for pk in value if pk is not None]
if field.rel:
pks = [ Key.from_path(self.db_table, pk.id_or_name()) for pk in pks ]
if negated:
self.excluded_pks = pks
else:
| self.included_pks = pks
return
# We check for negation after lookup_type isnull because it
# simplifies the code. All following lookup_type checks assume
# that they're not negated.
if lookup_type == 'isnull':
if (negated and value) or not value:
|
RalfBarkow/Zettelkasten | venv/lib/python3.9/site-packages/pip/_vendor/distlib/compat.py | Python | gpl-3.0 | 41,408 | 0.002198 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2013-2017 Vinay Sajip.
# Licensed to the Python Software Foundation under a contributor agreement.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
from __future__ import absolute_import
import os
import re
import sys
try:
import ssl
except ImportError: # pragma: no cover
ssl = None
if sys.version_info[0] < 3: # pragma: no cover
from StringIO import StringIO
string_types = basestring,
text_type = unicode
from types import FileType as file_type
import __builtin__ as builtins
import ConfigParser as configparser
from ._backport import shutil
from urlparse import urlparse, urlunparse, urljoin, urlsplit, urlunsplit
from urllib import (urlretrieve, quote as _quote, unquote, url2pathname,
pathname2url, ContentTooShortError, splittype)
def quote(s):
if isinstance(s, unicode):
s = s.encode('utf-8')
return _quote(s)
import urllib2
from urllib2 import (Request, urlopen, URLError, HTTPError,
HTTPBasicAuthHandler, HTTPPasswordMgr,
HTTPHandler, HTTPRedirectHandler,
build_opener)
if ssl:
from urllib2 import HTTPSHandler
import httplib
import xmlrpclib
import Queue as queue
from HTMLParser import HTMLParser
import htmlentitydefs
raw_input = raw_input
from itertools import ifilter as filter
from itertools import ifilterfalse as filterfalse
_userprog = None
def splituser(host):
"""splituser('user[:passwd]@host[:port]') --> 'user[:passwd]', 'host[:port]'."""
global _userprog
if _userprog is None:
import re
_userprog = re.compile('^(.*)@(.*)$')
match = _userprog.match(host)
if match: return match.group(1, 2)
return None, host
else: # pragma: no cover
from io import StringIO
string_types = str,
text_type = str
from io import TextIOWrapper as file_type
import builtins
import configparser
import shutil
from urllib.parse import (urlparse, urlunparse, urljoin, splituser, quote,
unquote, urlsplit, urlunsplit, splittype)
from urllib.request import (urlopen, urlretrieve, Request, url2pathname,
pathname2url,
HTTPBasicAuthHandler, HTTPPasswordMgr,
HTTPHandler, HTTPRedirectHandler,
build_opener)
if ssl:
from urllib.request import HTTPSHandler
from urllib.error import HTTPError, URLError, ContentTooShortError
import http.client as httplib
import urllib.request as urllib2
import xmlrpc.client as xmlrpclib
import queue
from html.parser import HTMLParser
import html.entities as htmlentitydefs
raw_input = input
from itertools import filterfalse
filter = filter
try:
from ssl import match_hostname, CertificateError
except ImportError: # pragma: no cover
    class CertificateError(ValueError):
        """Raised when a TLS certificate does not match the requested host
        (fallback for Python versions whose ssl module lacks it)."""
        pass
    def _dnsname_match(dn, hostname, max_wildcards=1):
        """Matching according to RFC 6125, section 6.4.3

        http://tools.ietf.org/html/rfc6125#section-6.4.3

        Returns a truthy value (a regex match object, or True-like bool)
        when the certificate name *dn* matches *hostname*, a falsy value
        otherwise. At most *max_wildcards* '*' characters are accepted in
        the left-most label.
        """
        pats = []
        if not dn:
            return False
        parts = dn.split('.')
        leftmost, remainder = parts[0], parts[1:]
        wildcards = leftmost.count('*')
        if wildcards > max_wildcards:
            # Issue #17980: avoid denials of service by refusing more
            # than one wildcard per fragment. A survey of established
            # policy among SSL implementations showed it to be a
            # reasonable choice.
            raise CertificateError(
                "too many wildcards in certificate DNS name: " + repr(dn))
        # speed up common case w/o wildcards
        if not wildcards:
            return dn.lower() == hostname.lower()
        # RFC 6125, section 6.4.3, subitem 1.
        # The client SHOULD NOT attempt to match a presented identifier in which
        # the wildcard character comprises a label other than the left-most label.
        if leftmost == '*':
            # When '*' is a fragment by itself, it matches a non-empty dotless
            # fragment.
            pats.append('[^.]+')
        elif leftmost.startswith('xn--') or hostname.startswith('xn--'):
            # RFC 6125, section 6.4.3, subitem 3.
            # The client SHOULD NOT attempt to match a presented identifier
            # where the wildcard character is embedded within an A-label or
            # U-label of an internationalized domain name.
            pats.append(re.escape(leftmost))
        else:
            # Otherwise, '*' matches any dotless string, e.g. www*
            pats.append(re.escape(leftmost).replace(r'\*', '[^.]*'))
        # add the remaining fragments, ignore any wildcards
        for frag in remainder:
            pats.append(re.escape(frag))
        pat = re.compile(r'\A' + r'\.'.join(pats) + r'\Z', re.IGNORECASE)
        return pat.match(hostname)
    def match_hostname(cert, hostname):
        """Verify that *cert* (in decoded format as returned by
        SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 and RFC 6125
        rules are followed, but IP addresses are not accepted for *hostname*.

        CertificateError is raised on failure. On success, the function
        returns nothing.
        """
        if not cert:
            raise ValueError("empty or no certificate, match_hostname needs a "
                             "SSL socket or SSL context with either "
                             "CERT_OPTIONAL or CERT_REQUIRED")
        dnsnames = []
        # dNSName entries in subjectAltName take precedence over commonName.
        san = cert.get('subjectAltName', ())
        for key, value in san:
            if key == 'DNS':
                if _dnsname_match(value, hostname):
                    return
                dnsnames.append(value)
        if not dnsnames:
            # The subject is only checked when there is no dNSName entry
            # in subjectAltName
            for sub in cert.get('subject', ()):
                for key, value in sub:
                    # XXX according to RFC 2818, the most specific Common Name
                    # must be used.
                    if key == 'commonName':
                        if _dnsname_match(value, hostname):
                            return
                        dnsnames.append(value)
        # No name matched: build an error message listing everything tried.
        if len(dnsnames) > 1:
            raise CertificateError("hostname %r "
                                   "doesn't match either of %s"
                                   % (hostname, ', '.join(map(repr, dnsnames))))
        elif len(dnsnames) == 1:
            raise CertificateError("hostname %r "
                                   "doesn't match %r"
                                   % (hostname, dnsnames[0]))
        else:
            raise CertificateError("no appropriate commonName or "
                                   "subjectAltName fields were found")
try:
from types import SimpleNamespace as Container
except ImportError: # pragma: no cover
    class Container(object):
        """
        A generic container for when multiple values need to be returned
        """
        def __init__(self, **kwargs):
            # Expose every keyword argument as an instance attribute,
            # mirroring types.SimpleNamespace on newer Pythons.
            self.__dict__.update(kwargs)
try:
from shutil import which
except ImportError: # pragma: no cover
# Implementation from Python 3.3
def which(cmd, mode=os.F_OK | os.X_OK, path=None):
"""Given a command, mode, and a PATH string, return the path which
conforms to the given mode on the PATH, or None if there is no such
file.
`mode` defaults to os.F_OK | os.X_OK. `path` defaults to the result
of os.environ.get("PATH"), or can be overridden with a custom search
path.
"""
# Check that a given file can be accessed with the correct mode.
# Additionally check that `file` is not a directory, as on Windows
# directories pass the os.access check.
def _access_check(fn, mode):
return (os.path.exists(fn) and os.access(fn, mode)
and not os.path.isdir(fn))
# If we're given |
yannrouillard/weboob | modules/ina/backend.py | Python | agpl-3.0 | 1,901 | 0.001578 | # -*- coding: utf-8 -*-
# Copyright(C) 2010-2011 Christophe Benz
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Af | fero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
| # along with weboob. If not, see <http://www.gnu.org/licenses/>.
from weboob.capabilities.video import ICapVideo
from weboob.tools.backend import BaseBackend
from .browser import InaBrowser
from .video import InaVideo
__all__ = ['InaBackend']
class InaBackend(BaseBackend, ICapVideo):
    """Weboob backend for the INA French TV video archives."""

    NAME = 'ina'
    MAINTAINER = u'Christophe Benz'
    EMAIL = 'christophe.benz@gmail.com'
    VERSION = '0.i'
    DESCRIPTION = 'INA French TV video archives'
    LICENSE = 'AGPLv3+'
    BROWSER = InaBrowser

    def get_video(self, _id):
        """Fetch a single video by its id."""
        return self.browser.get_video(_id)

    def search_videos(self, pattern, sortby=ICapVideo.SEARCH_RELEVANCE, nsfw=False):
        """Search videos matching *pattern* (sortby/nsfw are ignored)."""
        with self.browser:
            return self.browser.search_videos(pattern)

    def fill_video(self, video, fields):
        """Lazily complete *video* with the requested *fields*."""
        # Asking for anything beyond the thumbnail means we want the full
        # video record, so refetch everything.
        if fields != ['thumbnail']:
            with self.browser:
                video = self.browser.get_video(video.id, video)
        if 'thumbnail' in fields and video.thumbnail:
            with self.browser:
                video.thumbnail.data = self.browser.readurl(video.thumbnail.url)
        return video

    OBJECTS = {InaVideo: fill_video}
OnroerendErfgoed/skosprovider_getty | setup.py | Python | mit | 1,278 | 0 | import os
from setuptools import setup
here = os.path.abspath(os.path.dirname(__file__))
README = open(os.path.join(here, 'README.rst')).read()
CHANGES | = open(os.path.join(here, 'CHANGES.rst')).read()
| packages = [
'skosprovider_getty'
]
requires = [
'skosprovider>=1.1.0',
'requests',
'rdflib'
]
setup(
name='skosprovider_getty',
version='1.0.0',
description='Skosprovider implementation of the Getty Vocabularies',
long_description=README + '\n\n' + CHANGES,
long_description_content_type='text/x-rst',
packages=packages,
include_package_data=True,
install_requires=requires,
license='MIT',
zip_safe=False,
classifiers=[
'Intended Audience :: Developers',
'Natural Language :: English',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
],
author='Flanders Heritage Agency',
author_email='ict@onroerenderfgoed.be',
url='https://github.com/OnroerendErfgoed/skosprovider_getty',
keywords='getty skos skosprovider vocabulary AAT TGN ULAN'
)
|
wzhfy/spark | python/pyspark/__init__.py | Python | apache-2.0 | 4,687 | 0.00192 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
PySpark is the Python API for Spark.
Public classes:
- :class:`SparkContext`:
Main entry point for Spark functionality.
- :class:`RDD`:
A Resilient Distributed Dataset (RDD), the basic abstraction in Spark.
- :class:`Broadcast`:
A broadcast variable that gets reused across tasks.
- :class:`Accumulator`:
An "add-only" shared variable that tasks can only add values to.
- :class:`SparkConf`:
For configuring Spark.
- : | class:`SparkFiles`:
Access files shipped with jobs.
- :class:`StorageLevel`:
Finer-grained cache persistence levels.
- :class:`TaskContext`:
Information about th | e current running task, available on the workers and experimental.
- :class:`RDDBarrier`:
Wraps an RDD under a barrier stage for barrier execution.
- :class:`BarrierTaskContext`:
A :class:`TaskContext` that provides extra info and tooling for barrier execution.
- :class:`BarrierTaskInfo`:
Information about a barrier task.
- :class:`InheritableThread`:
A inheritable thread to use in Spark when the pinned thread mode is on.
"""
from functools import wraps
import types
from pyspark.conf import SparkConf
from pyspark.context import SparkContext
from pyspark.rdd import RDD, RDDBarrier
from pyspark.files import SparkFiles
from pyspark.util import InheritableThread
from pyspark.storagelevel import StorageLevel
from pyspark.accumulators import Accumulator, AccumulatorParam
from pyspark.broadcast import Broadcast
from pyspark.serializers import MarshalSerializer, PickleSerializer
from pyspark.status import *
from pyspark.taskcontext import TaskContext, BarrierTaskContext, BarrierTaskInfo
from pyspark.profiler import Profiler, BasicProfiler
from pyspark.version import __version__ # noqa: F401
from pyspark._globals import _NoValue # noqa: F401
def since(version):
    """
    A decorator that annotates a function to append the version of Spark the function was added.
    """
    import re
    indent_pattern = re.compile(r'\n( +)')

    def deco(f):
        # Mirror the docstring's smallest existing indentation so the appended
        # Sphinx directive lines up with the rest of the docstring body.
        matches = indent_pattern.findall(f.__doc__)
        if matches:
            pad = ' ' * min(len(m) for m in matches)
        else:
            pad = ''
        f.__doc__ = "%s\n\n%s.. versionadded:: %s" % (f.__doc__.rstrip(), pad, version)
        return f
    return deco
def copy_func(f, name=None, sinceversion=None, doc=None):
    """
    Returns a function with same code, globals, defaults, closure, and
    name (or provide a new name).
    """
    # Clone via the function constructor; copy.deepcopy does not handle
    # functions well.  See
    # http://stackoverflow.com/questions/6527633/how-can-i-make-a-deepcopy-of-a-function-in-python
    fn = types.FunctionType(
        f.__code__,
        f.__globals__,
        name or f.__name__,
        f.__defaults__,
        f.__closure__,
    )
    # Carry over any attributes that were set on f (shallow copy of the dict).
    fn.__dict__.update(f.__dict__)
    if doc is not None:
        fn.__doc__ = doc
    if sinceversion is not None:
        # Stamp a ".. versionadded::" directive onto the clone's docstring.
        fn = since(sinceversion)(fn)
    return fn
def keyword_only(func):
    """
    A decorator that forces keyword arguments in the wrapped method
    and saves actual input keyword arguments in `_input_kwargs`.

    .. note:: Should only be used to wrap a method where first arg is `self`
    """
    @wraps(func)
    def wrapper(self, *args, **kwargs):
        # Everything beyond `self` must be spelled as a keyword so the actual
        # caller-supplied arguments can be recorded faithfully.
        if args:
            raise TypeError("Method %s forces keyword arguments." % func.__name__)
        # Stash only what the caller explicitly passed (defaults excluded).
        self._input_kwargs = kwargs
        return func(self, **kwargs)
    return wrapper
# for back compatibility
from pyspark.sql import SQLContext, HiveContext, Row # noqa: F401
__all__ = [
"SparkConf", "SparkContext", "SparkFiles", "RDD", "StorageLevel", "Broadcast",
"Accumulator", "AccumulatorParam", "MarshalSerializer", "PickleSerializer",
"StatusTracker", "SparkJobInfo", "SparkStageInfo", "Profiler", "BasicProfiler", "TaskContext",
"RDDBarrier", "BarrierTaskContext", "BarrierTaskInfo", "InheritableThread",
]
|
esarafianou/rupture | backend/breach/tests/test_views.py | Python | mit | 8,595 | 0.001862 | from django.test import Client, TestCase
from django.core.urlresolvers import reverse
from breach.models import Target, Victim, Round, SampleSet
from breach.views import TargetView, VictimListView
import json
from binascii import hexlify
class ViewsTestCase(TestCase):
def setUp(self):
self.client = Client()
self.target1 = Target.objects.create(
name='ruptureit',
endpoint='https://ruptureit.com/test.php?reflection=%s',
prefix='imper',
alphabet='abcdefghijklmnopqrstuvwxyz',
secretlength=9,
alignmentalphabet='ABCDEFGHIJKLMNOPQRSTUVWXYZ',
| recordscardinality=1,
method=1
)
self.target2 = Target.objects.create(
name='ruptureit2',
endpoint='https://ruptureit.com/test.php?reflection=%s',
prefix='imper',
alphabet='abcdefghijklmnopqrstuvwxyz',
| secretlength=9,
alignmentalphabet='ABCDEFGHIJKLMNOPQRSTUVWXYZ',
recordscardinality=1,
method=2
)
self.target1_data = {
'name': 'ruptureit',
'endpoint': 'https://ruptureit.com/test.php?reflection=%s',
'prefix': 'imper',
'alphabet': 'abcdefghijklmnopqrstuvwxyz',
'secretlength': 9,
'alignmentalphabet': 'ABCDEFGHIJKLMNOPQRSTUVWXYZ',
'recordscardinality': 1,
'method': 1
}
self.target2_data = {
'name': 'ruptureit2',
'endpoint': 'https://ruptureit.com/test.php?reflection=%s',
'prefix': 'imper',
'alphabet': 'abcdefghijklmnopqrstuvwxyz',
'secretlength': 9,
'alignmentalphabet': 'ABCDEFGHIJKLMNOPQRSTUVWXYZ',
'recordscardinality': 1,
'method': 2
}
    def test_target_post(self):
        """
        POST /target with a full payload creates the target and the
        response echoes its name back as ``target_name``.
        """
        # Create the request
        data = {
            'name': 'ruptureit3',
            'endpoint': 'https://ruptureit.com/test.php?reflection=%s',
            'prefix': 'imper',
            'alphabet': 'abcdefghijklmnopqrstuvwxyz',
            'secretlength': 9,
            'alignmentalphabet': 'ABCDEFGHIJKLMNOPQRSTUVWXYZ',
            'recordscardinality': 1,
            'method': 1
        }
        response = self.client.post(reverse('TargetView'), json.dumps(data), content_type='application/json')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(json.loads(response.content)['target_name'], 'ruptureit3')
    def test_target_get(self):
        """GET /target lists both fixture targets with their fields intact."""
        response = self.client.get(reverse('TargetView'))
        # Project each returned target down to the fixture's keys so extra
        # server-side fields (ids, timestamps, ...) don't break the compare.
        response_dict1 = {key: json.loads(response.content)['targets'][0][key] for key in self.target1_data}
        response_dict2 = {key: json.loads(response.content)['targets'][1][key] for key in self.target2_data}
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response_dict1, self.target1_data)
        self.assertEqual(response_dict2, self.target2_data)
    def test_victim_post(self):
        """
        POST /victim with a source IP creates a victim and returns its id.
        """
        # Create the request
        data = {
            'sourceip': '192.168.1.5',
        }
        response = self.client.post(reverse('VictimListView'), json.dumps(data), content_type='application/json')
        self.assertEqual(response.status_code, 200)
        # First victim in a fresh test database gets pk 1.
        self.assertEqual(json.loads(response.content)['victim_id'], 1)
    def test_victim_get(self):
        """GET /victim lists a victim that already has an attack round."""
        victim = Victim.objects.create(
            sourceip='192.168.1.5',
            target=self.target1
        )
        round_data = {
            'victim': victim,
            'index': 1,
            'amount': self.target1.samplesize,
            # NOTE(review): literal omits 'w' ("...uvxyz") -- intentional? confirm
            'knownalphabet': 'abcdefghijklmnopqrstuvxyz',
            'knownsecret': 'imper'
        }
        new_round = Round(**round_data)
        new_round.save()
        response = self.client.get(reverse('VictimListView'))
        self.assertEqual(response.status_code, 200)
        self.assertEqual(json.loads(response.content)['victims'][0]['sourceip'], '192.168.1.5')
    def test_attack_post_noID(self):
        """
        POST /attack without a victim id: the victim is created from the
        source IP and associated with the named target.
        """
        # Create the request
        data = {
            'sourceip': '192.168.1.6',
            'target': self.target1.name
        }
        response = self.client.post(reverse('AttackView'), json.dumps(data), content_type='application/json')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(json.loads(response.content)['victim_id'], 1)
    def test_attack_post_ID(self):
        """
        POST /attack with an existing victim id attaches that victim to the
        named target and echoes the same id back.
        """
        victim = Victim.objects.create(
            sourceip='192.168.1.5'
        )
        # Create the request
        data = {
            'id': victim.id,
            'target': self.target1.name
        }
        response = self.client.post(reverse('AttackView'), json.dumps(data), content_type='application/json')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(json.loads(response.content)['victim_id'], victim.id)
    def test_victimID_get(self):
        """GET /victim/<id> returns that victim's IP, target name and
        per-batch attack details, ignoring other victims in the database."""
        victim = Victim.objects.create(
            sourceip='192.168.1.5',
            target=self.target1
        )
        # Second victim on a different target must not leak into the response.
        victim2 = Victim.objects.create(
            sourceip='192.168.1.6',
            target=self.target2
        )
        round_data = {
            'victim': victim,
            'index': 1,
            'amount': victim.target.samplesize,
            # NOTE(review): literal omits 'w' ("...uvxyz") -- intentional? confirm
            'knownalphabet': 'abcdefghijklmnopqrstuvxyz',
            'knownsecret': 'imper'
        }
        new_round = Round(**round_data)
        new_round.save()
        # Two successful samplesets for the round, one per candidate letter.
        sampleset1_data = {
            'round': new_round,
            'candidatealphabet': 'a',
            'data': hexlify('length'),
            'success': True,
            # NOTE(review): omits 'W' -- intentional? confirm
            'alignmentalphabet': 'ABCDEFGHIJKLMNOPQRSTUVXYZ'
        }
        sampleset = SampleSet(**sampleset1_data)
        sampleset.save()
        sampleset2_data = {
            'round': new_round,
            'candidatealphabet': 'b',
            'data': hexlify('length2'),
            'success': True,
            'alignmentalphabet': 'ABCDEFGHIJKLMNOPQRSTUVXYZ'
        }
        sampleset2 = SampleSet(**sampleset2_data)
        sampleset2.save()
        response = self.client.get(reverse('VictimDetailView', kwargs={'victim_id': victim.id}))
        self.assertEqual(json.loads(response.content)['victim_ip'], '192.168.1.5')
        self.assertEqual(json.loads(response.content)['target_name'], 'ruptureit')
        self.assertEqual(json.loads(response.content)['attack_details'][0]['batch'], 0)
    def test_victimID_patch_state(self):
        """PATCH /victim/<id> toggles the attack state paused <-> running."""
        victim = Victim.objects.create(
            sourceip='192.168.1.5',
            target=self.target1,
        )
        data1 = {'state': 'paused'}
        data2 = {'state': 'running'}
        response = self.client.patch(reverse('VictimDetailView', kwargs={'victim_id': victim.id}), json.dumps(data1), content_type='application/json', )
        self.assertEqual(response.status_code, 200)
        # Re-fetch from the DB to confirm the state was persisted.
        paused_victim = Victim.objects.get(pk=victim.id)
        self.assertEqual(paused_victim.state, 'paused')
        response = self.client.patch(reverse('VictimDetailView', kwargs={'victim_id': victim.id}), json.dumps(data2), content_type='application/json', )
        restarted_victim = Victim.objects.get(pk=victim.id)
        self.assertEqual(restarted_victim.state, 'running')
def test_victimID_patch_delete(self):
victim = Victim.objects.create(
sourceip='192.168.1.5',
target=self.target1,
)
data1 = {'deleted': True}
data2 = {'deleted': False}
response = self.client.patch(reverse('VictimDetailView', kwargs={'victim_id': victim.id}), json.dumps(data1), content_type='application/json', )
self.assertEqual(response.status_code, 200)
deleted_victim = Victim.objects.get(pk=victim.id)
self.assertNotEqual(deleted_victim.trashed_at, None)
response = self.client.patch(reverse('VictimDetailView', kwar |
Distrotech/yum | yum/__init__.py | Python | gpl-2.0 | 305,350 | 0.004375 | #!/usr/bin/python -tt
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# Copyright 2005 Duke University
"""
The Yum RPM software updater.
"""
import os
import os.path
import rpm
import sys
def _rpm_ver_atleast(vertup):
    """ Check if rpm is at least the current vertup. Can return False/True/None
        as rpm hasn't had version info for a long time. """
    if not hasattr(rpm, '__version_info__'):
        return None
    try:
        # 4.8.x rpm used strings for the tuple members, so convert.
        vi = tuple(int(num) for num in rpm.__version_info__)
        return vi >= vertup
    except Exception:
        # Deliberately best-effort: a malformed version tuple just means
        # "unknown".  (Was a bare `except:`, which would also swallow
        # SystemExit/KeyboardInterrupt.)
        return None # Something went wrong...
import re
import types
import errno
import time
import glob
import fnmatch
import logging
import logging.config
import operator
import tempfile
import shutil
import yum.i18n
# This is required to make gaftonmode work...
_wrap_yum_i18n__ = yum.i18n._
def _(*args, **kwargs):
    """Translate a message; delegates to yum.i18n._ via a module-level alias
    so the underlying function can be rebound at runtime (needed for
    "gaftonmode", per the comment above the alias)."""
    return _wrap_yum_i18n__(*args, **kwargs)
_wrap_yum_i18n_P_ = yum.i18n.P_
def P_(*args, **kwargs):
    """Plural-aware translation; delegates to yum.i18n.P_ via a rebindable
    module-level alias (same indirection scheme as _ above exists in the
    original file for gaftonmode support)."""
    return _wrap_yum_i18n_P_(*args, **kwargs)
import config
from config import ParsingError, ConfigParser
import Errors
import rpmsack
import rpmUtils.updates
from rpmUtils.arch import archDifference, canCoinstall, ArchStorage, isMultiLibArch
from rpmUtils.miscutils import compareEVR
import rpmUtils.transaction
import comps
import pkgtag_db
from repos import RepoStorage
import misc
from parser import ConfigPreProcessor, varReplace
import transactioninfo
import urlgrabber
from urlgrabber.grabber import URLGrabber, URLGrabError
from urlgrabber.progress import format_number
from packageSack import packagesNewestByName, packagesNewestByNameArch, ListPackageSack
import depsolve
import plugins
import logginglevels
import yumRepo
import callbacks
import yum.history
import yum.fssnapshots
import yum.igroups
import update_md
import warnings
warnings.simplefilter("ignore", Errors.YumFutureDeprecationWarning)
from packages import parsePackages, comparePoEVR
from packages import YumAvailablePackage, YumLocalPackage, YumInstalledPackage
from packages import YumUrlPackage, YumNotFoundPackage
from constants import *
from yum.rpmtrans import RPMTransaction,SimpleCliCallBack
from yum.i18n import to_unicode, to_str, exception2msg
from yum.drpm import DeltaInfo, DeltaPackage
import string
import StringIO
from weakref import proxy as weakref
from urlgrabber.grabber import default_grabber
try:
import cashe
except ImportError:
cashe = None
__version__ = '3.4.3'
__version_info__ = tuple([ int(num) for num in __version__.split('.')])
# Setup a default_grabber UA here that says we are yum, done using the global
# so that other API users can easily add to it if they want.
# Don't do it at init time, or we'll get multiple additions if you create
# multiple YumBase() objects.
default_grabber.opts.user_agent += " yum/" + __version__
class _YumPreBaseConf:
    """This is the configuration interface for the :class:`YumBase`
    configuration. To change configuration settings such as whether
    plugins are on or off, or the value of debuglevel, change the
    values here. Later, when :func:`YumBase.conf` is first called, all
    of the options will be automatically configured.
    """
    def __init__(self):
        # Path of the yum config file and the install root.
        self.fn = '/etc/yum/yum.conf'
        self.root = '/'
        # Plugin bootstrap: whether to load plugins at all, and which
        # plugin types to enable.
        self.init_plugins = True
        self.plugin_types = (plugins.TYPE_CORE,)
        # Optional optparse parser handed through to plugins.
        self.optparser = None
        # None for the values below means "take the default from the
        # config file / environment when YumBase.conf is populated".
        self.debuglevel = None
        self.errorlevel = None
        self.disabled_plugins = None
        self.enabled_plugins = None
        self.syslog_ident = None
        self.syslog_facility = None
        self.syslog_device = None
        self.arch = None
        self.releasever = None
        self.uuid = None
class _YumPreRepoConf:
    """This is the configuration interface for the repos configuration
    configuration. To change configuration settings such what
    callbacks are used, change the values here. Later, when
    :func:`YumBase.repos` is first called, all of the options will be
    automatically configured.
    """
    def __init__(self):
        # Download/progress UI hooks (None = no UI attached yet).
        self.progressbar = None
        self.multi_progressbar = None
        # Grabber callbacks: general, on failure, and on user interrupt.
        self.callback = None
        self.failure_callback = None
        self.interrupt_callback = None
        # Interactive hooks: confirmation prompt and GPG key import.
        self.confirm_func = None
        self.gpg_import_func = None
        self.gpgca_import_func = None
        # Cache location/enablement overrides.
        self.cachedir = None
        self.cache = None
class _YumCostExclude:
    """ This excludes packages that are in repos. of lower cost than the passed
        repo. """

    def __init__(self, repo, repos):
        # Weak proxies: this helper must not keep the repo objects alive.
        self.repo = weakref(repo)
        self._repos = weakref(repos)

    def __contains__(self, pkgtup):
        # A pkgtup is excluded when some enabled repo cheaper than ours
        # already carries it.  The scan stops at the first repo whose cost
        # is not lower (presumably listEnabled() is cost-ordered -- confirm).
        threshold = self.repo.cost
        for candidate in self._repos.listEnabled():
            if candidate.cost >= threshold:
                break
            # Probe the pkgtup dict directly instead of searchNevra(): it is
            # faster, and the sqlitesack backend (the expected one) has it.
            if pkgtup in candidate.sack._pkgtup2pkgs:
                return True
        return False
class YumBase(depsolve.Depsolve):
"""This is a primary structure and base class. It houses the
objects and methods needed to perform most things in yum. It is
almost an abstract class in that you will need to add your own
class above it for most real use.
"""
    def __init__(self):
        """Set up an unconfigured YumBase.

        Heavy objects (config, rpmdb, package sack, comps, history, ...)
        are NOT created here: the ``_xxx`` attributes below are lazy-cache
        slots, populated elsewhere on first use (see e.g. close(), which
        checks them against None before touching them).
        """
        depsolve.Depsolve.__init__(self)
        # Lazy-cache slots; None means "not created yet".
        self._conf = None
        self._tsInfo = None
        self._rpmdb = None
        self._up = None
        self._comps = None
        self._history = None
        self._igroups = None
        self._pkgSack = None
        self._lockfile = None
        self._tags = None
        self._upinfo = None
        self._fssnap = None
        self._ts_save_file = None
        self.skipped_packages = []    # packages skip by the skip-broken code
        # Caches of names we already failed to find (available/installed).
        self._not_found_a = {}
        self._not_found_i = {}
        self.logger = logging.getLogger("yum.YumBase")
        self.verbose_logger = logging.getLogger("yum.verbose.YumBase")
        self._override_sigchecks = False
        self._repos = RepoStorage(self)
        self.repo_setopts = {} # since we have to use repo_setopts in base and
                               # not in cli - set it up as empty so no one
                               # trips over it later
        # Start with plugins disabled
        self.disablePlugins()
        self.localPackages = [] # for local package handling
        self.mediagrabber = None
        self.arch = ArchStorage()
        # Pre-configuration holders consumed when conf/repos are first built.
        self.preconf = _YumPreBaseConf()
        self.prerepoconf = _YumPreRepoConf()
        self.run_with_package_names = set()
        # Callbacks invoked from __del__ for extra teardown.
        self._cleanup = []
        self.exit_code = 0
        self.updateinfo_filters = {}
    def __del__(self):
        """Best-effort teardown on garbage collection: close repos/history,
        close the rpmdb, drop the yum lock, then run registered cleanups."""
        self.close()
        self.closeRpmDB()
        self.doUnlock()
        # call cleanup callbacks
        for cb in self._cleanup: cb()
    def close(self):
        """Close the history and repo objects."""
        # We don't want to create the object, so we test if it's been created
        # (accessing self.history/self.igroups would lazily construct them).
        if self._history is not None:
            self.history.close()
        if self._igroups is not None:
            self.igroups.close()
        if self._repos:
            self._repos.close()
def _transactionDataFactory(self):
"""Factory method returning Transact |
savi-dev/heat | heat/tests/test_neutron.py | Python | apache-2.0 | 46,371 | 0 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is dis | tributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mox
from testtools import skipIf
from heat.engine import clients
from heat.common import exception
from heat.common import template_format
from heat.engine import properties
from heat.engine import resource
from heat.engine import scheduler
from heat.engine.r | esources.neutron import net
from heat.engine.resources.neutron import subnet
from heat.engine.resources.neutron import router
from heat.engine.resources.neutron.neutron import NeutronResource as qr
from heat.openstack.common.importutils import try_import
from heat.tests.common import HeatTestCase
from heat.tests import fakes
from heat.tests import utils
neutronclient = try_import('neutronclient.v2_0.client')
qe = try_import('neutronclient.common.exceptions')
neutron_template = '''
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "Template to test Neutron resources",
"Parameters" : {},
"Resources" : {
"network": {
"Type": "OS::Neutron::Net",
"Properties": {
"name": "the_network",
"tenant_id": "c1210485b2424d48804aad5d39c61b8f",
"shared": true
}
},
"unnamed_network": {
"Type": "OS::Neutron::Net"
},
"admin_down_network": {
"Type": "OS::Neutron::Net",
"Properties": {
"admin_state_up": false
}
},
"subnet": {
"Type": "OS::Neutron::Subnet",
"Properties": {
"network_id": { "Ref" : "network" },
"tenant_id": "c1210485b2424d48804aad5d39c61b8f",
"ip_version": 4,
"cidr": "10.0.3.0/24",
"allocation_pools": [{"start": "10.0.3.20", "end": "10.0.3.150"}],
"dns_nameservers": ["8.8.8.8"]
}
},
"port": {
"Type": "OS::Neutron::Port",
"Properties": {
"device_id": "d6b4d3a5-c700-476f-b609-1493dd9dadc0",
"name": "port1",
"network_id": { "Ref" : "network" },
"fixed_ips": [{
"subnet_id": { "Ref" : "subnet" },
"ip_address": "10.0.3.21"
}]
}
},
"port2": {
"Type": "OS::Neutron::Port",
"Properties": {
"name": "port2",
"network_id": { "Ref" : "network" }
}
},
"router": {
"Type": "OS::Neutron::Router"
},
"router_interface": {
"Type": "OS::Neutron::RouterInterface",
"Properties": {
"router_id": { "Ref" : "router" },
"subnet_id": { "Ref" : "subnet" }
}
},
"gateway": {
"Type": "OS::Neutron::RouterGateway",
"Properties": {
"router_id": { "Ref" : "router" },
"network_id": { "Ref" : "network" }
}
}
}
}
'''
neutron_floating_template = '''
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "Template to test Neutron resources",
"Parameters" : {},
"Resources" : {
"port_floating": {
"Type": "OS::Neutron::Port",
"Properties": {
"network_id": "xyz1234",
"fixed_ips": [{
"subnet_id": "12.12.12.0",
"ip_address": "10.0.0.10"
}]
}
},
"floating_ip": {
"Type": "OS::Neutron::FloatingIP",
"Properties": {
"floating_network_id": "abcd1234",
}
},
"floating_ip_assoc": {
"Type": "OS::Neutron::FloatingIPAssociation",
"Properties": {
"floatingip_id": { "Ref" : "floating_ip" },
"port_id": { "Ref" : "port_floating" }
}
},
"router": {
"Type": "OS::Neutron::Router"
},
"gateway": {
"Type": "OS::Neutron::RouterGateway",
"Properties": {
"router_id": { "Ref" : "router" },
"network_id": "abcd1234"
}
}
}
}
'''
neutron_port_template = '''
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "Template to test Neutron resources",
"Parameters" : {},
"Resources" : {
"port": {
"Type": "OS::Neutron::Port",
"Properties": {
"network_id": "net1234",
"fixed_ips": [{
"subnet_id": "sub1234",
"ip_address": "10.0.3.21"
}]
}
}
}
}
'''
class NeutronTest(HeatTestCase):
    """Unit tests for NeutronResource helpers (no neutron client needed)."""
    def test_validate_properties(self):
        """value_specs may carry arbitrary extras, but never the reserved
        keys (shared/name/tenant_id) that have first-class properties."""
        vs = {'router:external': True}
        data = {'admin_state_up': False,
                'value_specs': vs}
        p = properties.Properties(net.Net.properties_schema, data)
        self.assertEqual(None, qr.validate_properties(p))
        vs['shared'] = True
        self.assertEqual('shared not allowed in value_specs',
                         qr.validate_properties(p))
        vs.pop('shared')
        vs['name'] = 'foo'
        self.assertEqual('name not allowed in value_specs',
                         qr.validate_properties(p))
        vs.pop('name')
        vs['tenant_id'] = '1234'
        self.assertEqual('tenant_id not allowed in value_specs',
                         qr.validate_properties(p))
        vs.pop('tenant_id')
        vs['foo'] = '1234'
        self.assertEqual(None, qr.validate_properties(p))
    def test_prepare_properties(self):
        """prepare_properties flattens value_specs into the top level and
        injects the resource name."""
        data = {'admin_state_up': False,
                'value_specs': {'router:external': True}}
        p = properties.Properties(net.Net.properties_schema, data)
        props = qr.prepare_properties(p, 'resource_name')
        self.assertEqual({'name': 'resource_name',
                          'router:external': True,
                          'admin_state_up': False}, props)
    def test_is_built(self):
        """ACTIVE/DOWN count as built, BUILD does not, anything else errors."""
        self.assertTrue(qr.is_built({
            'name': 'the_net',
            'status': 'ACTIVE'
        }))
        self.assertTrue(qr.is_built({
            'name': 'the_net',
            'status': 'DOWN'
        }))
        self.assertFalse(qr.is_built({
            'name': 'the_net',
            'status': 'BUILD'
        }))
        self.assertRaises(exception.Error, qr.is_built, {
            'name': 'the_net',
            'status': 'FROBULATING'
        })
@skipIf(neutronclient is None, 'neutronclient unavailable')
class NeutronNetTest(HeatTestCase):
    def setUp(self):
        """Stub out the neutron client network calls and the keystone
        client, and prepare an in-memory dummy database."""
        super(NeutronNetTest, self).setUp()
        self.m.StubOutWithMock(neutronclient.Client, 'create_network')
        self.m.StubOutWithMock(neutronclient.Client, 'delete_network')
        self.m.StubOutWithMock(neutronclient.Client, 'show_network')
        self.m.StubOutWithMock(clients.OpenStackClients, 'keystone')
        utils.setup_dummy_db()
    def create_net(self, t, stack, resource_name):
        """Build a Net resource from the parsed template `t`, run its create
        task to completion and assert it reached (CREATE, COMPLETE)."""
        rsrc = net.Net('test_net', t['Resources'][resource_name], stack)
        scheduler.TaskRunner(rsrc.create)()
        self.assertEqual((rsrc.CREATE, rsrc.COMPLETE), rsrc.state)
        return rsrc
def test_net(self):
clients.OpenStackClients.keystone().AndReturn(
fakes.FakeKeystoneClient())
neutronclient.Client.create_network({
'network': {
'name': u'the_network',
'admin_state_up': True,
'tenant_id': 'c1210485b2424d48804aad5d39c61b8f',
'shared': True}
}).AndReturn({"network": {
"status": "BUILD",
"subnets": [],
"name": "name",
"admin_state_up": False,
"shared": True,
"tenant_id": "c1210485b2424d48804aad5d39c61b8f",
"id": "fc68ea2c-b60b-4b4f-bd82-94ec81110766"
}})
neutronclient.Client.show_network(
'fc68ea2c-b60b-4b4f-bd82-94ec81110766'
).AndReturn({"network": {
"status": "BUILD",
"subnets": [],
"name": "name",
"admin_state_up": False,
"shared": True,
"tenant_id": |
pythondigest/pythondigest | digest/tests/test_import_importpython.py | Python | mit | 2,657 | 0.001505 | from django.test import TestCase
from mock import patch
from digest.management.commands.import_importpython import ImportPythonParser
from digest.utils import MockResponse, read_fixture
class ImportPythonWeeklyTest(TestCase):
    """Tests for ImportPythonParser against recorded newsletter fixtures.

    urlopen is patched so no network access happens; fixture files stand in
    for the real HTTP responses.  (A stray ``| `` dataset artifact inside
    test_correctly_parses_block has been removed.)
    """
    def setUp(self):
        self.url = "http://importpython.com/newsletter/no/60/"
        test_fixture = 'fixture_test_import_importpython_test_get_blocks.txt'
        self.patcher = patch(
            'digest.management.commands.import_importpython.urlopen')
        self.urlopen_mock = self.patcher.start()
        self.urlopen_mock.return_value = MockResponse(
            read_fixture(test_fixture))
        self.parser = ImportPythonParser()

    def tearDown(self):
        # Undo the urlopen patch so other test cases see the real function.
        self.patcher.stop()

    def test_correctly_creates_issue_urls(self):
        """Issue URL scheme changed over time: static file, draft, then /no/."""
        self.assertEqual(ImportPythonParser.get_issue_url(2),
                         "http://importpython.com/static/files/issue2.html")
        self.assertEqual(ImportPythonParser.get_issue_url(12),
                         "http://importpython.com/newsletter/draft/12")
        self.assertEqual(ImportPythonParser.get_issue_url(56),
                         "http://importpython.com/newsletter/no/56")
        with self.assertRaises(ValueError):
            ImportPythonParser.get_issue_url(-100)

    def test_correct_number_of_blocks_parsed(self):
        """The recorded issue #60 fixture contains exactly 25 news blocks."""
        blocks = self.parser.get_blocks(self.url)
        self.assertEqual(len(blocks), 25)

    def test_correctly_parses_block(self):
        """The first parsed block exposes link, title and content fields."""
        blocks = self.parser.get_blocks(self.url)
        block = blocks[0]
        self.assertEqual(block['link'],
                         "https://talkpython.fm/episodes/show/44/project-jupyter-and-ipython")
        self.assertEqual(block['title'],
                         "Project Jupyter and IPython Podcast Interview")
        self.assertEqual(block['content'],
                         "One of the fastest growing areas in Python is scientific computing. In scientific computing with Python, there are a few key packages that make it special. These include NumPy / SciPy / and related packages. The one that brings it all together, visually, is IPython (now known as Project Jupyter). That's the topic on episode 44 of Talk Python To Me. ")

    def test_correctly_gets_latest_url(self):
        """get_latest_issue_url resolves the newest issue from the index page."""
        test_latest = 'fixture_test_import_importpython_test_get_latest_url.txt'
        # Swap in the index-page fixture for this test only.
        self._old_return_value = self.urlopen_mock.return_value
        self.urlopen_mock.return_value = MockResponse(read_fixture(test_latest))
        latest_url = self.parser.get_latest_issue_url()
        self.assertEqual(latest_url,
                         "http://importpython.com/newsletter/no/72/")
|
claesenm/HPOlib | HPOlib/format_converter/convert.py | Python | gpl-3.0 | 4,811 | 0.003326 | #!/usr/bin/env python
##
# wrapping: A program making it easy to use hyperparameter
# optimization software.
# Copyright (C) 2013 Katharina Eggensperger and Matthias Feurer
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from argparse import ArgumentParser
import os
from string import upper
import sys
import tempfile

import smac_to_spearmint
import tpe_to_smac

__authors__ = ["Katharina Eggensperger", "Matthias Feurer"]
__contact__ = "automl.org"
def smac_to_spearmint_helper(space, save=""):
    """Convert a SMAC search-space file to SPEARMINT format.

    `save` is accepted only for signature symmetry with the other helpers;
    writing the result is handled by main().
    """
    # print "Convert %s from SMAC to SPEARMINT" % space
    return smac_to_spearmint.convert_smac_to_spearmint(space)
def smac_to_tpe_helper(space, save=""):
    """Placeholder: SMAC -> TPE conversion is not implemented yet.

    (A stray ``| `` artifact had split the ``def`` keyword; fixed.)
    """
    print("This is not yet implemented")
def spearmint_to_smac_helper(space, save=""):
    """Placeholder: SPEARMINT -> SMAC conversion is not implemented yet."""
    print "This is not yet implemented"
def spearmint_to_tpe_helper(space, save=""):
    """Placeholder: SPEARMINT -> TPE conversion is not implemented yet."""
    print "This is not yet implemented"
def tpe_to_spearmint_helper(space, save=""):
    """Convert a TPE (hyperopt) search-space file to SPEARMINT format.

    The conversion goes through SMAC as an intermediate representation
    (TPE -> SMAC -> SPEARMINT) via a temporary file.  `save` is unused;
    main() writes the result.
    """
    try:
        import hyperopt
    except ImportError:
        # NOTE(review): execution continues even without hyperopt; the
        # conversion below will then fail later -- confirm this is intended.
        print("Cannot find hyperopt. To use this converter, modify $PYTHONPATH to contain a hyperopt installation")
    # First convert to smac
    tmp = tpe_to_smac.convert_tpe_to_smac_from_file(space)
    # Wrap the descriptor returned by mkstemp instead of re-opening the path:
    # the previous code opened the file a second time and leaked the original
    # file descriptor.
    handle, tmp_file_name = tempfile.mkstemp()
    fh = os.fdopen(handle, 'w')
    fh.write(tmp)
    fh.close()
    # From smac convert to spearmint
    new_space = smac_to_spearmint.convert_smac_to_spearmint(tmp_file_name)
    os.remove(tmp_file_name)
    return new_space
def tpe_to_smac_helper(space, save=""):
    """Convert a TPE (hyperopt) search-space file to SMAC format.

    `save` is unused; main() writes the result.
    """
    try:
        import hyperopt
    except ImportError:
        # NOTE(review): execution continues even without hyperopt; the
        # conversion below will then fail later -- confirm this is intended.
        print "Cannot find hyperopt. To use this converter, modify $PYTHONPATH to contain a hyperopt installation"
    return tpe_to_smac.convert_tpe_to_smac_from_file(space)
def main():
    """Command-line entry point.

    Example::

        python convert.py --from SMAC --to TPE -f space.any -s space.else

    Parses the arguments, normalises the format names, dispatches to the
    right converter and prints the converted space (or writes it to --save).
    """
    prog = "python convert.py"
    description = "Automatically convert a searchspace from one format to another"
    parser = ArgumentParser(description=description, prog=prog)
    parser.add_argument("--from", dest="conv_from", choices=['SMAC', 'Smac', 'smac',
                        'TPE', 'Tpe', 'tpe', 'hyperopt',
                        'SPEARMINT', 'Spearmint', 'spearmint'],
                        default="", help="Convert from which format?", required=True)
    parser.add_argument("--to", dest="conv_to", choices=['SMAC', 'Smac', 'smac',
                        'TPE', 'Tpe', 'tpe', 'hyperopt',
                        'SPEARMINT', 'Spearmint', 'spearmint'],
                        default="", help="Convert to which format?", required=True)
    parser.add_argument("-f", "--file", dest="space",
                        default="", help="Where is the searchspace to be converted?", required=True)
    parser.add_argument("-s", "--save", dest="save",
                        default="", help="Where to save the new searchspace?")
    args, unknown = parser.parse_known_args()
    space = os.path.abspath(args.space)
    if not os.path.isfile(space):
        print("%s is not a valid path" % space)
        sys.exit(1)
    # Unifying strings
    args.conv_to = upper(args.conv_to)
    args.conv_from = upper(args.conv_from)
    if args.conv_from == "HYPEROPT":
        args.conv_from = "TPE"
    if args.conv_to == "HYPEROPT":
        # Bug fix: this line used '==' (a no-op comparison), so
        # '--to hyperopt' fell through to a KeyError in the dispatch table.
        args.conv_to = "TPE"
    if args.conv_to == args.conv_from:
        print("Converting from %s to %s makes no sense" % (args.conv_to, args.conv_from))
    # Dispatch table: options[<from>][<to>] -> conversion helper.
    options = {'SMAC': {'SPEARMINT': smac_to_spearmint_helper,
                        'TPE': smac_to_tpe_helper},
               'SPEARMINT': {'SMAC': spearmint_to_smac_helper,
                             'TPE': spearmint_to_tpe_helper},
               'TPE': {'SPEARMINT': tpe_to_spearmint_helper,
                       'SMAC': tpe_to_smac_helper}
               }
    new_space = options[args.conv_from][args.conv_to](space, args.save)
    if args.save != "":
        fh = open(args.save, 'w')
        fh.write(new_space)
        fh.close()
    else:
        print(new_space)
if __name__ == "__main__":
main() |
florianfesti/boxes | boxes/generators/dispenser.py | Python | gpl-3.0 | 4,538 | 0.001544 | #!/usr/bin/env python3
# Copyright (C) 2013-2016 Florian Festi
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from boxes import *
class FrontEdge(edges.BaseEdge):
    """An edge with room to get your fingers around cards"""
    def __call__(self, length, **kw):
        # Draws the edge as a symmetric outline with a centered cut-out
        # (half of `length` wide) so stacked items can be gripped.
        # Cut-out depth: two thirds of the tray depth y.
        depth = self.settings.y * 2 / 3
        t = self.settings.thickness
        # Corner radius, capped so the rounded corners fit the cut-out.
        r = min(depth-t, length/4)
        self.edge(length/4-t, tabs=2)
        self.corner(90, t)
        self.edge(depth-t-r, tabs=2)
        self.corner(-90, r)
        # Straight bottom of the cut-out between the two rounded corners.
        self.edge(length/2 - 2*r)
        self.corner(-90, r)
        self.edge(depth-t-r, tabs=2)
        self.corner(90, t)
        self.edge(length/4-t, tabs=2)
class Dispenser(Boxes):
    """Dispenser for stackable (flat) items of same size"""

    description = """Set *bottomheight* to 0 for a wall mounting variant.
Please add mounting holes yourself."""

    ui_group = "Misc"

    def __init__(self):
        Boxes.__init__(self)
        self.addSettingsArgs(edges.FingerJointSettings)
        self.addSettingsArgs(edges.StackableSettings)
        self.buildArgParser(x=100, y=100, h=100)
        self.argparser.add_argument(
            "--slotheight", action="store", type=float, default=10.0,
            help="height of the dispenser slot / items (in mm)")
        self.argparser.add_argument(
            "--bottomheight", action="store", type=float, default=0.0,
            help="height underneath the dispenser (in mm)")
        self.argparser.add_argument(
            "--sideedges", action="store", type=ArgparseEdgeType("Fh"),
            choices=list("Fh"), default="F",
            help="edges used for holding the front panels and back")

    def render(self):
        """Lay out floor, lid, walls and front panels.

        Two variants: with a stand (bottomheight > 0, stackable "š" bottom
        edges) or wall-mounted (plain "h" bottom edges).  Stray ``| ``
        artifacts in the original argparse call and the ``hh`` assignment
        have been removed.
        """
        x, y, h, hs = self.x, self.y, self.h, self.slotheight
        hb = self.bottomheight
        t = self.thickness
        se = self.sideedges
        fe = FrontEdge(self, self)
        # Reduce the stand height by the spacing the "š" edge itself adds
        # (presumably the stackable bottom edge -- see StackableSettings).
        hb = max(0, hb-self.edges["š"].spacing())
        # Total wall height: item space plus floor and stand when present.
        th = h + (hb+t if hb else 0.0)
        # Height at which the floor's finger holes sit in the walls.
        hh = hb + 0.5*t
        with self.saved_context():
            self.rectangularWall(x, y, [fe, "f", "f", "f"],
                                 label="Floor", move="right")
            self.rectangularWall(x, y, "eeee", label="Lid bottom", move="right")
            self.rectangularWall(x, y, "EEEE", label="Lid top", move="right")
            self.rectangularWall(x, y, "ffff", move="up only")
        if hb:
            # Front edge leaves a slot of height hs above floor and stand.
            frontedge = edges.CompoundEdge(self, "Ef", (hb+t+hs, h-hs))
            self.rectangularWall(
                y, th, ("š", frontedge, "e", "f"), ignore_widths=[6],
                callback=[lambda:self.fingerHolesAt(0, hh, y, 0)],
                label="Left wall", move="right mirror")
            self.rectangularWall(
                x, th, ["š", se, "e", se], ignore_widths=[1, 6],
                callback=[lambda:self.fingerHolesAt(0, hh, x, 0)],
                label="Back wall", move="right")
            self.rectangularWall(
                y, th, ("š", frontedge, "e", "f"), ignore_widths=[6],
                callback=[lambda:self.fingerHolesAt(0, hh, y, 0)],
                label="Right wall", move="right")
        else:
            # Wall-mounted: floor sits at the bottom, slot directly above it.
            frontedge = edges.CompoundEdge(self, "Ef", (hs, h-hs))
            self.rectangularWall(
                y, th, ("h", frontedge, "e", "f"),
                label="Left wall", ignore_widths=[6], move="right mirror")
            self.rectangularWall(
                x, th, ["h", se, "e", se], ignore_widths=[1, 6],
                label="Back wall", move="right")
            self.rectangularWall(
                y, th, ("h", frontedge, "e", "f"),
                label="Right wall", ignore_widths=[6], move="right")
        self.rectangularWall(x/3, h-hs, "eee" + se,
                             label="Left front", move="right")
        self.rectangularWall(x/3, h-hs, "eee" + se,
                             label="Right front", move="mirror right")
|
paulhtremblay/boto_emr | examples/process_crawl_text1.py | Python | mit | 1,482 | 0.013495 | from pyspark.sql import Row
#import boto_emr.parse_marc as parse_marc
from pyspark import SparkContext
import datetime
def process_by_fields(l):
host = None
date = None
text = None
ip_address = None
warc_type = None
for line in l[1]:
fields = line.split(':', 1)
if fields and len(fields) == 2:
if fields[0] == 'hostname':
host = fields[1].strip()
elif fields[0] == 'WARC-Date':
date = fields[1].strip()
elif fields[0] == 'WARC-IP-Address':
| ip_address = fields[1].strip()
elif fields[0] == 'WARC-Type':
warc_type = fields[1].strip()
else:
text = line
return Row(host =host, date = date, text = text,
ip_address = ip_address, warc_type = warc_type)
def process_file(my_iter):
the_id = "init"
final = []
for chunk in my_iter:
lin | es = chunk[1].split("\n")
for line in lines:
if line[0:15] == 'WARC-Record-ID:':
the_id = line[15:]
final.append(Row(the_id = the_id, line = line))
return iter(final)
def get_hdfs_path():
return "/mnt/temp"
sc = SparkContext(appName = "test crawl {0}".format(datetime.datetime.now()))
rdd = sc.wholeTextFiles(get_hdfs_path())\
.mapPartitions(process_file)\
.map(lambda x: (x.the_id, x.line))\
.groupByKey()\
.map(process_by_fields)
print(rdd.take(10))
|
xlqian/navitia | source/jormungandr/jormungandr/interfaces/v1/decorators.py | Python | agpl-3.0 | 1,461 | 0.000684 | # This file is part of Navitia,
# the software to build cool stuff with public transport.
#
# Hope you'll enjoy and contribute to this project,
# powered by Canal TP (www.canaltp.fr).
# Help us simplify mobility and open public transport:
# a non ending quest to the responsive locomotion way of traveling!
#
# LICENCE: This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License a | s published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If | not, see <http://www.gnu.org/licenses/>.
#
# Stay tuned using
# twitter @navitia
# channel `#navitia` on riot https://riot.im/app/#/room/#navitia:matrix.org
# https://groups.google.com/d/forum/navitia
# www.navitia.io
from __future__ import absolute_import, print_function, unicode_literals, division
from jormungandr.interfaces.v1.serializer import serialize_with
def get_serializer(serpy):
return serialize_with(serpy)
def get_obj_serializer(obj):
return get_serializer(serpy=obj.output_type_serializer)
|
wooga/play-deliver | playdeliver/listing.py | Python | mit | 1,526 | 0 | """This module helps for uploading and downloading listings from/to play."""
import os
import json
from file_util import mkdir_p
from file_util import list_dir_abspath
def upload(client, source_dir):
"""Upload listing files in source_dir. folder herachy."""
print('')
print('upload store listings')
print('---------------------')
listings_folder = os.path.join(source_dir, 'listings')
langfolders = filter(os.path.isdir, list_dir_abspath(listings_folder))
for language_dir in langfolders:
language = os.path.basename(language_dir)
with open(os.path.join(language_dir, 'listing.json')) as listings_file:
listing = json.load(listings_file)
listing_response = client.update(
'listings', language=language, body=listing)
print(' Listing for language %s was updated.' %
listing_response['language'])
def download(client, target_dir):
"""Download listing files from play and saves them into folder herachy."""
print( | '')
print('download store listings')
print('---------------------')
listings = client.list('listings')
for listing in listings:
path = os.path.join(target_dir, 'listings', listing['language'])
mkdir_p(path)
with open(os.path.join(path, 'listing.json'), 'w') as outfile | :
print("save listing for {0}".format(listing['language']))
json.dump(
listing, outfile, sort_keys=True,
indent=4, separators=(',', ': '))
|
richardcornish/django-itunespodcast | podcast/migrations/0007_auto_20170920_2212.py | Python | bsd-3-clause | 429 | 0 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-09 | -20 22:12
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('podcast', '0006_auto_20170920_1833'),
]
operations = [
migrati | ons.RenameField(
model_name='episode',
old_name='summary',
new_name='notes',
),
]
|
hes19073/hesweewx | bin/user/hausWD.py | Python | gpl-3.0 | 15,327 | 0.004176 | # -*- coding: utf-8 -*-
##This program is free software; you can redistribute it and/or modify it under
##the terms of the GNU General Public License as published by the Free Software
##Foundation; either version 2 of the License, or (at your option) any later
##version.
##
##This program is distributed in the hope that it will be useful, but WITHOUT
##ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
##FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
##details.
##
## Version: 0.3 Date: 20 Februar 2020
##
## Revision History
## 20 Februar 2020 v0.3 - Zaehler und Delta Preise
## 02 October 2019 v0.2 - Preise Gas Wasser Strom
## 26 September 2019 v0.1 - Initial implementation
##
## *Total = Zaehlerdifferenz Verbrauch
## *Deltawerte = Zaehlimpule s0
## Werte *Total Differenz aus Handeintrag und alter Zaehlerstand
## Werte *Delta Differenz aus Impuls Neu und Impuls alt
#
# Service to calculate Preise for Wasser-, Strom- und Gasverbrauch
#
# in weewx.config
# Add this service to weewx.conf, then restart weewx
#[Engine]
# [[Services]]
# process_services = ..., user,hausWD,HausWdCalculate,
# archive_service = ...., user,hausWD,HausWdArchive
#[HausWD]
# # Werte in Euro
# Brennwert = 9.8
# Zustandszahl = 0.95
# Gaspreis = 0.0619
# Strompreis = 0.272
# Grundpreisstrom = 123.32
# Wasserpreis = 1.47
# Abwasserpreis = 2.65
# Grundpreiswasser = 12.98
#
#
from __future__ import absolute_import
from __future__ import print_function
import logging
import datetime
import time
import weewx
import weedb
import weeutil.config
import weeutil.logger
import weewx.engine
import weewx.manager
import weeutil.weeutil
from weewx.units import convert, obs_group_dict
from weeutil.config import search_up, accumulateLeaves
from weeutil.weeutil import to_float
log = logging.getLogger(__name__)
HAUSWD_VERSION = '0.3'
#===============================================================================
# Class HausWdCalculate
#===============================================================================
class HausWdCalculate(weewx.engine.StdService):
def __init__(self, engine, config_dict):
super(HausWdCalculate, self).__init__(engine, config_dict)
d = config_dict.get('HausWD', {})
self.BrennWert = weeutil.weeutil.to_float(d.get('Brennwert', 9.8))
self.ZustandsZahl = weeutil.weeutil.to_float(d.get('Zustandszahl', 0.95))
self.GasPreis = weeutil.weeutil.to_float(d.get('Gaspreis', 0.0619))
self.StromPreis = weeutil.weeutil.to_float(d.get('Strompreis', 0.272))
self.WasserPreis = weeutil.weeutil.to_float(d.get('Wasserpreis', 1.47))
self.AbwasserPreis = weeutil.weeutil.to_float(d.get('Abwasserpreis', 2.65))
# bind ourself to both loop and archive events
self.bind(weewx.NEW_LOOP_PACKET, self.new_loop_packet)
self.bind(weewx.NEW_ARCHIVE_RECORD, self.new_archive_record)
def new_loop_packet(self, event):
data_x = {}
if 'gasTotal' in event.packet:
data_x['gasZ_m3'] = event.packet['gasTotal']
data_x['gasZ_kWh'] = event.packet['gasTotal'] * self.BrennWert * self.ZustandsZahl
data_x['gasZ_preis'] = event.packet['gasTotal'] * self.BrennWert * self.ZustandsZahl * self.GasPreis
else:
data_x['gasZ_m3'] = 0.0
data_x['gasZ_kWh'] = 0.0
data_x['gasZ_preis'] = 0.0
if 'eleTotal' in event.packet:
data_x['eleZ_kWh'] = event.packet['eleTotal']
data_x['eleZ_preis'] = event.packet['eleTotal'] * self.StromPreis
else:
data_x['eleZ_kWh'] = 0.0
data_x['eleZ_preis'] = 0.0
if 'eleATotal' in event.packet:
data_x['eleAZ_kWh'] = event.packet['eleATotal']
data_x['eleAZ_preis'] = event.packet['eleATotal'] * self.StromPreis
else:
data_x['eleAZ_m3'] = 0.0
data_x['eleAZ_preis'] = 0.0
if 'elePVTotal' in event.packet:
data_x['elePVZ_kWh'] = event.packet['elePVTotal']
data_x['elePVZ_preis'] = event.packet['elePVTotal'] * self.StromPreis
else:
data_x['elePVZ_kWh'] = 0.0
data_x['elePVZ_preis'] = 0.0
if 'wasTotal' in event.packet and 'wasATotal' in event.packet:
was_new = event.packet['wasTotal']
waa_new = event.packet['wasATotal']
data_x['wasZ_m3'] = was_new
data_x['wasZ_preis'] = was_new * self.WasserPreis
data_x['wasAZ_m3'] = waa_new
data_x['wasAZ_preis'] = waa_new * self.WasserPreis
data_x['wasG_preis'] = (was_new * self.WasserPreis) + ((was_new - waa_new) * self.AbwasserPreis)
else:
data_x['wasZ_m3'] = 0.0
data_x['wasAZ_m3'] = 0.0
data_x['wasZ_preis'] = 0.0
data_x['wasAZ_preis'] = 0.0
# Wertung der Impuls Zaehler Werte
""" read data impulse for calculate preis """
if 'gasDelta' in event.packet:
gas_new = event.packet['gasDelta'] * 0.01
data_x['gas_m3'] = gas_new
data_x['gas_kWh'] = gas_new * self.BrennWert * self.ZustandsZahl
data_x['gas_preis'] = gas_new * self.BrennWert * self.ZustandsZahl * self.GasPreis
else:
data_x['gas_m3'] = 0.0
data_x['gas_kWh'] = 0.0
data_x['gas_preis'] = 0.0
if 'eleDelta' in event.packet:
ele_new = event.packet['eleDelta'] * 0.001
data_x['ele_kWh'] = ele_new
data_x['ele_preis'] = ele_new * self.StromPreis
else:
data_x['ele_kWh'] = 0.0
data_x['ele_preis'] = 0.0
if 'eleADelta' in event.packet:
ela_new = event.packet['eleADelta'] * 0.001
data_x['eleA_kWh'] = ela_new
data_x['eleA_preis'] = ela_new * self.StromPreis
else:
data_x['eleA_kWh'] = 0.0
data_x['eleA_preis'] = 0.0
if 'elePVDelta' in event.packet:
elp_new = event.packet['elePVDelta'] * 0.001
data_x['elePV_kWh'] = elp_new
data_x['elePV_preis'] = elp_new * self.StromPreis
else:
data_x['elePV_kWh'] = 0.0
data_x['elePV_preis'] = 0.0
if 'wasDelta' in event.packet and 'wasADelta' in event.packet:
#was_new = event.packet['wasDelta'] * 0.001
#waa_new = event.packet['wasADelta'] * 0.001
was_new = event.packet['wasDelta']
waa_new = event.packet['wasADelta']
data_x['was_m3'] = was_new
data_x['wasA_m3'] = waa_new
data_x['was_preis'] = was_new * self.WasserPreis
data_x['wasA_preis'] = waa_new * self.WasserPreis
data_x['wasG_preis'] = (was_new * self.WasserPreis) + ((was_new - waa_new) * self.AbwasserPreis)
else:
data_x['was_m3'] = 0.0
data_x['wasA_m3'] = 0.0
data_x['was_preis'] = 0.0
data_x['wasA_preis'] = 0.0
data_x['wasG_preis'] = 0.0
event.p | acket.update(data_x)
def new_archive_ | record(self, event):
data_x = {}
if 'gasTotal' in event.record:
data_x['gasZ_m3'] = event.record['gasTotal']
data_x['gasZ_kWh'] = event.record['gasTotal'] * self.BrennWert * self.ZustandsZahl
data_x['gasZ_preis'] = event.record['gasTotal'] * self.BrennWert * self.ZustandsZahl * self.GasPreis
else:
data_x['gasZ_m3'] = 0.0
data_x['gasZ_kWh'] = 0.0
data_x['gasZ_preis'] = 0.0
if 'eleTotal' in event.record:
data_x['eleZ_kWh'] = event.record['eleTotal']
data_x['eleZ_preis'] = event.record['eleTotal'] * self.StromPreis
else:
data_x['eleZ_kWh'] = 0.0
data_x['eleZ_preis'] = 0.0
if 'eleATotal' in event.record:
data_x['eleAZ_kWh'] = event.record['eleATotal']
data_x['eleAZ_preis'] = event. |
zhaobin19918183/zhaobinCode | zhaobin/mysite/photologue/management/commands/plcache.py | Python | gpl-3.0 | 1,406 | 0.002134 | from django.core.management.base import BaseCommand, CommandError
from optparse import make_option
from photologue.models import PhotoSize, ImageModel
class Command(BaseCommand):
option_list = BaseCommand.option_list + (
make_option('--reset', '-r', action='store_true', dest='reset', help='Reset photo cache before generating'),
)
help = ('Manages Photologue cache file for the given sizes.')
args = '[sizes]'
requires_model_validation = True
can_import_settings = True
def handle(self, *args, **options):
| return create_cache(args, options)
def create_cache(sizes, options):
"""
Creates | the cache for the given files
"""
reset = options.get('reset', None)
size_list = [size.strip(' ,') for size in sizes]
if len(size_list) < 1:
sizes = PhotoSize.objects.filter(pre_cache=True)
else:
sizes = PhotoSize.objects.filter(name__in=size_list)
if not len(sizes):
raise CommandError('No photo sizes were found.')
print 'Caching photos, this may take a while...'
for cls in ImageModel.__subclasses__():
for photosize in sizes:
print 'Cacheing %s size images' % photosize.name
for obj in cls.objects.all():
if reset:
obj.remove_size(photosize)
obj.create_size(photosize)
|
3dfxsoftware/cbss-addons | product_do_merge/wizard/__init__.py | Python | gpl-2.0 | 26 | 0 | import base_product_merg | e
| |
xeroc/python-graphenelib | tests/test_transactions.py | Python | mit | 11,452 | 0.001834 | # -*- coding: utf-8 -*-
import unittest
from pprint import pprint
from binascii import hexlify
from datetime import datetime, timedelta, timezone
from .fixtures import (
formatTimeFromNow,
timeformat,
Account_create,
Operation,
Signed_Transaction,
MissingSignatureForKey,
PrivateKey,
PublicKey,
)
prefix = "GPH"
wif = "5KQwrPbwdL6PhXujxW37FSSQZ1JiwsST4cqQzDeyXtP79zkvFD3"
ref_block_num = 34294
ref_block_prefix = 3707022213
expiration = "2016-04-06T08:29:27"
class Testcases(unittest.TestCase):
def doit(self, printWire=False):
tx = Signed_Transaction(
ref_block_num=ref_block_num,
ref_block_prefix=ref_block_prefix,
expiration=expiration,
operations=[self.op],
)
self.assertEqual(tx.id, "0e67819255826ebe19c81f850cb8bf880a5ea9be")
# No let's wrap ops in Operation!
tx = Signed_Transaction(
ref_block_num=ref_block_num,
ref_block_prefix=ref_block_prefix,
expiration=expiration,
operations=[Operation(self.op)],
)
self.assertEqual(tx.id, "0e67819255826ebe19c81f850cb8bf880a5ea9be")
# Sign with prefix
tx = tx.sign([wif], chain=prefix)
tx.verify([PrivateKey(wif).pubkey], prefix)
txWire = hexlify(bytes(tx)).decode("ascii")
# Sign with manual chain id object
tx2 = tx.sign(
[wif],
chain={
"chain_id": "b8d1603965b3eb1acba27e62ff59f74efa3154d43a4188d381088ac7cdf35539",
"core_symbol": "CORE",
"prefix": "GPH",
},
)
tx2.verify([PrivateKey(wif).pubkey], "GPH")
txWire2 = hexlify(bytes(tx)).decode("ascii")
# identify by chain id
tx3 = tx.sign(
[wif],
chain="b8d1603965b3eb1acba27e62ff59f74efa3154d43a4188d381088ac7cdf35539",
)
tx3.verify([PrivateKey(wif).pubkey], "GPH")
txWire3 = hexlify(bytes(tx)).decode("ascii")
if printWire:
print()
print(txWire)
print()
# Compare expected result with test unit
self.assertEqual(self.cm[:-130], txWire[:-130])
self.assertEqual(self.cm[:-130], txWire2[:-130])
self.assertEqual(self.cm[:-130], txWire3[:-130])
def test_signed_transaction(self):
self.op = Account_create(
**{
"fee": {"amount": 1467634, "asset_id": "1.3.0"},
"registrar": "1.2.33",
"referrer": "1.2.27",
"referrer_percent": 3,
"name": "foobar-f124",
"owner": {
"weight_threshold": 1,
"account_auths": [],
"key_auths": [
["GPH6pbVDAjRFiw6fkiKYCrkz7PFeL7XNAfefrsREwg8MKpJ9VYV9x", 1]
],
"address_auths": [],
},
"active": {
"weight_threshold": 1,
"account_auths": [],
"key_auths": [
["GPH6pbVDAjRFiw6fkiKYCrkz7PFeL7XNAfefrsREwg8MKpJ9VYV9x", 1]
],
"address_auths": [],
},
"options": {
"memo_key": "GPH5TPTziKkLexhVKsQKtSpo4bAv5RnB8oXcG4sMHEwCcTf3r7dqE",
"voting_account": "1.2.5",
"num_witness": 0,
"num_committee": 0,
"votes": [],
"extensions": [],
},
"extensions": {},
"prefix": "GPH",
}
)
tx = Signed_Transaction(
ref_block_num=ref_block_num,
ref_block_prefix=ref_block_prefix,
expiration=expiration,
operations=[self.op],
)
self.assertIn("chain_id", tx.getChainParams("GPH"))
self.assertIn(
"chain_id",
tx.getChainParams(
{
"chain_id": "b8d1603965b3eb1acba27e62ff59f74efa3154d43a4188d381088ac7cdf35539",
"core_symbol": "CORE",
"prefix": "GPH",
}
),
)
with self.assertRaises(ValueError):
self.assertIn(
"chain_id", tx.getChainParams({"core_symbol": "CORE", "prefix": "GPH"})
)
with self.assertRaises(ValueError):
self.assertIn("chain_id", tx.getChainParams(list()))
tx.sign([wif])
# Test for duplicates, does not raise!
tx.sign([wif, wif])
tx.verify()
with self.assertRaises(ValueError):
tx.verify(["GPH6pbVDAjRFiw6fkiKYCrkz7PFeL7XNAfefrsREwg8MKpJ9VYV9x"])
with self.assertRaises(MissingSignatureForKey):
tx.verify(
[PublicKey("GPH6pbVDAjRFiw6fkiKYCrkz7PFeL7XNAfefrsREwg8MKpJ9VYV9x")]
)
tx.verify([PrivateKey(wif).pubkey])
def test_create_account(self):
self.op = Account_create(
**{
"fee": {"amount": 1467634, "asset_id": "1.3.0"},
"registrar": "1.2.33",
"referrer": "1.2.27",
"referrer_percent": 3,
"name": "foobar-f124",
"owner": {
"weight_threshold": 1,
"account_auths": [],
"key_auths": [
["GPH6pbVDAjRFiw6fkiKYCrkz7PFeL7XNAfefrsREwg8MKpJ9VYV9x", 1],
["GPH6zLNtyFVToBsBZDsgMhgjpwysYVbsQD6YhP3kRkQhANUB4w7Qp", 1],
],
"address_auths": [],
},
"active": {
"weight_threshold": 1,
"account_auths": [],
"key_auths": [
["GPH6pbVDAjRFiw6fkiKYCrkz7PFeL7XNAfefrsREwg8MKpJ9VYV9x", 1],
["GPH6zLNtyFVToBsBZDsgMhgjpwysYVbsQD6YhP3kRkQhANUB4w7Qp", 1],
["GPH8CemMDjdUWSV5wKotEimhK6c4dY7p2PdzC2qM1HpAP8aLtZfE7", 1],
],
"address_auths": [],
},
"options": {
"memo_key": "GPH5TPTziKkLexhVKsQKtSpo4bAv5RnB8oXcG4sMHEwCcTf3r7dqE",
"voting_account": "1.2.5",
"num_witness": 0,
"num_committee": 0,
"votes": [],
"extensions": [],
},
"extensions": {
"buyback_options": {
"asset_to_buy": "1.3.127",
"asset_to_buy_issuer": "1.2.31",
"markets": ["1.3.20"],
},
"null_ext": {},
"owner_special_authority": [
1,
{"asset": "1.3.127", "num_top_holders": 10},
],
},
"prefix": "GPH",
}
)
self.cm = (
"f68585abf4dce7c804570105f26416000000000000211b03000b666f"
"6f6261722d6631323401000000000202fe8cc11cc8251de6977636b5"
"5c1ab8a9d12b0b26154ac78e56e7c4257d8bcf6901000314aa202c91"
"58990b3ec51a1aa49b2ab5d300c97b391df3beb34bb74f3c62699e01"
"000001000000000303b453f46013fdbccb90b09ba169c388c34d8445"
"4a3b9fbec68d5a7819a734fca0010002fe8cc11cc8251de6977636b5"
"5c1ab8a9d12b0b26154ac78e56e7c4257d8bcf6901000314aa202c91"
"58990b3ec51a1aa49b2ab5d300c97b391df3beb34bb74f3c62699e01"
"0000024ab336b4b14ba6d881675d1c782912783c43dbbe31693aa710"
| "ac1896bd7c3d610500000000000000000120508168b9615d48bd1184"
"6b3b9bcf000d1424a7915fb1cfa7f61150b5435c060b3147c056a1f8"
"89633c43d1b88cb463e8083fa2b62a585af9e1b7a7c23d83ae78"
)
self.doit()
| def test_create_account_sort_keys(self):
self.op = Account_create(
**{
"fee": {"amount": 1467634, "asset_id": "1.3.0"},
"registrar": "1.2. |
won21kr/pydokan | src/old/dokanMemTools.py | Python | gpl-3.0 | 1,950 | 0.029744 | from ctypes import *
debug = False
def setDwordByPoint(valueAddress, value):
'''
valueAddress[0] = value && 0xff
valueAddress[1] = (value >> 8) && 0xff
'''
i = 0
while i < 4:
#print value, i
memset(valueAddress+i, value&0xff | , 1)
i += 1
value >>= 8
return valueAddress + 4
def setLongLongByPoint(valueAddress, value):
setDwordByPoint(valueAddress, value & 0xffffffff)
setDwordByPoint(valueAddress+4, (value>>32) & 0xffffffff)
return valueAddress + 8
def setStringByPoint(valueAddress, value, length):
cnt = 0
for i in value:
cnt += 2
if cnt+ | 2 > length:
break
#0061: u'a' -> 0x00000000: 61, 0x00000001: 00
memset(valueAddress, ord(i)&0xff, 1)
valueAddress += 1
memset(valueAddress, (ord(i)>>8)&0xff, 1)
valueAddress += 1
memset(valueAddress, 0, 1)
valueAddress += 1
memset(valueAddress, 0, 1)
return valueAddress + length
def setFileTimeByPoint(valueAddress, value):
valueAddress = setDwordByPoint(valueAddress, value.dwLowDateTime)
valueAddress = setDwordByPoint(valueAddress, value.dwHighDateTime)
return valueAddress
def setByHandleFileInformationByPoint(valueAddress, value):
valueAddress = setDwordByPoint(valueAddress, value.dwFileAttributes)
valueAddress = setFileTimeByPoint(valueAddress, value.ftCreationTime)
valueAddress = setFileTimeByPoint(valueAddress, value.ftLastAccessTime)
valueAddress = setFileTimeByPoint(valueAddress, value.ftLastWriteTime)
valueAddress = setDwordByPoint(valueAddress, value.dwVolumeSerialNumber)
valueAddress = setDwordByPoint(valueAddress, value.nFileSizeHigh)
valueAddress = setDwordByPoint(valueAddress, value.nFileSizeLow)
valueAddress = setDwordByPoint(valueAddress, value.nNumberOfLinks)
valueAddress = setDwordByPoint(valueAddress, value.nFileIndexHigh)
valueAddress = setDwordByPoint(valueAddress, value.nFileIndexLow)
return valueAddress
#def setByPoint(valueAddress, value):
|
coderiot/pyexchange | pyexchange/exchange/bitstamp.py | Python | gpl-2.0 | 2,316 | 0 | #!/usr/bin/env python
# encoding: utf-8
from datetime import datetime
from pyexchange.exchange import models
base_url = "https://www.bitstamp.net/api"
class Bitstamp(models.Exchange):
"""Docstring for Bitstamp """
_markets_map = {'btc_usd': 'btc_usd'}
def __init__(self, market="btc_usd"):
| """@todo: to be defined1
:currency: @todo
"""
self.market = market
def depth(self):
"""@todo: Docstring for depth
:r | eturns: @todo
"""
url = "/".join([base_url, 'order_book/'])
resp = self._request('GET', url).json()
asks = []
for p, a in resp['asks']:
asks.append(models.Order(price=self._create_decimal(p),
amount=self._create_decimal(a)))
bids = []
for p, a in resp['bids']:
bids.append(models.Order(price=self._create_decimal(p),
amount=self._create_decimal(a)))
return asks, bids
def ticker(self):
"""@todo: Docstring for ticker
:returns: @todo
"""
url = "/".join([base_url, 'ticker/'])
resp = self._request('GET', url).json()
return models.Ticker(
high=self._create_decimal(resp['high']),
low=self._create_decimal(resp['low']),
last=self._create_decimal(resp['last']),
buy=self._create_decimal(resp['bid']),
sell=self._create_decimal(resp['ask']),
vol=self._create_decimal(resp['volume']))
def trades(self):
"""@todo: Docstring for trades
:returns: @todo
"""
url = "/".join([base_url, 'transactions/'])
resp = self._request('GET', url).json()
trades = []
for t in resp:
date = datetime.fromtimestamp(int(t['date']))
amount = self._create_decimal(t['amount'])
price = self._create_decimal(t['price'])
tid = t['tid']
trades.append(models.Trade(date=date,
price=price,
amount=amount,
tid=tid))
return trades
|
sevein/archivematica | src/dashboard/src/components/api/views.py | Python | agpl-3.0 | 22,892 | 0.002272 | # This file is part of Archivematica.
#
# Copyright 2010-2013 Artefactual Systems Inc. <http://artefactual.com>
#
# Archivematica is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Archivematica is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Archivematica. If not, see <http://www.gnu.org/licenses/>.
# stdlib, alphabetical
import base64
import json
import shutil
import logging
import os
import uuid
import re
# Core Django, alphabetical
from django.db.models import Q
import django.http
from django.views.decorators.http import require_http_methods
# External dependencies, alphabetical
from annoying.functions import get_object_or_None
from tastypie.authentication import ApiKeyAuthentication
# This project, alphabetical
import archivematicaFunctions
from components.filesystem_ajax import views as filesystem_ajax_views
from components.unit import views as unit_views
from components import helpers
from main import models
from mcpserver import Client as MCPServerClient
LOGGER = logging.getLogger('archivematica.dashboard')
SHARED_DIRECTORY_ROOT = helpers.get_server_config_value('sharedDirectory')
UUID_REGEX = re.compile(r'^[0-9A-F]{8}-[0-9A-F]{4}-4[0-9A-F]{3}-[89AB][0-9A-F]{3}-[0-9A-F]{12}$', re.IGNORECASE)
def authenticate_request(request):
error = None
api_auth = ApiKeyAuthentication()
authorized = api_auth.is_authenticated(request)
# 'authorized' can be True, False or tastypie.http.HttpUnauthorized
# Check explicitly for True, not just truthiness
if authorized is True:
client_ip = request.META['REMOTE_ADDR']
whitelist = helpers.get_setting('api_whitelist', '127.0.0.1').split()
if client_ip not in whitelist:
LOGGER.debug('API called by invalid IP %s', client_ip)
error = 'Host/IP ' + client_ip + ' not authorized.'
else:
error = 'API key not valid.'
return error
def get_unit_status(unit_uuid, unit_type):
"""
Get status for a SIP or Transfer.
Returns a dict with status info. Keys will always include 'status' and
'microservice', and may include 'sip_uuid'.
Status is one of FAILED, REJECTED, USER_INPUT, COMPLETE or PROCESSING.
Microservice is the name of the current microservice.
SIP UUID is populated only if the unit_type was unitTransfer and status is
COMPLETE. Otherwise, it is None.
:param str unit_uuid: UUID of the SIP or Transfer
:param str unit_type: unitSIP or unitTransfer
:return: Dict with status info.
"""
ret = {}
job = models.Job.objects.filter(sipuuid=unit_uuid).filter(unittype=unit_type).order_by('-createdtime', '-createdtimedec')[0]
ret['microservice'] = job.jobtype
if job.currentstep == 'Awaiting decision':
ret['status'] = 'USER_INPUT'
elif 'failed' in job.microservicegroup.lower():
ret['status'] = 'FAILED'
elif 'reject' in job.microservicegroup.lower():
ret['status'] = 'REJECTED'
elif job.jobtype == 'Remove the processing directory': # Done storing AIP
ret['status'] = 'COMPLETE'
elif models.Job.objects.fi | lter(sipuuid=unit_uuid).filter(jobtype='Create SIP from transfer objects').exists():
ret['status'] = 'COMPLETE'
# Get SIP UUID
sips = models.File.objects.filter(transfer_id=unit_uuid, sip__isnull=False).values('sip').distinct()
if sips:
ret['sip_uuid'] = sips[0]['sip']
elif models.Job.objects.filter(sipuuid=unit_uuid).filter(jobtype='Move transfer to backlog').exists():
ret['status'] = 'COMPLETE'
| ret['sip_uuid'] = 'BACKLOG'
else:
ret['status'] = 'PROCESSING'
return ret
def status(request, unit_uuid, unit_type):
# Example: http://127.0.0.1/api/transfer/status/?username=mike&api_key=<API key>
if request.method not in ('GET',):
return django.http.HttpResponseNotAllowed(['GET'])
auth_error = authenticate_request(request)
response = {}
if auth_error is not None:
response = {'message': auth_error, 'error': True}
return django.http.HttpResponseForbidden(
json.dumps(response),
content_type='application/json'
)
error = None
# Get info about unit
if unit_type == 'unitTransfer':
unit = get_object_or_None(models.Transfer, uuid=unit_uuid)
response['type'] = 'transfer'
elif unit_type == 'unitSIP':
unit = get_object_or_None(models.SIP, uuid=unit_uuid)
response['type'] = 'SIP'
if unit is None:
response['message'] = 'Cannot fetch {} with UUID {}'.format(unit_type, unit_uuid)
response['error'] = True
return django.http.HttpResponseBadRequest(
json.dumps(response),
content_type='application/json',
)
directory = unit.currentpath if unit_type == 'unitSIP' else unit.currentlocation
response['path'] = directory.replace('%sharedPath%', SHARED_DIRECTORY_ROOT, 1)
response['directory'] = os.path.basename(os.path.normpath(directory))
response['name'] = response['directory'].replace('-' + unit_uuid, '', 1)
response['uuid'] = unit_uuid
# Get status (including new SIP uuid, current microservice)
status_info = get_unit_status(unit_uuid, unit_type)
response.update(status_info)
if error is not None:
response['message'] = error
response['error'] = True
return django.http.HttpResponseServerError(
json.dumps(response),
content_type='application/json'
)
response['message'] = 'Fetched status for {} successfully.'.format(unit_uuid)
return helpers.json_response(response)
def waiting_for_user_input(request):
# Example: http://127.0.0.1/api/transfer/waiting?username=mike&api_key=<API key>
if request.method not in ('GET',):
return django.http.HttpResponseNotAllowed(['GET'])
auth_error = authenticate_request(request)
response = {}
if auth_error is not None:
response = {'message': auth_error, 'error': True}
return django.http.HttpResponseForbidden(
json.dumps(response),
content_type='application/json'
)
error = None
waiting_units = []
jobs = models.Job.objects.filter(currentstep='Awaiting decision')
for job in jobs:
unit_uuid = job.sipuuid
directory = os.path.basename(os.path.normpath(job.directory))
unit_name = directory.replace('-' + unit_uuid, '', 1)
waiting_units.append({
'sip_directory': directory,
'sip_uuid': unit_uuid,
'sip_name': unit_name,
'microservice': job.jobtype,
# 'choices': [] # TODO? Return list of choices, see ingest.views.ingest_status
})
response['results'] = waiting_units
if error is not None:
response['message'] = error
response['error'] = True
return django.http.HttpResponseServerError(
json.dumps(response),
content_type='application/json'
)
response['message'] = 'Fetched transfers successfully.'
return helpers.json_response(response)
def mark_hidden(request, unit_type, unit_uuid):
"""
Mark a unit as deleted and hide it in the dashboard.
This is just a wrapper around unit.views.mark_hidden that verifies API auth.
:param unit_type: 'transfer' or 'ingest' for a Transfer or SIP respectively
:param unit_uuid: UUID of the Transfer or SIP
"""
auth_error = authenticate_request(request)
response = {}
if auth_error is not None:
response = {'message': auth_error, 'error': True}
return django.http.HttpResponseForbidden(
json.dumps(response),
content_type='application/json'
) |
kellinm/blivet | blivet/flags.py | Python | gpl-2.0 | 3,954 | 0.000506 | # flags.py
#
# Copyright (C) 2013 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
# Red Hat Author(s): David Lehman <dlehman@redhat.com>
#
import shlex
import selinux
from .util import open # pylint: disable=redefined-builtin
class Flags(object):
    """Run-time feature flags shared across the blivet package.

    A single module-level instance (``flags``) is created below; attributes
    are toggled from the kernel boot command line or from anaconda.
    """

    def __init__(self):
        #
        # mode of operation
        #
        self.testing = False
        self.installer_mode = False

        #
        # minor modes (installer-specific)
        #
        self.automated_install = False
        self.live_install = False
        self.image_install = False

        #
        # enable/disable functionality
        #
        self.selinux = selinux.is_selinux_enabled()
        self.multipath = True
        self.dmraid = True
        self.ibft = True
        self.noiswmd = False

        self.gfs2 = True
        self.jfs = True
        self.reiserfs = True

        self.arm_platform = None

        self.gpt = False

        self.multipath_friendly_names = True

        # set to False to suppress the default LVM behavior of saving
        # backup metadata in /etc/lvm/{archive,backup}
        self.lvm_metadata_backup = True

        # whether to include nodev filesystems in the devicetree (only
        # meaningful when flags.installer_mode is False)
        self.include_nodev = False

        self.boot_cmdline = {}
        self.update_from_boot_cmdline()
        self.allow_imperfect_devices = True

    def get_boot_cmdline(self):
        """Parse /proc/cmdline into the ``boot_cmdline`` dict.

        Only ``key=value`` arguments are recorded; bare flags without an
        '=' are skipped.
        """
        # Fix: use a context manager so the file handle is closed promptly
        # instead of leaking until garbage collection.
        with open("/proc/cmdline") as f:
            buf = f.read().strip()
        args = shlex.split(buf)
        for arg in args:
            (opt, _equals, val) = arg.partition("=")
            if val:
                self.boot_cmdline[opt] = val

    def update_from_boot_cmdline(self):
        """Refresh the flags that are controlled by kernel boot arguments."""
        self.get_boot_cmdline()
        if "nompath" in self.boot_cmdline:
            self.multipath = False

        if "nodmraid" in self.boot_cmdline:
            self.dmraid = False

        if "noiswmd" in self.boot_cmdline:
            self.noiswmd = True

    def update_from_anaconda_flags(self, anaconda_flags):
        """Copy the relevant settings from anaconda's flags object."""
        self.installer_mode = True
        self.testing = anaconda_flags.testing
        self.automated_install = anaconda_flags.automatedInstall
        self.live_install = anaconda_flags.livecdInstall
        self.image_install = anaconda_flags.imageInstall

        self.selinux = anaconda_flags.selinux

        # these filesystems are only enabled when requested on the boot line
        self.gfs2 = "gfs2" in self.boot_cmdline
        self.jfs = "jfs" in self.boot_cmdline
        self.reiserfs = "reiserfs" in self.boot_cmdline

        self.arm_platform = anaconda_flags.armPlatform
        self.gpt = anaconda_flags.gpt

        self.multipath_friendly_names = anaconda_flags.mpathFriendlyNames
        self.allow_imperfect_devices = anaconda_flags.rescue_mode

        self.ibft = anaconda_flags.ibft
        self.dmraid = anaconda_flags.dmraid

        # We don't want image installs writing backups of the *image* metadata
        # into the *host's* /etc/lvm. This can get real messy on build systems.
        if self.image_install:
            self.lvm_metadata_backup = False
flags = Flags()
|
pyamg/pyamg | pyamg/util/linalg.py | Python | mit | 18,547 | 0.000324 | """Linear Algebra Helper Routines."""
from warnings import warn
import numpy as np
from scipy import sparse
from scipy.sparse.linalg import aslinearoperator
from scipy.linalg import lapack, get_blas_funcs, eig, svd
from .params import set_tol
def norm(x, pnorm='2'):
    """Compute the 2-norm or infinity-norm of a vector.

    Parameters
    ----------
    x : array_like
        Vector of complex or real values
    pnorm : string
        '2' for the Euclidean norm, 'inf' for the maximum absolute value

    Returns
    -------
    n : float
        Requested norm of the vector

    Notes
    -----
    - avoids the extra copy made by scipy.linalg.norm(x), which computes
      sqrt(numpy.sum(real((conjugate(x)*x)),axis=0))
    - only handles the 2-norm and infinity-norm for vectors

    See Also
    --------
    scipy.linalg.norm : scipy general matrix or vector norm
    """
    vec = np.ravel(x)
    if pnorm == 'inf':
        return np.abs(vec).max()
    elif pnorm == '2':
        return np.sqrt(np.inner(vec.conj(), vec).real)
    else:
        raise ValueError('Only the 2-norm and infinity-norm are supported')
def infinity_norm(A):
    """Infinity norm of a matrix (maximum absolute row sum).

    Parameters
    ----------
    A : csr_matrix, csc_matrix, sparse, or numpy matrix
        Sparse or dense matrix

    Returns
    -------
    n : float
        Infinity norm of the matrix

    Notes
    -----
    - This serves as an upper bound on spectral radius.
    - csr and csc avoid a deep copy
    - dense calls scipy.linalg.norm

    See Also
    --------
    scipy.linalg.norm : dense matrix norms

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.sparse import spdiags
    >>> from pyamg.util.linalg import infinity_norm
    >>> n=10
    >>> e = np.ones((n,1)).ravel()
    >>> data = [ -1*e, 2*e, -1*e ]
    >>> A = spdiags(data,[-1,0,1],n,n)
    >>> print(infinity_norm(A))
    4.0
    """
    ones = np.ones((A.shape[1]), dtype=A.dtype)
    if sparse.isspmatrix_csr(A) or sparse.isspmatrix_csc(A):
        # Reuse the existing index arrays so only the data is copied.
        abs_A = A.__class__((np.abs(A.data), A.indices, A.indptr),
                            shape=A.shape)
        return (abs_A * ones).max()
    if sparse.isspmatrix(A):
        return (abs(A) * ones).max()
    return np.dot(np.abs(A), ones).max()
def axpy(x, y, a=1.0):
    """Quick level-1 BLAS call computing y = a*x + y in place.

    Parameters
    ----------
    x : array_like
        nx1 real or complex vector
    y : array_like
        nx1 real or complex vector
    a : float
        real or complex scalar

    Returns
    -------
    y : array_like
        Input variable y is rewritten

    Notes
    -----
    get_blas_funcs picks the correctly-typed BLAS routine (saxpy, daxpy,
    caxpy or zaxpy) based on the dtypes of x and y.
    """
    blas_axpy = get_blas_funcs(['axpy'], [x, y])[0]
    blas_axpy(x, y, a)
# def approximate_spectral_radius(A, tol=0.1, maxiter=10, symmetric=False):
# """approximate the spectral radius of a matrix
#
# Parameters
# ----------
#
# A : {dense or sparse matrix}
# E.g. csr_matrix, csc_matrix, ndarray, etc.
# tol : {scalar}
# Tolerance of approximation
# maxiter : {integer}
# Maximum number of iterations to perform
# symmetric : {boolean}
# True if A is symmetric, False otherwise (default)
#
# Returns
# -------
# An approximation to the spectral radius of A
#
# """
# if symmetric:
# method = eigen_symmetric
# else:
# method = eigen
#
# return norm( method(A, k=1, tol=0.1, which='LM', maxiter=maxiter,
# return_eigenvectors=False) )
def _approximate_eigenvalues(A, maxiter, symmetric=None, initial_guess=None):
    """Approximate eigenvalues of A via Arnoldi (or Lanczos when symmetric).

    Used by approximate_spectral_radius and condest.

    Returns [W, E, H, V, breakdown_flag], where W and E are the eigenvectors
    and eigenvalues of the Hessenberg matrix H, respectively, and V is the
    Krylov space. breakdown_flag denotes whether Lanczos/Arnoldi suffered
    breakdown. E is therefore the approximate eigenvalues of A.

    To obtain approximate eigenvectors of A, compute V*W.
    """
    A = aslinearoperator(A)  # A could be dense or sparse, or something weird

    # Choose tolerance for deciding if break-down has occurred
    breakdown = set_tol(A.dtype)
    breakdown_flag = False

    if A.shape[0] != A.shape[1]:
        raise ValueError('expected square matrix')

    maxiter = min(A.shape[0], maxiter)

    if initial_guess is None:
        v0 = np.random.rand(A.shape[1], 1)
        if A.dtype == complex:
            v0 = v0 + 1.0j * np.random.rand(A.shape[1], 1)
    else:
        v0 = initial_guess

    v0 /= norm(v0)

    # Important to type H based on v0, so that a real nonsymmetric matrix, can
    # have an imaginary initial guess for its Arnoldi Krylov space.
    # Fix: np.find_common_type was removed in NumPy 2.0; np.result_type
    # applies the same promotion rules for two array dtypes.
    H = np.zeros((maxiter+1, maxiter),
                 dtype=np.result_type(v0.dtype, A.dtype))

    V = [v0]

    beta = 0.0
    for j in range(maxiter):
        w = A * V[-1]

        if symmetric:
            # Lanczos three-term recurrence: only the last two Krylov
            # vectors are needed for the orthogonalization.
            if j >= 1:
                H[j-1, j] = beta
                w -= beta * V[-2]

            alpha = np.dot(np.conjugate(w.ravel()), V[-1].ravel())
            H[j, j] = alpha
            w -= alpha * V[-1]  # axpy(V[-1],w,-alpha)

            beta = norm(w)
            H[j+1, j] = beta

            if (H[j+1, j] < breakdown):
                breakdown_flag = True
                break

            w /= beta

            V.append(w)
            V = V[-2:]  # retain only last two vectors

        else:
            # Arnoldi: orthogonalize the new vector against the whole basis.
            for i, v in enumerate(V):
                H[i, j] = np.dot(np.conjugate(v.ravel()), w.ravel())
                w = w - H[i, j]*v

            H[j+1, j] = norm(w)

            if (H[j+1, j] < breakdown):
                breakdown_flag = True
                if H[j+1, j] != 0.0:
                    w = w/H[j+1, j]
                V.append(w)
                break

            w = w/H[j+1, j]
            V.append(w)

            # if upper 2x2 block of Hessenberg matrix H is almost symmetric,
            # and the user has not explicitly specified symmetric=False,
            # then switch to symmetric Lanczos algorithm
            # if symmetric is not False and j == 1:
            #    if abs(H[1,0] - H[0,1]) < 1e-12:
            #        #print("using symmetric mode")
            #        symmetric = True
            #        V = V[1:]
            #        H[1,0] = H[0,1]
            #        beta = H[2,1]

    # print("Approximated spectral radius in %d iterations" % (j + 1))

    Eigs, Vects = eig(H[:j+1, :j+1], left=False, right=True)

    return (Vects, Eigs, H, V, breakdown_flag)
def approximate_spectral_radius(A, tol=0.01, maxiter=15, restart=5,
symmetric=None, initial_guess=None,
return_vector=False):
"""Approximate the spectral radius of a matrix.
Parameters
----------
A : {dense or sparse matrix}
E.g. csr_matrix, csc_matrix, ndarray, etc.
tol : {scalar}
Relative tolerance of approximation, i.e., the error divided
by the approximate spectral radius is compared to tol.
maxiter : {integer}
Maximum number of iterations to perform
restart : {integer}
Number of restarted Arnoldi processes. For example, a value of 0 will
run Arnoldi once, for maxiter iterations, and a value of 1 will restart
Arnoldi once, using the maximal eigenvector from the first Arnoldi
process as the initial guess.
symmetric : {boolean}
True - if A is symmetric Lanczos iteration is used (more efficient)
False - if A is non-symmetric Arnoldi iteration is used (less efficient)
initial_guess : {array|None}
If n x 1 array, then use as initial guess for Arnoldi/Lanczos.
If None, then use a random initial guess.
return_vector : {boolean}
True - return an approximate dominant eigenvector and the spectral radius.
False - Do not return the approximate dominant eigenvector
Returns
|
demisto/content | Packs/Vectra/Integrations/Vectra_v2/Vectra_v2.py | Python | mit | 28,976 | 0.003176 | from CommonServerPython import *
# IMPORTS #
import json
import requests
import urllib3
from typing import Dict, List, Union
# Disable insecure warnings
urllib3.disable_warnings()
# CONSTANTS #
MAX_FETCH_SIZE = 50
DATE_FORMAT = "%Y-%m-%dT%H%M" # 2019-09-01T1012
PARAMS_KEYS = {
"threat_score": "t_score",
"threat_score_gte": "t_score_gte",
"certainty_score": "c_score",
"certainty_score_gte": "c_score_gte",
"destination_port": "dst_port"
}
# HELPER FUNCTIONS #
def create_incident_from_detection(detection: dict):
    """
    Convert a Vectra detection object into a Demisto incident dict,
    mapping every detection field to a label and keeping the raw JSON.
    """
    labels = [{'type': field, 'value': json.dumps(content)}
              for field, content in detection.items()]
    return {
        "name": f'Detection from Vectra with ID: {detection.get("id")}',
        "labels": labels,
        "rawJSON": json.dumps(detection)
    }
def calc_pages(total_count: int, this_count: int):
    """
    Ceiling division: number of pages of size ``this_count`` needed to
    hold ``total_count`` items.
    """
    # Negating the divisor flips floor division into ceiling division.
    return -(total_count // -this_count)
def max_timestamp(timestamp1: str, timestamp2: str) -> str:
    """
    Returns the later (more recent) of the two timestamps.

    Both inputs must match DATE_FORMAT ("%Y-%m-%dT%H%M").
    Note: on a tie, ``timestamp2`` is returned.
    """
    date1 = parse_date_string(timestamp1, date_format=DATE_FORMAT)
    date2 = parse_date_string(timestamp2, date_format=DATE_FORMAT)
    return timestamp1 if date1 > date2 else timestamp2
def update_vectra_params(kwargs: dict) -> dict:
    """
    Rename argument keys to Vectra's API syntax; keys without a mapping
    in PARAMS_KEYS are passed through unchanged.
    """
    translated = {}
    for name, value in kwargs.items():
        translated[PARAMS_KEYS.get(name, name)] = value
    return translated
class Client:
    """Thin HTTP client for the Vectra v2.1 REST API, plus incident fetching."""

    def __init__(self, vectra_url: str, api_token: str, verify: bool, proxy: dict, fetch_size: int,
                 first_fetch: str, t_score_gte: int, c_score_gte: int, state: str):
        """
        :param vectra_url: IP or hostname of Vectra brain (ex https://www.example.com) - required
        :param api_token: API token for authentication when using API v2*
        :param verify: Boolean, controls whether we verify the server's TLS certificate
        :param proxy: Dictionary mapping protocol to the URL of the proxy.
        :param fetch_size: Max number of incidents to fetch in each cycle
        :param first_fetch: Fetch only Detections newer than this date
        :param c_score_gte: Fetch only Detections with greater/equal Certainty score
        :param t_score_gte: Fetch only Detections with greater/equal Threat score
        :param state: Fetch only Detections with matching State (e.g., active, inactive, ignored)
        """
        self.state = state
        self.t_score_gte = t_score_gte
        self.c_score_gte = c_score_gte
        self.fetch_size = fetch_size
        # Token-based auth header sent with every request.
        self.headers = {'Authorization': f'Token {api_token}'}
        self.base_url = vectra_url + '/api/v2.1/'
        self.verify = verify
        self.proxies = proxy
        self.first_fetch = first_fetch

    def http_request(self, method='GET', url_suffix='', params=None, data=None) -> Dict:
        """
        Generic HTTP request to Vectra API.
        :param method: Request's method e.g., 'GET', 'POST', 'PATCH'
        :param url_suffix: The URL's suffix, usually indicates the API command
        :param params: Command parameters
        :param data: Other data to send the request with
        :return: .json() of the response if exists
        """
        full_url = self.base_url + url_suffix
        try:
            res = requests.request(
                method=method,
                url=full_url,
                headers=self.headers,
                params=params,
                data=data,
                verify=self.verify,
                proxies=self.proxies,
            )
        # Translate low-level connection failures into user-facing messages.
        except requests.exceptions.ConnectTimeout:
            raise Exception('Connection Timeout Error - potential reasons might be that the Server URL parameter is'
                            ' incorrect or that the Server is not accessible from your host.')
        except requests.exceptions.SSLError:
            raise Exception('SSL Certificate Verification Failed \nTry selecting \'Trust any certificate\'')
        except requests.exceptions.ConnectionError:
            raise Exception(f'Failed to connect to - {self.base_url} \nPlease check the URL')
        if not res.ok:
            raise ValueError(f'Error in API call to Vectra [{res.status_code:d}]. Reason: {res.text}')
        try:
            return res.json()
        except Exception:
            raise ValueError(f"Failed to parse http response to JSON format. Original response body: \n{res.text}")

    def fetch_incidents(self, last_run: Dict):
        """
        Fetches Detections from Vectra into Demisto Incidents

        :param last_run: Integration's last run
        :return: tuple of (updated last_run dict, list of incident dicts)
        """
        # Start from the previous run's high-water mark, or first_fetch on the first run.
        last_timestamp: str = last_run.get('last_timestamp', self.first_fetch)  # type: ignore

        # Build the Lucene-style search query from the configured filters.
        query_string = f'detection.threat:>={self.t_score_gte}'
        query_string += f' and detection.certainty:>={self.c_score_gte}'
        query_string += f' and detection.last_timestamp:>{last_timestamp}'  # format: "%Y-%m-%dT%H%M"
        query_string += f' and detection.state:{self.state}' if self.state != 'all' else ''

        demisto.info(f'\n\nQuery String:\n{query_string}\n\n')
        params = {
            'query_string': query_string,
            'page_size': self.fetch_size,
            'page': 1,
            'order_field': 'last_timestamp'
        }
        raw_response = self.http_request(params=params, url_suffix='search/detections')  # type: ignore
        demisto.info("\n\n Queried Successfully\n\n")

        # Detections -> Incidents, if exists
        incidents = []
        if 'results' in raw_response:
            res: Union[List[Dict], Dict] = raw_response.get('results')  # type: ignore
            # Normalize a single-detection response to a list; sort by id for stable ordering.
            detections: List[Dict] = [res] if not isinstance(res, List) else sorted(res, key=lambda h: h.get('id'))
            try:
                for detection in detections:
                    incidents.append(create_incident_from_detection(detection))  # type: ignore
                    # format from response: %Y-%m-%dT%H:%M:%SZ
                    response_last_timestamp = datetime.strptime(detection.get('last_timestamp'),  # type: ignore
                                                                "%Y-%m-%dT%H:%M:%SZ").strftime("%Y-%m-%dT%H%M")
                    # Track the newest detection timestamp seen so far.
                    last_timestamp = max_timestamp(last_timestamp, response_last_timestamp)  # type: ignore

                if incidents:
                    last_run = {'last_timestamp': last_timestamp}
            # NOTE(review): this handler only re-raises and adds nothing; it could be removed.
            except ValueError:
                raise

        demisto.info(f"Last run is:\n {last_run}")
        return last_run, incidents
def get_detections_command(client: Client, **kwargs):
"""
Detection objects contain all the information related to security events detected on the network.
:QUERY PARAMETERS:
:keyword fields: Filters objects listed
:keyword page: Page number. Possible values are a positive integer or last
:keyword page_size: Possible values are a positive integer or all
:keyword ordering: Orders records by last timestamp, threat score and certainty score. Default is ascending order.
Scores can sorted in descending order by prepending the query with “minus” symbol
:keyword min_id: >= the id provided
:keyword max_id: <= the id provided
:keyword state: Filter by state: active, inactive, ignored, ignored for all
:keyword type_vname: Filter by the detection type (verbose name)
:keyword category: Filter by the detection category
:keyword src_ip: Filter by source (ip address)
:keyword threat_score: Filter by threat score
:keyword threat_score_gte: Filter by threat score >= the score provided
:keyword certainty_score: Filter by certainty score
:keyword certainty_score_gte: Filter by certainty score >= the score provided
:keyword last_timestamp: Filter by last timestamp
:keyword host_id: Filter by id of the host object a detection is attributed to
:keyword tags: Filter by a tag or a comma-separated list of tags
:keyword destination: Filter by destination in the |
mpirnat/adventofcode | day23/day23.py | Python | mit | 2,854 | 0.00035 | #!/usr/bin/env python
"""
Solve day 23 of Advent of Code.
http://adventofcode.com/day/23
"""
class Computer:
    """Day 23 virtual machine: two registers (a, b) and an instruction pointer.

    Each instruction line (e.g. ``"inc a"``, ``"jie a, +2"``, ``"jmp -1"``)
    maps directly to the method of the same name.
    """

    def __init__(self):
        """
        Our computer has 2 registers, a and b,
        and an instruction pointer so that we know
        which instruction to fetch next.
        """
        self.a = 0
        self.b = 0
        self.ip = 0  # Ye olde instruction pointer

    def run_program(self, program):
        """
        Run a list of program instructions until the instruction
        pointer moves outside the bounds of the instruction list.

        Bug fix: the original only caught IndexError, which detects the
        pointer running off the *end*; a backwards jump made ``self.ip``
        negative and silently wrapped around via Python's negative
        indexing.  An explicit bounds check halts in both directions.
        """
        while 0 <= self.ip < len(program):
            instruction, args = self.parse_instruction(program[self.ip])
            getattr(self, instruction)(*args)

    def parse_instruction(self, line):
        """
        Parse a line of the program into
        the instruction and its arguments.
        """
        instruction, *args = line.strip().replace(',', '').split()
        return instruction, args

    def hlf(self, register):
        """
        Set the register to half its current value,
        then increment the instruction pointer.
        """
        setattr(self, register, getattr(self, register)//2)
        self.ip += 1

    def tpl(self, register):
        """
        Set the register to triple its current value,
        then increment the instruction pointer.
        """
        setattr(self, register, getattr(self, register)*3)
        self.ip += 1

    def inc(self, register):
        """
        Increment the value in the register,
        then increment the instruction pointer.
        """
        setattr(self, register, getattr(self, register) + 1)
        self.ip += 1

    def jmp(self, offset):
        """
        Jump the instruction pointer by a particular offset.
        """
        self.ip += int(offset)

    def jie(self, register, offset):
        """
        Jump the instruction pointer by an offset
        if the value in the register is even.
        """
        if getattr(self, register) % 2 == 0:
            self.jmp(offset)
        else:
            self.ip += 1

    def jio(self, register, offset):
        """
        Jump the instruction pointer by an offset
        if the value in the register is one.
        """
        if getattr(self, register) == 1:
            self.jmp(offset)
        else:
            self.ip += 1
if __name__ == '__main__':
    # Read the puzzle input: one assembly instruction per line.
    with open('input.txt') as f:
        program = f.readlines()

    computer = Computer()

    # Part 1 - start with a=0, b=0
    computer.run_program(program)
    print("Part 1:", computer.b)

    # Part 2 - now start with a=1, b=0
    computer = Computer()
    computer.a = 1
    computer.run_program(program)
    print("Part 2:", computer.b)
|
jasonwee/asus-rt-n14uhp-mrtg | src/lesson_the_internet/urllib_request_upload_files.py | Python | apache-2.0 | 3,434 | 0 | import io
import mimetypes
from urllib import request
import uuid
class MultiPartForm:
    """Accumulate fields and file attachments for a multipart/form-data POST."""

    def __init__(self):
        self.form_fields = []
        self.files = []
        # A large random hex string is vanishingly unlikely to occur in the
        # payload, making it a safe MIME part separator.
        self.boundary = uuid.uuid4().hex.encode('utf-8')

    def get_content_type(self):
        """Value to use for the Content-Type request header."""
        return 'multipart/form-data; boundary={}'.format(
            self.boundary.decode('utf-8'))

    def add_field(self, name, value):
        """Queue a simple name/value field for the form data."""
        self.form_fields.append((name, value))

    def add_file(self, fieldname, filename, fileHandle,
                 mimetype=None):
        """Queue a file to be uploaded, guessing its MIME type if not given."""
        body = fileHandle.read()
        if mimetype is None:
            mimetype = (mimetypes.guess_type(filename)[0]
                        or 'application/octet-stream')
        self.files.append((fieldname, filename, mimetype, body))

    @staticmethod
    def _form_data(name):
        return ('Content-Disposition: form-data; '
                'name="{}"\r\n').format(name).encode('utf-8')

    @staticmethod
    def _attached_file(name, filename):
        return ('Content-Disposition: file; '
                'name="{}"; filename="{}"\r\n').format(
                    name, filename).encode('utf-8')

    @staticmethod
    def _content_type(ct):
        return 'Content-Type: {}\r\n'.format(ct).encode('utf-8')

    def __bytes__(self):
        """Serialize the form, including attached files, as a byte string."""
        separator = b'--' + self.boundary + b'\r\n'
        chunks = []

        # Simple fields first.
        for name, value in self.form_fields:
            chunks.append(separator)
            chunks.append(self._form_data(name))
            chunks.append(b'\r\n')
            chunks.append(value.encode('utf-8'))
            chunks.append(b'\r\n')

        # Then the file attachments.
        for f_name, filename, f_content_type, body in self.files:
            chunks.append(separator)
            chunks.append(self._attached_file(f_name, filename))
            chunks.append(self._content_type(f_content_type))
            chunks.append(b'\r\n')
            chunks.append(body)
            chunks.append(b'\r\n')

        # Closing boundary marker.
        chunks.append(b'--' + self.boundary + b'--\r\n')
        return b''.join(chunks)
if __name__ == '__main__':
    # Demo: build a form and POST it to a local server.
    # Note: the final request requires an HTTP server listening on port 8080.

    # Create the form with simple fields
    form = MultiPartForm()
    form.add_field('firstname', 'Doug')
    form.add_field('lastname', 'Hellmann')

    # Add a fake file
    form.add_file(
        'biography', 'bio.txt',
        fileHandle=io.BytesIO(b'Python developer and blogger.'))

    # Build the request, including the byte-string
    # for the data to be posted.
    data = bytes(form)
    r = request.Request('http://localhost:8080/', data=data)
    r.add_header(
        'User-agent',
        'PyMOTW (https://pymotw.com/)',
    )
    r.add_header('Content-type', form.get_content_type())
    r.add_header('Content-length', len(data))

    # Show the headers and body that will be sent.
    print()
    print('OUTGOING DATA:')
    for name, value in r.header_items():
        print('{}: {}'.format(name, value))
    print()
    print(r.data.decode('utf-8'))

    print()
    print('SERVER RESPONSE:')
    print(request.urlopen(r).read().decode('utf-8'))
|
XENON1T/pax | pax/plugins/ZLE.py | Python | bsd-3-clause | 6,092 | 0.00394 | import numpy as np
from pax import plugin, datastructure
from pax.dsputils import find_intervals_above_threshold_no_splitting
import matplotlib.pyplot as plt
class SoftwareZLE(plugin.TransformPlugin):
    """Emulate the Zero-length encoding of the CAEN 1724 digitizer
    Makes no attempt to emulate the 2-sample word logic, so some rare edge cases will be different
    Uses a separate debug setting, as need to show plots
    """
    # When True, plot each pulse and the chosen intervals with matplotlib.
    debug = False
    # Preallocated (start, stop) interval buffer reused for every pulse;
    # 50000 rows bounds how many intervals one pulse can produce.
    zle_intervals_buffer = -1 * np.ones((50000, 2), dtype=np.int64)

    def transform_event(self, event):
        """Replace each pulse in the event with zero or more shorter pulses
        that cover only the regions around above-threshold excursions.
        """
        new_pulses = []
        zle_intervals_buffer = self.zle_intervals_buffer
        for pulse_i, pulse in enumerate(event.pulses):
            if self.debug:
                print("Starting ZLE in pulse %d, channel %d" % (pulse_i, pulse.channel))
            # XED requires even start indices (2-sample word alignment).
            if pulse.left % 2 != 0:
                raise ValueError("Cannot ZLE in XED-compatible way "
                                 "if pulse starts at odd sample index (%d)" % pulse.left)
            w = pulse.raw_data.copy()
            samples_for_baseline = self.config.get('initial_baseline_samples', None)
            if samples_for_baseline is not None:
                # This tries to do better than the digitizer: compute a baseline for the pulse
                bs = w[:min(len(w), samples_for_baseline)]
                w = np.mean(bs) - w
            else:
                # This is how the digitizer does it (I think???)
                # Subtract the reference baseline, invert
                w = self.config['digitizer_reference_baseline'] - w
            if self.debug:
                plt.plot(w)
            # Get the ZLE threshold
            # Note a threshold of X digitizer bins actually means that the data acquisition
            # triggers when the waveform becomes greater than X, i.e. X+1 or more (see #273)
            # hence the + 1
            if str(pulse.channel) in self.config.get('special_thresholds', {}):
                threshold = self.config['special_thresholds'][str(pulse.channel)] + 1
            else:
                threshold = self.config['zle_threshold'] + 1
            # Find intervals above ZLE threshold
            # We need to call the version with numba boost
            n_itvs_found = find_intervals_above_threshold_no_splitting(w.astype(np.float64),
                                                                       threshold=threshold,
                                                                       result_buffer=zle_intervals_buffer,
                                                                       )
            if n_itvs_found == self.config['max_intervals']:
                # more than 5000 intervals - insane!!!
                # Ignore intervals beyond this -- probably will go beyond 32 intervals to encode anyway
                zle_intervals_buffer[-1, 1] = pulse.length - 1
            if n_itvs_found > 0:
                # View of just the intervals found for this pulse.
                itvs_to_encode = zle_intervals_buffer[:n_itvs_found]
                if self.debug:
                    for l, r in itvs_to_encode:
                        plt.axvspan(l, r, alpha=0.5, color='red')
                # Find boundaries of regions to encode by subtracting before and after window
                # This will introduce overlaps and out-of-pulse indices
                itvs_to_encode[:, 0] -= self.config['samples_to_store_before']
                itvs_to_encode[:, 1] += self.config['samples_to_store_after']
                # Clip out-of-pulse indices
                itvs_to_encode = np.clip(itvs_to_encode, 0, pulse.length - 1)
                # Decide which intervals to encode: deal with overlaps here
                itvs_encoded = 0
                itv_i = 0
                while itv_i <= len(itvs_to_encode) - 1:
                    start = itvs_to_encode[itv_i, 0]
                    if itvs_encoded >= self.config['max_intervals']:
                        self.log.debug("ZLE breakdown in channel %d: all samples from %d onwards are stored" % (
                            pulse.channel, zle_intervals_buffer[-1, 0]))
                        stop = pulse.length - 1
                        itv_i = len(itvs_to_encode) - 1  # Loop will end after this last pulse is appended
                    else:
                        stop = itvs_to_encode[itv_i, 1]
                    # If next interval starts before this one ends, update stop and keep searching
                    # If last interval reached, there is no itv_i + 1, thats why the condition has <, not <=
                    while itv_i < len(itvs_to_encode) - 1:
                        if itvs_to_encode[itv_i + 1, 0] <= stop:
                            stop = itvs_to_encode[itv_i + 1, 1]
                            itv_i += 1
                        else:
                            break
                    # Truncate the interval to the nearest even start and odd stop index
                    # We use truncation rather than extension to ensure data always exists
                    # pulse.left is guaranteed to be even
                    if start % 2 != 0:
                        start += 1
                    if stop % 2 != 1:
                        stop -= 1
                    # After alignment the stored region always spans whole 2-sample words.
                    assert (stop - start + 1) % 2 == 0
                    if self.debug:
                        plt.axvspan(start, stop, alpha=0.3, color='green')
                    # Explicit casts necessary since we've disabled type checking for pulse class
                    # for speed in event builder.
                    # and otherwise numpy ints would get in and break e.g. BSON output.
                    new_pulses.append(datastructure.Pulse(
                        channel=int(pulse.channel),
                        left=int(pulse.left+start),
                        right=int(pulse.left+stop),
                        raw_data=pulse.raw_data[start:stop + 1]
                    ))
                    itvs_encoded += 1
                    itv_i += 1
            if self.debug:
                plt.show()

        event.pulses = new_pulses
        return event
|
markokr/cc | cc/daemon/__init__.py | Python | bsd-2-clause | 1,635 | 0.016514 | import sys
import types
import skytools
from cc.job import CCJob
from cc.daemon.plugins import CCDaemonPlugin
#
# Base class for daemons
#
class CCDaemon(CCJob):
    """Base class for CC daemons that discover and load plugins."""

    log = skytools.getLogger('d:CCDaemon')

    def find_plugins(self, mod_name, probe_func=None):
        """Import *mod_name* and return its CCDaemonPlugin subclasses.

        :param mod_name: dotted module name to scan
        :param probe_func: optional predicate; classes it rejects are skipped
        :returns: list of plugin classes (may be empty)
        """
        found = []
        __import__(mod_name)
        mod = sys.modules[mod_name]
        for attr_name in dir(mod):
            attr = getattr(mod, attr_name)
            # Fix: 'type' instead of Python-2-only types.TypeType
            # (they are the same object on py2, and 'type' works on py3).
            # Only accept classes defined in the module itself, not imports.
            if (isinstance(attr, type) and
                    issubclass(attr, CCDaemonPlugin) and
                    attr.__module__ == mod.__name__):
                if not probe_func or probe_func(attr):
                    found.append(attr)
                else:
                    self.log.debug("plugin %s probing negative", attr_name)
        if not found:
            self.log.info("no suitable plugins found in %s", mod_name)
        return found

    def load_plugins(self, *args, **kwargs):
        """ Look for suitable plugins, probe them, load them.
        """
        self.plugins = []
        for palias in self.cf.getlist('plugins'):
            pcf = self.cf.clone(palias)
            mod = pcf.get('module')
            for cls in self.find_plugins(mod):
                pin = cls(palias, pcf, self)
                if pin.probe(*args, **kwargs):
                    self.plugins += [pin]
                else:
                    self.log.debug("plugin %s probing negative", pin.__class__.__name__)
        if self.plugins:
            self.log.info("Loaded plugins: %s", [p.__class__.__name__ for p in self.plugins])
        else:
            self.log.warn("No plugins loaded!")
|
glenngillen/dotfiles | .vscode/extensions/ms-python.python-2021.5.842923320/pythonFiles/lib/python/debugpy/_vendored/pydevd/_pydev_runfiles/pydev_runfiles_pytest2.py | Python | mit | 10,660 | 0.002251 | from _pydev_runfiles import pydev_runfiles_xml_rpc
import pickle
import zlib
import base64
import os
from pydevd_file_utils import canonical_normalized_path
import pytest
import sys
import time
try:
from pathlib import Path
except:
Path = None
#=========================================================================
# Load filters with tests we should skip
#=========================================================================
py_test_accept_filter = None
def _load_filters():
    """Lazily populate the module-global test filter from PYDEV_PYTEST_SKIP.

    The environment variable holds a base64-encoded, zlib-compressed pickle
    mapping file paths to lists of accepted test names.  Runs the decode
    only on the first call; an unset/empty variable yields an empty filter.
    """
    global py_test_accept_filter
    if py_test_accept_filter is None:
        py_test_accept_filter = os.environ.get('PYDEV_PYTEST_SKIP')
        if py_test_accept_filter:
            py_test_accept_filter = pickle.loads(
                zlib.decompress(base64.b64decode(py_test_accept_filter)))

            if Path is not None:
                # Newer versions of pytest resolve symlinks, so, we
                # may need to filter with a resolved path too.
                new_dct = {}
                for filename, value in py_test_accept_filter.items():
                    new_dct[canonical_normalized_path(str(Path(filename).resolve()))] = value

                py_test_accept_filter.update(new_dct)

        else:
            py_test_accept_filter = {}
def is_in_xdist_node():
    """True when running inside an xdist worker (PYDEV_MAIN_PID is set to a
    different process id than ours); False on the main process."""
    main_pid = os.environ.get('PYDEV_MAIN_PID')
    return bool(main_pid) and main_pid != str(os.getpid())
connected = False
def connect_to_server_for_communication_to_xml_rpc_on_xdist():
    """On an xdist worker, connect once to the main process' XML-RPC server
    (port taken from PYDEV_PYTEST_SERVER) so results can be reported back.
    No-op on the main process and on repeated calls.
    """
    global connected
    if connected:
        return
    connected = True
    if is_in_xdist_node():
        port = os.environ.get('PYDEV_PYTEST_SERVER')
        if not port:
            sys.stderr.write(
                'Error: no PYDEV_PYTEST_SERVER environment variable defined.\n')
        else:
            pydev_runfiles_xml_rpc.initialize_server(int(port), daemon=True)
PY2 = sys.version_info[0] <= 2
PY3 = not PY2
class State:
    """Module-wide mutable state shared by the redirect/report helpers."""
    # Wall-clock start of the run, used for the final duration summary.
    start_time = time.time()
    # Redirection buffers for stderr/stdout; None until start_redirect().
    buf_err = None
    buf_out = None
def start_redirect():
    """Start capturing stdout/stderr into State buffers (idempotent)."""
    if State.buf_out is not None:
        # Already redirecting; don't stack a second redirection.
        return
    from _pydevd_bundle import pydevd_io
    State.buf_err = pydevd_io.start_redirect(keep_original_redirection=True, std='stderr')
    State.buf_out = pydevd_io.start_redirect(keep_original_redirection=True, std='stdout')
def get_curr_output():
    """Return the text captured so far as an (stdout, stderr) tuple;
    either element is '' if redirection has not started."""
    out_buffer = State.buf_out
    err_buffer = State.buf_err
    out = out_buffer.getvalue() if out_buffer is not None else ''
    err = err_buffer.getvalue() if err_buffer is not None else ''
    return out, err
def pytest_unconfigure():
    """pytest hook: report the total run time once the session ends."""
    if is_in_xdist_node():
        return
    # Only report that it finished when on the main node (we don't want to report
    # the finish on each separate node).
    pydev_runfiles_xml_rpc.notifyTestRunFinished(
        'Finished in: %.2f secs.' % (time.time() - State.start_time,))
def pytest_collection_modifyitems(session, config, items):
    """pytest hook: filter the collected items down to the accepted tests.

    Matching is tried against the plain test name, the name without the
    parametrization suffix, and class-qualified variants.
    """
    # A note: in xdist, this is not called on the main process, only in the
    # secondary nodes, so, we'll actually make the filter and report it multiple
    # times.
    connect_to_server_for_communication_to_xml_rpc_on_xdist()

    _load_filters()
    if not py_test_accept_filter:
        pydev_runfiles_xml_rpc.notifyTestsCollected(len(items))
        return  # Keep on going (nothing to filter)

    new_items = []
    for item in items:
        f = canonical_normalized_path(str(item.parent.fspath))
        name = item.name

        if f not in py_test_accept_filter:
            # print('Skip file: %s' % (f,))
            continue  # Skip the file

        # Strip the '[...]' parametrization suffix, if any, for matching.
        i = name.find('[')
        name_without_parametrize = None
        if i > 0:
            name_without_parametrize = name[:i]

        accept_tests = py_test_accept_filter[f]

        if item.cls is not None:
            class_name = item.cls.__name__
        else:
            class_name = None
        for test in accept_tests:
            if test == name:
                # Direct match of the test (just go on with the default
                # loading)
                new_items.append(item)
                break

            if name_without_parametrize is not None and test == name_without_parametrize:
                # This happens when parameterizing pytest tests on older versions
                # of pytest where the test name doesn't include the fixture name
                # in it.
                new_items.append(item)
                break

            if class_name is not None:
                if test == class_name + '.' + name:
                    new_items.append(item)
                    break

                if name_without_parametrize is not None and test == class_name + '.' + name_without_parametrize:
                    new_items.append(item)
                    break

                if class_name == test:
                    new_items.append(item)
                    break
        else:
            pass
            # print('Skip test: %s.%s. Accept: %s' % (class_name, name, accept_tests))

    # Modify the original list
    items[:] = new_items
    pydev_runfiles_xml_rpc.notifyTestsCollected(len(items))
try:
"""
pytest > 5.4 uses own version of TerminalWriter based on py.io.TerminalWriter
and assumes there is a specific method TerminalWriter._write_source
so try load pytest version first or fallback to default one
"""
from _pytest._io import TerminalWriter
except ImportError:
from py.io import TerminalWriter
def _get_error_contents_from_report(report):
    """Render a pytest report's longrepr to plain text; '' if there is none."""
    if report.longrepr is not None:
        try:
            # Some TerminalWriter variants accept stringio=True and expose
            # the buffer as .stringio ...
            tw = TerminalWriter(stringio=True)
            stringio = tw.stringio
        except TypeError:
            # ... newer ones take a file object instead.
            import io
            stringio = io.StringIO()
            tw = TerminalWriter(file=stringio)
        tw.hasmarkup = False  # plain text, no ANSI color codes
        report.toterminal(tw)
        exc = stringio.getvalue()
        s = exc.strip()
        if s:
            return s

    return ''
def pytest_collectreport(report):
    """pytest hook: report collection-time errors as a failed pseudo-test."""
    error_text = _get_error_contents_from_report(report)
    if error_text:
        report_test('fail', '<collect errors>', '<collect errors>', '', error_text, 0.0)
def append_strings(s1, s2):
if s1.__class__ == s2.__class__:
return s1 + s2
if sys.version_info[0] == 2:
if not isinstance(s1, basestring):
s1 = str(s1)
if not isinstance(s2, basestring):
s2 = str(s2)
# Prefer bytes
if isinstance(s1, unicode):
s1 = s1.encode('utf-8')
if isinstance(s2, unicode):
s2 = s2.encode('utf-8')
return s1 + s2
else:
# Prefer str
if isinstance(s1, bytes):
s1 = s1.decode('utf-8', 'replace')
if isinstance(s2, bytes):
s2 = s2.decode('utf-8', 'replace')
return s1 + s2
def pytest_runtest_logreport(report):
if is_in_xdist_node():
# When running with xdist, we don't want the report to be called from the node, only
# from the main process.
return
report_duration = report.duration
report_when = report.when
report_outcome = report.outcome
if hasattr(report, 'wasxfail'):
if report_outcome != 'skipped':
report_outcome = 'passed'
if report_outcome == 'passed':
# passed on setup/teardown: no need to report if in setup or teardown
# (only on the actual test if it passed).
if report_when in ('setup', 'teardown'):
return
status = 'ok'
elif report_outcome == 'skipped':
status = 'skip'
else:
# It has only passed, skipped and failed (no error), so, let's consider
# error if not on call.
if report_when in ('setup', 'teardown'):
status = 'error'
else:
# any error in the call (not in setup or teardown) is considered a
# regular failure.
status = 'fail'
# This will wo |
Azure/azure-sdk-for-python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2021_05_01/operations/_express_route_ports_locations_operations.py | Python | mit | 7,987 | 0.005008 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class ExpressRoutePortsLocationsOperations(object):
"""ExpressRoutePortsLocationsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2021_05_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.ExpressRoutePortsLocationListResult"]
"""Retrieves all ExpressRoutePort peering locations. Does not return available bandwidths for each
location. Available bandwidths can only be obtained when retrieving a specific peering
location.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ExpressRoutePortsLocationListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2021_05_01.models.ExpressRoutePortsLocationListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRoutePortsLocationListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-05-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('ExpressRoutePortsLocationListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorForm | at)
return pipeline_response
return ItemPaged(
get_next, extract_data
| )
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/ExpressRoutePortsLocations'} # type: ignore
def get(
self,
location_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.ExpressRoutePortsLocation"
"""Retrieves a single ExpressRoutePort peering location, including the list of available
bandwidths available at said peering location.
:param location_name: Name of the requested ExpressRoutePort peering location.
:type location_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ExpressRoutePortsLocation, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2021_05_01.models.ExpressRoutePortsLocation
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRoutePortsLocation"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-05-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'locationName': self._serialize.url("location_name", location_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ExpressRoutePortsLocation', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/ExpressRoutePortsLocations/{locationName}'} # type: ignore
|
tensorflow/addons | tensorflow_addons/seq2seq/tests/beam_search_decoder_test.py | Python | apache-2.0 | 22,072 | 0.000951 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tfa.seq2seq.seq2seq.beam_search_decoder."""
import numpy as np
import pytest
import tensorflow as tf
from tensorflow_addons.seq2seq import attention_wrapper
from tensorflow_addons.seq2seq import beam_search_decoder, gather_tree
@pytest.mark.usefixtures("run_custom_and_py_ops")
def test_gather_tree():
# (max_time = 3, batch_size = 2, beam_width = 3)
# create (batch_size, max_time, beam_width) matrix and transpose it
predicted_ids = np.array(
[[[1, 2, 3], [4, 5, 6], [7, 8, 9]], [[2, 3, 4], [5, 6, 7], [8, 9, 10]]],
dtype=np.int32,
).transpose([1, 0, 2])
parent_ids = np.array(
[[[0, 0, 0], [0, 1, 1], [2, 1, 2]], [[0, 0, 0], [1, 2, 0], [2, 1, 1]]],
dtype=np.int32,
).transpose([1, 0, 2])
# sequence_lengths is shaped (batch_size = 3)
max_sequence_lengths = [3, 3]
expected_result = np.array(
[[[2, 2, 2], [6, 5, 6], [7, 8, 9]], [[2, 4, 4], [7, 6, 6], [8, 9, 10]]]
).transpose([1 | , 0, 2])
res = gather_tree(
predicted_ids,
parent_ids,
max_sequence_lengths=max_sequence_lengths,
end_token=11,
)
np.testing.assert_equal(expected_result, res)
def _test_gather_tree_from_array(depth_ndims=0, merged_batch_beam=False):
array = np.array(
[
[[1, 2, 3], [4, 5, 6], [7, 8, 9], [0, 0, 0]],
[[2, 3, 4], [ | 5, 6, 7], [8, 9, 10], [11, 12, 0]],
]
).transpose([1, 0, 2])
parent_ids = np.array(
[
[[0, 0, 0], [0, 1, 1], [2, 1, 2], [-1, -1, -1]],
[[0, 0, 0], [1, 1, 0], [2, 0, 1], [0, 1, 0]],
]
).transpose([1, 0, 2])
expected_array = np.array(
[
[[2, 2, 2], [6, 5, 6], [7, 8, 9], [0, 0, 0]],
[[2, 3, 2], [7, 5, 7], [8, 9, 8], [11, 12, 0]],
]
).transpose([1, 0, 2])
sequence_length = [[3, 3, 3], [4, 4, 3]]
array = tf.convert_to_tensor(array, dtype=tf.float32)
parent_ids = tf.convert_to_tensor(parent_ids, dtype=tf.int32)
expected_array = tf.convert_to_tensor(expected_array, dtype=tf.float32)
max_time = tf.shape(array)[0]
batch_size = tf.shape(array)[1]
beam_width = tf.shape(array)[2]
def _tile_in_depth(tensor):
# Generate higher rank tensors by concatenating tensor and
# tensor + 1.
for _ in range(depth_ndims):
tensor = tf.stack([tensor, tensor + 1], -1)
return tensor
if merged_batch_beam:
array = tf.reshape(array, [max_time, batch_size * beam_width])
expected_array = tf.reshape(expected_array, [max_time, batch_size * beam_width])
if depth_ndims > 0:
array = _tile_in_depth(array)
expected_array = _tile_in_depth(expected_array)
sorted_array = beam_search_decoder.gather_tree_from_array(
array, parent_ids, sequence_length
)
np.testing.assert_equal(expected_array.numpy(), sorted_array.numpy())
@pytest.mark.usefixtures("run_custom_and_py_ops")
def test_gather_tree_from_array_scalar():
_test_gather_tree_from_array()
@pytest.mark.usefixtures("run_custom_and_py_ops")
def test_gather_tree_from_array_1d():
_test_gather_tree_from_array(depth_ndims=1)
@pytest.mark.usefixtures("run_custom_and_py_ops")
def test_gather_tree_from_array_1d_with_merged_batch_beam():
_test_gather_tree_from_array(depth_ndims=1, merged_batch_beam=True)
@pytest.mark.usefixtures("run_custom_and_py_ops")
def test_gather_tree_from_array_2d():
_test_gather_tree_from_array(depth_ndims=2)
@pytest.mark.usefixtures("run_custom_and_py_ops")
def test_gather_tree_from_array_complex_trajectory():
# Max. time = 7, batch = 1, beam = 5.
array = np.expand_dims(
np.array(
[
[[25, 12, 114, 89, 97]],
[[9, 91, 64, 11, 162]],
[[34, 34, 34, 34, 34]],
[[2, 4, 2, 2, 4]],
[[2, 3, 6, 2, 2]],
[[2, 2, 2, 3, 2]],
[[2, 2, 2, 2, 2]],
]
),
-1,
)
parent_ids = np.array(
[
[[0, 0, 0, 0, 0]],
[[0, 0, 0, 0, 0]],
[[0, 1, 2, 3, 4]],
[[0, 0, 1, 2, 1]],
[[0, 1, 1, 2, 3]],
[[0, 1, 3, 1, 2]],
[[0, 1, 2, 3, 4]],
]
)
expected_array = np.expand_dims(
np.array(
[
[[25, 25, 25, 25, 25]],
[[9, 9, 91, 9, 9]],
[[34, 34, 34, 34, 34]],
[[2, 4, 2, 4, 4]],
[[2, 3, 6, 3, 6]],
[[2, 2, 2, 3, 2]],
[[2, 2, 2, 2, 2]],
]
),
-1,
)
sequence_length = [[4, 6, 4, 7, 6]]
array = tf.convert_to_tensor(array, dtype=tf.float32)
parent_ids = tf.convert_to_tensor(parent_ids, dtype=tf.int32)
expected_array = tf.convert_to_tensor(expected_array, dtype=tf.float32)
sorted_array = beam_search_decoder.gather_tree_from_array(
array, parent_ids, sequence_length
)
np.testing.assert_equal(expected_array.numpy(), sorted_array.numpy())
def basic_test_array_shape_dynamic_checks(
static_shape, dynamic_shape, batch_size, beam_width, is_valid=True
):
@tf.function(input_signature=(tf.TensorSpec(dynamic_shape, dtype=tf.float32),))
def _test_body(t):
beam_search_decoder._check_batch_beam(t, batch_size, beam_width)
t = tf.random.uniform(static_shape, dtype=tf.float32)
if is_valid:
_test_body(t)
else:
with pytest.raises(tf.errors.InvalidArgumentError):
_test_body(t)
@pytest.mark.usefixtures("maybe_run_functions_eagerly")
def test_array_shape_dynamic_checks():
basic_test_array_shape_dynamic_checks(
(8, 4, 5, 10), (None, None, 5, 10), 4, 5, is_valid=True
)
basic_test_array_shape_dynamic_checks(
(8, 20, 10), (None, None, 10), 4, 5, is_valid=True
)
basic_test_array_shape_dynamic_checks(
(8, 21, 10), (None, None, 10), 4, 5, is_valid=False
)
basic_test_array_shape_dynamic_checks(
(8, 4, 6, 10), (None, None, None, 10), 4, 5, is_valid=False
)
basic_test_array_shape_dynamic_checks((8, 4), (None, None), 4, 5, is_valid=False)
def test_array_shape_static_checks():
assert (
beam_search_decoder._check_static_batch_beam_maybe(
tf.TensorShape([None, None, None]), 3, 5
)
is True
)
assert (
beam_search_decoder._check_static_batch_beam_maybe(
tf.TensorShape([15, None, None]), 3, 5
)
is True
)
assert (
beam_search_decoder._check_static_batch_beam_maybe(
tf.TensorShape([16, None, None]), 3, 5
)
is False
)
assert (
beam_search_decoder._check_static_batch_beam_maybe(
tf.TensorShape([3, 5, None]), 3, 5
)
is True
)
assert (
beam_search_decoder._check_static_batch_beam_maybe(
tf.TensorShape([3, 6, None]), 3, 5
)
is False
)
assert (
beam_search_decoder._check_static_batch_beam_maybe(
tf.TensorShape([5, 3, None]), 3, 5
)
is False
)
def test_eos_masking():
probs = tf.constant(
[
[
[-0.2, -0.2, -0.2, -0.2, -0.2],
[-0.3, -0.3, -0.3, 3, 0],
[5, 6, 0, 0, 0],
],
[[-0.2, -0.2, -0.2, -0.2, 0], [-0.3, -0.3, -0.1, 3, 0], [5, 6, 3, 0, 0]],
|
Juniper/nova | nova/tests/unit/virt/libvirt/volume/test_iscsi.py | Python | apache-2.0 | 3,046 | 0.000328 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from os_brick import exception as os_brick_exception
from os_brick.initiator import connector
from nova.tests.unit.virt.libvirt.volume import test_volume
from nova.virt.libvirt.volume import iscsi
class LibvirtISCSIVolumeDriverTestCase(
test_volume.LibvirtISCSIVolumeBaseTestCase):
def test_libvirt_iscsi_driver(self, transport=None):
libvirt_driver = iscsi.LibvirtISCSIVolumeDriver(self.fake_host)
self.assertIsInstance(libvirt_driver.connector,
connector.ISCSIConnector)
def test_libvirt_iscsi_driver_get_config(self):
libvirt_driver = iscsi.LibvirtISCSIVolumeDriver(self.fake_host)
device_path = '/dev/fake- | dev'
connection_info = {'data': {'device_path': device_path}}
conf = libvirt_driver.get_config(connection_ | info, self.disk_info)
tree = conf.format_dom()
self.assertEqual('block', tree.get('type'))
self.assertEqual(device_path, tree.find('./source').get('dev'))
self.assertEqual('raw', tree.find('./driver').get('type'))
self.assertEqual('native', tree.find('./driver').get('io'))
@mock.patch.object(iscsi.LOG, 'warning')
def test_libvirt_iscsi_driver_disconnect_volume_with_devicenotfound(self,
mock_LOG_warning):
device_path = '/dev/fake-dev'
connection_info = {'data': {'device_path': device_path}}
libvirt_driver = iscsi.LibvirtISCSIVolumeDriver(self.fake_host)
libvirt_driver.connector.disconnect_volume = mock.MagicMock(
side_effect=os_brick_exception.VolumeDeviceNotFound(
device=device_path))
libvirt_driver.disconnect_volume(connection_info, device_path,
mock.sentinel.instance)
msg = mock_LOG_warning.call_args_list[0]
self.assertIn('Ignoring VolumeDeviceNotFound', msg[0][0])
def test_extend_volume(self):
device_path = '/dev/fake-dev'
connection_info = {'data': {'device_path': device_path}}
libvirt_driver = iscsi.LibvirtISCSIVolumeDriver(self.fake_host)
libvirt_driver.connector.extend_volume = mock.MagicMock(return_value=1)
new_size = libvirt_driver.extend_volume(connection_info,
mock.sentinel.instance)
self.assertEqual(1, new_size)
libvirt_driver.connector.extend_volume.assert_called_once_with(
connection_info['data'])
|
awsdocs/aws-doc-sdk-examples | python/example_code/dynamodb/GettingStarted/MoviesDeleteTable.py | Python | apache-2.0 | 626 | 0.003195 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""
Purpose
Shows how to delete an Amazon DynamoDB ta | ble.
"""
# snippet-start:[dynamodb.python.codeexample.MoviesDeleteTable]
import boto3
def delete_movie_table(dynamodb=None):
if not dynamodb:
dynamodb = boto3.resource('dynamodb', endpoint_url="http://localhost:8000")
table = dynamodb.Table('Movies')
table.delete()
if __name__ == '__main__':
delete_movie_table()
print("Movies table deleted.")
# snippet-end:[dynamodb.python.codeexampl | e.MoviesDeleteTable]
|
GoogleCloudPlatform/cloud-ops-sandbox | tests/provisioning/test_runner.py | Python | apache-2.0 | 830 | 0 | #!/usr/bin/env python3
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this fil | e except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under | the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
test_suite = unittest.TestLoader().discover(pattern='*_test.py', start_dir='.')
result = unittest.TextTestRunner(verbosity=2, failfast=True).run(test_suite)
if result.wasSuccessful():
exit(0)
else:
exit(1)
|
cmoutard/mne-python | mne/viz/epochs.py | Python | bsd-3-clause | 62,694 | 0.000175 | """Functions to plot epochs data
"""
# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Denis Engemann <denis.engemann@gmail.com>
# Martin Luessi <mluessi@nmr.mgh.harvard.edu>
# Eric Larson <larson.eric.d@gmail.com>
# Jaakko Leppakangas <jaeilepp@student.jyu.fi>
#
# License: Simplified BSD
from functools import partial
import copy
import warnings
import numpy as np
from ..utils import verbose, get_config, set_config, logger
from ..io.pick import pick_types, channel_type
from ..io.proj import setup_proj
from ..fixes import Counter, _in1d
from ..time_frequency import psd_multitaper
from .utils import (tight_layout, figure_nobar, _toggle_proj, _toggle_options,
_layout_figure, _setup_vmin_vmax, _channels_changed,
_plot_raw_onscroll, _onclick_help, plt_show)
from ..defaults import _handle_default
def plot_epochs_image(epochs, picks=None, sigma=0., vmin=None,
vmax=None, colorbar=True, order=None, show=True,
units=None, scalings=None, cmap='RdBu_r',
fig=None, overlay_times=None):
"""Plot Event Related Potential / Fields image
Parameters
----------
epochs : instance of Epochs
The epochs
picks : int | array-like of int | None
The indices of the channels to consider. If None, the first
five good channels are plotted.
sigma : float
The standard deviation of the Gaussian smoothing to apply along
the epoch axis to apply in the image. If 0., no smoothing is applied.
vmin : float
The min value in the image. The unit is uV for EEG channels,
fT for magnetometers and fT/cm for gradiometers
vmax : float
The max value in the image. The unit is uV for EEG channels,
fT for magnetometers and fT/cm for gradiometers
colorbar : bool
Display or not a colorbar
order : None | array of int | callable
If not None, order is used to reorder the epochs on the y-axis
of the image. If it's an array of int it should be of length
the number of good epochs. If it's a callable the arguments
passed are the times vector and the data as 2d array
(data.shape[1] == len(times)
show : bool
Show figure if True.
units : dict | None
The units of the channel types used for axes lables. If None,
defaults to `units=dict(eeg='uV', grad='fT/cm', mag='fT')`.
scalings : dict | None
The scalings of the channel types to be applied for plotting.
If | None, defaults to `scalings=dict(eeg=1e6, grad=1e13, mag=1e15,
eog=1e6)`
cmap : matplotlib colormap
Colormap.
fig : matplotlib figure | None
Figure instance to draw the image to. Figure must contain two axes for
drawing the single trials and evoked responses. If None a new figure is
cre | ated. Defaults to None.
overlay_times : array-like, shape (n_epochs,) | None
If not None the parameter is interpreted as time instants in seconds
and is added to the image. It is typically useful to display reaction
times. Note that it is defined with respect to the order
of epochs such that overlay_times[0] corresponds to epochs[0].
Returns
-------
figs : the list of matplotlib figures
One figure per channel displayed
"""
from scipy import ndimage
units = _handle_default('units', units)
scalings = _handle_default('scalings', scalings)
import matplotlib.pyplot as plt
if picks is None:
picks = pick_types(epochs.info, meg=True, eeg=True, ref_meg=False,
exclude='bads')[:5]
if set(units.keys()) != set(scalings.keys()):
raise ValueError('Scalings and units must have the same keys.')
picks = np.atleast_1d(picks)
if fig is not None and len(picks) > 1:
raise ValueError('Only single pick can be drawn to a figure.')
evoked = epochs.average(picks)
data = epochs.get_data()[:, picks, :]
scale_vmin = True if vmin is None else False
scale_vmax = True if vmax is None else False
vmin, vmax = _setup_vmin_vmax(data, vmin, vmax)
if overlay_times is not None and len(overlay_times) != len(data):
raise ValueError('size of overlay_times parameter (%s) do not '
'match the number of epochs (%s).'
% (len(overlay_times), len(data)))
if overlay_times is not None:
overlay_times = np.array(overlay_times)
times_min = np.min(overlay_times)
times_max = np.max(overlay_times)
if ((times_min < epochs.tmin) or (times_max > epochs.tmax)):
warnings.warn('Some values in overlay_times fall outside of '
'the epochs time interval (between %s s and %s s)' %
(epochs.tmin, epochs.tmax))
figs = list()
for i, (this_data, idx) in enumerate(zip(np.swapaxes(data, 0, 1), picks)):
if fig is None:
this_fig = plt.figure()
else:
this_fig = fig
figs.append(this_fig)
ch_type = channel_type(epochs.info, idx)
if ch_type not in scalings:
# We know it's not in either scalings or units since keys match
raise KeyError('%s type not in scalings and units' % ch_type)
this_data *= scalings[ch_type]
this_order = order
if callable(order):
this_order = order(epochs.times, this_data)
if this_order is not None and (len(this_order) != len(this_data)):
raise ValueError('size of order parameter (%s) does not '
'match the number of epochs (%s).'
% (len(this_order), len(this_data)))
this_overlay_times = None
if overlay_times is not None:
this_overlay_times = overlay_times
if this_order is not None:
this_order = np.asarray(this_order)
this_data = this_data[this_order]
if this_overlay_times is not None:
this_overlay_times = this_overlay_times[this_order]
if sigma > 0.:
this_data = ndimage.gaussian_filter1d(this_data, sigma=sigma,
axis=0)
plt.figure(this_fig.number)
ax1 = plt.subplot2grid((3, 10), (0, 0), colspan=9, rowspan=2)
if scale_vmin:
vmin *= scalings[ch_type]
if scale_vmax:
vmax *= scalings[ch_type]
im = ax1.imshow(this_data,
extent=[1e3 * epochs.times[0], 1e3 * epochs.times[-1],
0, len(data)],
aspect='auto', origin='lower', interpolation='nearest',
vmin=vmin, vmax=vmax, cmap=cmap)
if this_overlay_times is not None:
plt.plot(1e3 * this_overlay_times, 0.5 + np.arange(len(this_data)),
'k', linewidth=2)
ax2 = plt.subplot2grid((3, 10), (2, 0), colspan=9, rowspan=1)
if colorbar:
ax3 = plt.subplot2grid((3, 10), (0, 9), colspan=1, rowspan=3)
ax1.set_title(epochs.ch_names[idx])
ax1.set_ylabel('Epochs')
ax1.axis('auto')
ax1.axis('tight')
ax1.axvline(0, color='m', linewidth=3, linestyle='--')
evoked_data = scalings[ch_type] * evoked.data[i]
ax2.plot(1e3 * evoked.times, evoked_data)
ax2.set_xlabel('Time (ms)')
ax2.set_xlim([1e3 * evoked.times[0], 1e3 * evoked.times[-1]])
ax2.set_ylabel(units[ch_type])
evoked_vmin = min(evoked_data) * 1.1 if scale_vmin else vmin
evoked_vmax = max(evoked_data) * 1.1 if scale_vmax else vmax
if scale_vmin or scale_vmax:
evoked_vmax = max(np.abs([evoked_vmax, evoked_vmin]))
evoked_vmin = -evoked_vmax
ax2.set_ylim([evoked_vmin, evoked_vmax])
ax2.axvline(0, color='m', linewidth=3, linestyle='--')
if colorbar:
plt.colorbar(im, cax=ax3)
tight_layout(fig=this_fig)
plt_show(show)
return fi |
111pontes/ydk-py | cisco-ios-xe/ydk/models/cisco_ios_xe/_meta/_CISCO_SUBSCRIBER_SESSION_TC_MIB.py | Python | apache-2.0 | 2,087 | 0.01677 |
import re
import collections
from enum import Enum
from ydk._core._dm_meta_info import _MetaInfoClassMember, _MetaInfoClass, _MetaInfoEnum
from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict
from ydk._core._dm_meta_info import ATTRIBUTE, REFERENCE_CLASS, REF | ERENCE_LIST, REFERENCE_LEAFLIST, REFERENCE_IDENTITY_CLASS, REFERENCE_ENUM_CLASS, REFERENCE_BITS, REFERENCE_UNION, ANYXML_CLASS
from ydk.errors import YPYError, YPYModelError
from ydk.providers._importer import _yang_ns
_meta_table = {
'SubsessiontypeEnum' : _MetaInfoEnum('SubsessiontypeEnum', 'ydk.models.cisco_ios_xe.CISCO_SUBSCRIBER_SESSION_TC_MIB',
{
' | all':'all',
'other':'other',
'pppSubscriber':'pppSubscriber',
'pppoeSubscriber':'pppoeSubscriber',
'l2tpSubscriber':'l2tpSubscriber',
'l2fSubscriber':'l2fSubscriber',
'ipInterfaceSubscriber':'ipInterfaceSubscriber',
'ipPktSubscriber':'ipPktSubscriber',
'ipDhcpv4Subscriber':'ipDhcpv4Subscriber',
'ipRadiusSubscriber':'ipRadiusSubscriber',
'l2MacSubscriber':'l2MacSubscriber',
'l2Dhcpv4Subscriber':'l2Dhcpv4Subscriber',
'l2RadiusSubscriber':'l2RadiusSubscriber',
}, 'CISCO-SUBSCRIBER-SESSION-TC-MIB', _yang_ns._namespaces['CISCO-SUBSCRIBER-SESSION-TC-MIB']),
'SubsessionstateEnum' : _MetaInfoEnum('SubsessionstateEnum', 'ydk.models.cisco_ios_xe.CISCO_SUBSCRIBER_SESSION_TC_MIB',
{
'other':'other',
'pending':'pending',
'up':'up',
}, 'CISCO-SUBSCRIBER-SESSION-TC-MIB', _yang_ns._namespaces['CISCO-SUBSCRIBER-SESSION-TC-MIB']),
'SubsessionredundancymodeEnum' : _MetaInfoEnum('SubsessionredundancymodeEnum', 'ydk.models.cisco_ios_xe.CISCO_SUBSCRIBER_SESSION_TC_MIB',
{
'none':'none',
'other':'other',
'active':'active',
'standby':'standby',
}, 'CISCO-SUBSCRIBER-SESSION-TC-MIB', _yang_ns._namespaces['CISCO-SUBSCRIBER-SESSION-TC-MIB']),
}
|
petrjasek/superdesk-core | superdesk/publish/formatters/__init__.py | Python | agpl-3.0 | 5,788 | 0.001209 | # -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
import logging
from lxml import etree
from superdesk.metadata.item import ITEM_TYPE, CONTENT_TYPE, FORMATS, FORMAT
from superdesk.etree import parse_html
from superdesk.text_utils import get_text
from superdesk.publish import registered_transmitters
formatters = []
logger = logging.getLogger(__name__)
class FormatterRegistry(type):
"""Registry metaclass for formatters."""
def __init__(cls, name, bases, attrs):
"""Register sub-classes of Formatter class when defined."""
super(FormatterRegistry, cls).__init__(name, bases, attrs)
if name != "Formatter":
formatters.append(cls)
class Formatter(metaclass=FormatterRegistry):
"""Base Formatter class for all types of Formatters like News ML 1.2, News ML G2, NITF, etc."""
def __init__(self):
self.can_preview = False
self.can_export = False
self.destination = None
self.subscriber = None
def format(self, article, subscriber, codes=None):
"""Formats the article and returns the transformed string"""
raise NotImplementedError()
def export(self, article, subscriber, codes=None):
"""Formats the article and returns the output string for export"""
raise NotImplementedError()
def can_format(self, format_type, article):
"""Test if formatter can format for given article."""
raise NotImplementedError()
def append_body_footer(self, article):
"""
Checks if the article has any Public Service Announcements and if available appends each of them to the body.
:return: body with public service announcements.
"""
try:
article["body_html"] = article["body_html"].replace("<br>", "<br/>")
except KeyError:
pass
body = ""
if article[ITEM_TYPE] in [CONTENT_TYPE.TEXT, CONTENT_TYPE.PREFORMATTED]:
body = article.get("body_html", "")
elif article[ITEM_TYPE] in [CONTENT_TYPE.AUDIO, CONTENT_TYPE.PICTURE, CONTENT_TYPE.VIDEO]:
body = article.get("description", "")
if body and article.get(FORMAT, "") == FORMATS.PRESERVED:
body = body.replace("\n", "\r\n").replace("\r\r", "\r")
parsed = parse_html(body, content="html")
for br in parsed.xpath("//br"):
br.tail = "\r\n" + br.tail if br.tail else "\r\n"
etree.strip_elements(parsed, "br", with_tail=False)
body = etree.tostring(parsed, encoding="unicode")
if body and article.get("body_footer"):
footer = article.get("body_footer")
if article.get(FORMAT, "") == FORMATS.PRESERVED:
body = "{}\r\n{}".format(body, get_text(footer))
else:
body = "{}{}".format(body, footer)
return body
def append_legal(self, article, truncate=False):
"""
Checks if the article has the legal flag on and adds 'Legal:' to the slugline
:param article: article having the slugline
:param truncate: truncates the slugline to 24 characters
:return: updated slugline
"""
slugline = article.get("slugline", "") or ""
if article.get("flags", {}).get("marked_for_legal", False):
slugline = "{}: {}".format("Legal", slugline)
if truncate:
slugline = slugline[:24]
return slugline
def map_html_to_xml(self, element, html):
"""
Map the html text tags to xml
:param etree.Element element: The xml element to populate
:param str html: the html to parse the text from
:return:
"""
root = parse_html(html, content="html")
# if there are no ptags just br
if not len(root.xpath("//p")) and len(root.xpath("//br")):
para = etree.SubElement(element, "p")
for br in root.xpath("//br"):
etree.SubElement(para, "br").text = br.text
for p in root.xpath("//p"):
para = etree.SubElement(element, "p")
if len(p.xpath(".//br")) > 0:
for br in p.xpath(".//br"):
etree.SubElement(para, "br").text = br.text
para.text = etree.tostring(p, encoding="unicode", method="text")
# there neither ptags pr br's
if len(list(element)) == 0:
etree.SubElement(element, "p").text = etree.tostring(root, encoding="unicode", method="text")
def set_destination(self, destination=None, subscriber=None):
self.destination = destination
self.subscriber = subscriber
def get_formatter(format_type, article):
    """Return the first registered formatter that can format *article* as *format_type*.

    Falls through (returning None) when no registered formatter matches.
    """
    for formatter_cls in formatters:
        candidate = formatter_cls()
        if candidate.can_format(format_type, article):
            return candidate
    return None
def get_all_formatters():
    """Instantiate and return one formatter per registered formatter class."""
    return [cls() for cls in formatters]
from .nitf_formatter import NITFF | ormatter # NOQA
from .ninjs_formatter import NINJSFormatter, NINJS2Formatter # NOQA
from .newsml_1_2_formatter import NewsML12Formatter # NOQA
from .newsml_g2_formatter import NewsMLG2Formatter # NOQA
from .email_formatter import EmailFormatter # NOQA
from .ninjs_newsroom_formatter import NewsroomNinjsFormatter # NOQA
from .idml_formatter import IDMLFormatter # NOQA
from .ninjs_ftp_formatter import FTPNinjsFormatt | er # NOQA
from .imatrics import IMatricsFormatter # NOQA
|
cfriedt/gnuradio | grc/core/Block.py | Python | gpl-3.0 | 30,996 | 0.001226 | """
Copyright 2008-2015 Free Software Foundation, Inc.
This file is part of GNU Radio
GNU Radio Companion is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
GNU Radio Companion is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
"""
import collections
import itertools
from Cheetah.Template import Template
from .utils import epy_block_io, odict
from . Constants import (
BLOCK_FLAG_NEED_QT_GUI, BLOCK_FLAG_NEED_WX_GUI,
ADVANCED_PARAM_TAB, DEFAULT_PARAM_TAB,
BLOCK_FLAG_THROTTLE, BLOCK_FLAG_DISABLE_BYPASS,
BLOCK_FLAG_DEPRECATED,
BLOCK_ENABLED, BLOCK_BYPASSED, BLOCK_DISABLED
)
from . Element import Element
def _get_keys(lst):
    """Return the keys of every element in *lst*, preserving order."""
    keys = []
    for item in lst:
        keys.append(item.get_key())
    return keys


def _get_elem(lst, key):
    """Return the element of *lst* whose get_key() equals *key*.

    Raises:
        ValueError: when no element carries *key*; the message lists the
            keys that are present.
    """
    keys = _get_keys(lst)
    if key not in keys:
        raise ValueError('Key "{0}" not found in {1}.'.format(key, keys))
    return lst[keys.index(key)]
class Block(Element):
is_block = True
def __init__(self, flow_graph, n):
"""
Make a new block from nested data.
Args:
flow: graph the parent element
n: the nested odict
Returns:
block a new block
"""
# Grab the data
self._doc = (n.find('doc') or '').strip('\n').replace('\\\n', '')
self._imports = map(lambda i: i.strip(), n.findall('import'))
self._make = n.find('make')
self._var_make = n.find('var_make')
self._checks = n.findall('check')
self._callbacks = n.findall('callback')
self._bus_structure_source = n.find('bus_structure_source') or ''
self._bus_structure_sink = n.find('bus_structure_sink') or ''
self.port_counters = [itertools.count(), itertools.count()]
# Build the block
Element.__init__(self, flow_graph)
# Grab the data
params = n.findall('param')
sources = n.findall('source')
sinks = n.findall('sink')
self._name = n.find('name')
self._key = n.find('key')
category = (n.find('category') or '').split('/')
self.category = [cat.strip() for cat in category if cat.strip()]
self._flags = n.find('flags') or ''
# Backwards compatibility
if n.find('throttle') and BLOCK_FLAG_THROTTLE not in self._flags:
self._flags += BLOCK_FLAG_THROTTLE
self._grc_source = n.find('grc_source') or ''
self._block_wrapper_path = n.find('block_wrapper_path')
self._bussify_sink = n.find('bus_sink')
self._bussify_source = n.find('bus_source')
self._var_value = n.find('var_value') or '$value'
# Get list of param tabs
n_tabs = n.find('param_tab_order') or None
self._param_tab_labels = n_tabs.findall('tab') if n_tabs is not None else [DEFAULT_PARAM_TAB]
# Create the param objects
self._params = list()
# Add the id param
self.get_params().append(self.get_parent().get_parent().Param(
block=self,
n=odict({
'name': 'ID',
'key': 'id',
'type': 'id',
})
))
self.get_params().append(self.get_parent().get_parent().Param(
block=self,
n=odict({
'name': 'Enabled',
'key': '_enabled',
'type': 'raw',
'value': 'True',
'hide': 'all',
})
))
for param in itertools.imap(lambda n: self.get_parent().get_parent().Param(block=self, n=n), params):
key = param.get_key()
# Test against repeated keys
if key in self.get_param_keys():
raise Exception('Key "{0}" already exists in params'.format(key))
# Store the param
self.get_params().append(param)
# Create the source objects
self._sources = list()
for source in map(lambda n: self.get_parent().get_parent().Port(block=self, n=n, dir='source'), sources):
key = source.get_key()
# Test against repeated keys
if key in self.get_source_keys():
raise Exception('Key "{0}" already exists in sources'.format(key))
# Store the port
self.get_sources().append(source)
self.back_ofthe_bus(self.get_sources())
# Create the sink objects
self._sinks = list()
for sink in map(lambda n: self.get_parent().get_parent().Port(block=self, n=n, dir='sink'), sinks):
| key = sink.ge | t_key()
# Test against repeated keys
if key in self.get_sink_keys():
raise Exception('Key "{0}" already exists in sinks'.format(key))
# Store the port
self.get_sinks().append(sink)
self.back_ofthe_bus(self.get_sinks())
self.current_bus_structure = {'source': '', 'sink': ''}
# Virtual source/sink and pad source/sink blocks are
# indistinguishable from normal GR blocks. Make explicit
# checks for them here since they have no work function or
# buffers to manage.
self.is_virtual_or_pad = self._key in (
"virtual_source", "virtual_sink", "pad_source", "pad_sink")
self.is_variable = self._key.startswith('variable')
self.is_import = (self._key == 'import')
# Disable blocks that are virtual/pads or variables
if self.is_virtual_or_pad or self.is_variable:
self._flags += BLOCK_FLAG_DISABLE_BYPASS
if not (self.is_virtual_or_pad or self.is_variable or self._key == 'options'):
self.get_params().append(self.get_parent().get_parent().Param(
block=self,
n=odict({'name': 'Block Alias',
'key': 'alias',
'type': 'string',
'hide': 'part',
'tab': ADVANCED_PARAM_TAB
})
))
if (len(sources) or len(sinks)) and not self.is_virtual_or_pad:
self.get_params().append(self.get_parent().get_parent().Param(
block=self,
n=odict({'name': 'Core Affinity',
'key': 'affinity',
'type': 'int_vector',
'hide': 'part',
'tab': ADVANCED_PARAM_TAB
})
))
if len(sources) and not self.is_virtual_or_pad:
self.get_params().append(self.get_parent().get_parent().Param(
block=self,
n=odict({'name': 'Min Output Buffer',
'key': 'minoutbuf',
'type': 'int',
'hide': 'part',
'value': '0',
'tab': ADVANCED_PARAM_TAB
})
))
self.get_params().append(self.get_parent().get_parent().Param(
block=self,
n=odict({'name': 'Max Output Buffer',
'key': 'maxoutbuf',
'type': 'int',
'hide': 'part',
'value': '0',
'tab': ADVANCED_PARAM_TAB
})
))
self.get_params().append(self.get_parent().get_parent().Param(
block=self,
n=odict({'name': 'Comment',
'key': 'comment',
'type': ' |
opendatateam/udata | udata/sitemap.py | Python | agpl-3.0 | 977 | 0 | from functools import wraps
from flask import current_app, request
from flask_sitemap import Sitemap, sitemap_page_needed
from udata.app import cache
sitemap = Sitemap()
CACHE_KEY = 'sitemap-page-{0}'
@sitemap_page_needed.connect
def create_page(app, page, urlset):
    """Render a sitemap page on flask-sitemap's signal and store it in the cache."""
    cache.set(CACHE_KEY.format(page), sitemap.render_page(urlset=urlset))
def load_page(fn):
    """View decorator serving a sitemap page from the cache, falling back to *fn*."""
    @wraps(fn)
    def loader(*args, **kwargs):
        cached = cache.get(CACHE_KEY.format(kwargs.get('page')))
        # a cache miss (None/empty) triggers a regular render
        return cached or fn(*args, **kwargs)
    return loader
def set_scheme(fn):
    """View decorator aligning the sitemap URL scheme with the current request."""
    @wraps(fn)
    def set_scheme_on_call(*args, **kwargs):
        if request.is_secure:
            current_app.config['SITEMAP_URL_SCHEME'] = 'https'
        else:
            current_app.config['SITEMAP_URL_SCHEME'] = 'http'
        return fn(*args, **kwargs)
    return set_scheme_on_call
def init_app(app):
    """Wire the flask-sitemap extension into *app* with caching and scheme decorators."""
    app.config['SITEMAP_VIEW_DECORATORS'] = [load_page, set_scheme]
    # decorators are supplied via app config above, not on the extension itself
    sitemap.decorators = []
    sitemap.init_app(app)
|
mwcraig/conda-build | conda_build/metadata.py | Python | bsd-3-clause | 20,286 | 0.004091 | from __future__ import absolute_import, division, print_function
import os
import re
import sys
from os.path import isdir, isfile, join
from conda.compat import iteritems, PY3, text_type
from conda.utils import memoized, md5_file
import conda.config as cc
from conda.resolve import MatchSpec
from conda.cli.common import specs_from_url
from . import exceptions
try:
import yaml
# try to import C loader
try:
from yaml import CBaseLoader as BaseLoader
except ImportError:
from yaml import BaseLoader
except ImportError:
sys.exit('Error: could not import yaml (required to read meta.yaml '
'files of conda recipes)')
from conda_build.config import config
from conda_build.utils import comma_join
def ns_cfg():
    """Build the namespace used to evaluate meta.yaml line selectors.

    Keys cover the target platform (linux/osx/win variants and widths), the
    configured python/numpy/perl versions, and — merged in last, so they win
    on a name collision — all environment variables.
    """
    # Remember to update the docs of any of this changes
    plat = cc.subdir
    py = config.CONDA_PY
    np = config.CONDA_NPY
    pl = config.CONDA_PERL
    assert isinstance(py, int), py
    d = {
        'linux': plat.startswith('linux-'),
        'linux32': bool(plat == 'linux-32'),
        'linux64': bool(plat == 'linux-64'),
        'arm': plat.startswith('linux-arm'),
        'osx': plat.startswith('osx-'),
        'unix': plat.startswith(('linux-', 'osx-')),
        'win': plat.startswith('win-'),
        'win32': bool(plat == 'win-32'),
        'win64': bool(plat == 'win-64'),
        'pl': pl,
        'py': py,
        'py3k': bool(30 <= py < 40),
        'py2k': bool(20 <= py < 30),
        'py26': bool(py == 26),
        'py27': bool(py == 27),
        'py33': bool(py == 33),
        'py34': bool(py == 34),
        'py35': bool(py == 35),
        'np': np,
        'os': os,
        'environ': os.environ,
    }
    for machine in cc.non_x86_linux_machines:
        d[machine] = bool(plat == 'linux-%s' % machine)
    d.update(os.environ)
    return d
# Matches "<content> [<selector>]" with an optional "#" comment marker before
# the bracket; the (?(2).*) conditional only allows trailing text when the
# comment marker was present.
sel_pat = re.compile(r'(.+?)\s*(#.*)?\[(.+)\](?(2).*)$')


def select_lines(data, namespace):
    """Filter meta.yaml *data* according to trailing line selectors.

    A selector is a ``# [expr]`` suffix; the line is kept (without its
    selector) only when ``expr`` evaluates truthily in *namespace* (see
    ns_cfg()).  Comment-only lines are dropped; unselected lines are kept
    verbatim.  An invalid selector expression aborts via sys.exit().

    :param data: raw meta.yaml text
    :param namespace: globals used to evaluate selector expressions
    :return: the surviving lines joined by newlines, with a trailing newline
    """
    lines = []
    for i, line in enumerate(data.splitlines()):
        line = line.rstrip()
        if line.lstrip().startswith('#'):
            # Don't bother with comment only lines
            continue
        m = sel_pat.match(line)
        if m:
            cond = m.group(3)
            try:
                if eval(cond, namespace, {}):
                    lines.append(m.group(1))
            # was a bare "except:", which also swallowed SystemExit and
            # KeyboardInterrupt raised while evaluating the selector
            except Exception:
                sys.exit('''\
Error: Invalid selector in meta.yaml line %d:
%s
''' % (i + 1, line))
                # (an unreachable second sys.exit(1) used to follow here;
                # sys.exit() above already raises SystemExit)
            continue
        lines.append(line)
    return '\n'.join(lines) + '\n'
@memoized
def yamlize(data):
    """Parse *data* as YAML (memoized), translating parser failures.

    On a YAML parse error, text containing ``{{`` is assumed to be an
    unrendered Jinja2 template: the error becomes UnableToParseMissingJinja2
    when jinja2 is not importable, or UnableToParse otherwise.  A parse error
    without ``{{`` propagates as the raw yaml ParserError.
    """
    try:
        return yaml.load(data, Loader=BaseLoader)
    except yaml.parser.ParserError as e:
        if '{{' in data:
            try:
                import jinja2
                jinja2  # Avoid pyflakes failure: 'jinja2' imported but unused
            except ImportError:
                raise exceptions.UnableToParseMissingJinja2(original=e)
        raise exceptions.UnableToParse(original=e)
# License families a recipe may declare in about/license_family.
allowed_license_families = {
    'AGPL',
    'Apache',
    'BSD',
    'GPL2',
    'GPL3',
    'LGPL',
    'MIT',
    'Other',
    'PSF',
    'Proprietary',
    'Public-Domain',
}


def ensure_valid_license_family(meta):
    """Raise RuntimeError when meta declares an unknown about/license_family.

    Recipes that do not declare a license family pass silently.
    """
    try:
        family = meta['about']['license_family']
    except KeyError:
        # nothing declared -> nothing to validate
        return
    if family in allowed_license_families:
        return
    raise RuntimeError(exceptions.indent(
        "about/license_family '%s' not allowed. Allowed families are %s." %
        (family, comma_join(sorted(allowed_license_families)))))
def parse(data):
    """Parse meta.yaml text into a normalized dict.

    Pipeline: apply line selectors (select_lines/ns_cfg), YAML-parse
    (yamlize), then coerce well-known fields to canonical types — sections
    to dicts, list-valued keys to lists, string-valued keys to text, and
    yes/no-style flags to booleans — before validating the license family
    and sanitizing git fields.
    """
    data = select_lines(data, ns_cfg())
    res = yamlize(data)
    # ensure the result is a dict
    if res is None:
        res = {}
    # every known top-level section must be a dict (empty sections become {})
    for field in FIELDS:
        if field not in res:
            continue
        if not res[field]:
            res[field] = {}
        if not isinstance(res[field], dict):
            raise RuntimeError("The %s field should be a dict, not %s" %
                               (field, res[field].__class__.__name__))
    # ensure those are lists
    for field in ('source/patches',
                  'build/entry_points', 'build/script_env',
                  'build/features', 'build/track_features',
                  'requirements/build', 'requirements/run',
                  'requirements/conflicts', 'test/requires',
                  'test/files', 'test/commands', 'test/imports'):
        section, key = field.split('/')
        if res.get(section) is None:
            res[section] = {}
        if res[section].get(key, None) is None:
            res[section][key] = []
    # ensure those are strings
    for field in ('package/version', 'build/string', 'source/svn_rev',
                  'source/git_tag', 'source/git_branch', 'source/md5',
                  'source/git_rev', 'source/path'):
        section, key = field.split('/')
        if res.get(section) is None:
            res[section] = {}
        val = res[section].get(key, '')
        if val is None:
            val = ''
        res[section][key] = text_type(val)
    # ensure these fields are booleans
    trues = {'y', 'on', 'true', 'yes'}
    falses = {'n', 'no', 'false', 'off'}
    for field in ('build/osx_is_app', 'build/preserve_egg_dir',
                  'build/binary_relocation',
                  'build/detect_binary_files_with_prefix',
                  'build/skip', 'app/own_environment'):
        section, key = field.split('/')
        if res.get(section) is None:
            res[section] = {}
        try:
            val = res[section].get(key, '').lower()
        except AttributeError:
            # val wasn't a string
            continue
        if val in trues:
            res[section][key] = True
        elif val in falses:
            res[section][key] = False
    ensure_valid_license_family(res)
    return sanitize(res)
def sanitize(meta):
    """Return a copy of *meta* with aliased/deprecated fields normalized.

    Only the ``source`` section is currently rewritten (via _git_clean).
    """
    sanitized = dict(meta)  # shallow copy so the caller's dict is untouched
    for section, func in [('source', _git_clean)]:
        if section in sanitized:
            sanitized[section] = func(sanitized[section])
    return sanitized
def _git_clean(source_meta):
    """
    Reduce the redundancy in git specification by removing git_tag and
    git_branch.

    If one is specified, copy to git_rev.

    If more than one field is used to specified, exit
    and complain.
    """
    git_rev_tags_old = ('git_branch', 'git_tag')
    git_rev = 'git_rev'
    git_rev_tags = (git_rev,) + git_rev_tags_old
    # which of git_rev/git_branch/git_tag carry a non-empty value
    has_rev_tags = tuple(bool(source_meta[tag]) for
                         tag in git_rev_tags)
    if sum(has_rev_tags) > 1:
        # fixed typo: the message used to read "mulitple"
        msg = "Error: multiple git_revs:"
        msg += ', '.join("{}".format(key) for key, has in
                         zip(git_rev_tags, has_rev_tags) if has)
        sys.exit(msg)
    # make a copy of the input so we have no side-effects
    ret_meta = dict(source_meta)
    # loop over the old versions
    for key, has in zip(git_rev_tags[1:], has_rev_tags[1:]):
        # update if needed
        if has:
            ret_meta[git_rev_tags[0]] = ret_meta[key]
        # and remove (the legacy keys never survive, set or not)
        del ret_meta[key]
    return ret_meta
# If you update this please update the example in
# conda-docs/docs/source/build.rst
FIELDS = {
'package': ['name', 'version'],
'source': ['fn', 'url', 'md5', 'sha1', 'sha256', 'path',
'git_url', 'git_tag', 'git_branch', 'git_rev',
'hg_url', 'hg_tag',
'svn_url', 'svn_rev', 'svn_ignore_externals',
'patches'],
'build': ['number', 'string', 'entry_points', 'osx_is_app',
'features', 'track_features', 'preserve_egg_dir',
'no_link', 'binary_relocation', 'script', 'noarch_python',
'has_prefix_files', 'binary_has_prefix_files', 'script_env',
'detect_binary_files_with_prefix', 'rpaths',
'always_include_files', 'skip', 'msvc_compiler'],
'requirements': ['build', 'run', 'conflicts'],
'app': ['entry', 'icon', 'summary', 'type', 'cli_opts',
'own_environment'],
'test': ['requires', 'commands', 'files', 'imports'],
'about': ['home', 'lice |
allevin/PyGithub | scripts/add_attribute.py | Python | lgpl-3.0 | 6,009 | 0.00649 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
############################ Copyrights and license ############################
# #
# Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2014 Thialfihar <thi@thialfihar.org> #
# Copyright 2014 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2016 Peter Buckley <dx-pbuckley@users.noreply.github.com> #
# Copyright 2018 sfdye <tsfdye@gmail.com> #
# Copyright 2018 bbi-yggy <yossarian@blackbirdinteractive.com> #
# #
# This file is part of PyGithub. #
# http://pygithub.readthedocs.io/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of | the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. | #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
################################################################################
import os.path
import sys

# Usage: add_attribute.py <ClassName> <attributeName> <string|int|bool|datetime|class> [attributeClassType]
# Rewrites github/<ClassName>.py in place, inserting a new attribute in three
# spots: an alphabetically-placed @property, an _initAttributes default, and
# a _useAttributes parsing clause.
className, attributeName, attributeType = sys.argv[1:4]
if len(sys.argv) > 4:
    attributeClassType = sys.argv[4]
else:
    attributeClassType = ""
# attributeType -> (doc type for the docstring, assert type or None, RHS expression)
types = {
    "string": ("string", None, "self._makeStringAttribute(attributes[\"" + attributeName + "\"])"),
    "int": ("integer", None, "self._makeIntAttribute(attributes[\"" + attributeName + "\"])"),
    "bool": ("bool", None, "self._makeBoolAttribute(attributes[\"" + attributeName + "\"])"),
    "datetime": ("datetime.datetime", "(str, unicode)", "self._makeDatetimeAttribute(attributes[\"" + attributeName + "\"])"),
    "class": (":class:`" + attributeClassType + "`", None, "self._makeClassAttribute(" + attributeClassType + ", attributes[\"" + attributeName + "\"])"),
}
attributeDocType, attributeAssertType, attributeValue = types[attributeType]
fileName = os.path.join("github", className + ".py")
with open(fileName) as f:
    lines = list(f)
newLines = []
i = 0
added = False
isCompletable = True
isProperty = False
# Pass 1: copy lines until the alphabetical insertion point among the
# properties, then emit the new property there.
while not added:
    line = lines[i].rstrip()
    i += 1
    if line.startswith("class "):
        if "NonCompletableGithubObject" in line:
            isCompletable = False
    elif line == "    @property":
        isProperty = True
    elif line.startswith("    def "):
        attrName = line[8:-7]
        # Properties will be inserted after __repr__, but before any other function.
        if attrName != "__repr__" and (attrName == "_identity" or attrName > attributeName or not isProperty):
            if not isProperty:
                newLines.append("    @property")
            newLines.append("    def " + attributeName + "(self):")
            newLines.append("        \"\"\"")
            newLines.append("        :type: " + attributeDocType)
            newLines.append("        \"\"\"")
            if isCompletable:
                newLines.append("        self._completeIfNotSet(self._" + attributeName + ")")
            newLines.append("        return self._" + attributeName + ".value")
            newLines.append("")
            if isProperty:
                # re-emit the @property that belongs to the def we displaced
                newLines.append("    @property")
            added = True
        isProperty = False
    newLines.append(line)
added = False
inInit = False
# Pass 2: inside _initAttributes, insert the NotSet default in alphabetical
# order (attrName is sliced out of "        self._<name> = github.GithubObject.NotSet").
while not added:
    line = lines[i].rstrip()
    i += 1
    if line == "    def _initAttributes(self):":
        inInit = True
    if inInit:
        if not line or line.endswith(" = github.GithubObject.NotSet"):
            if line:
                attrName = line[14:-29]
            if not line or attrName > attributeName:
                newLines.append("        self._" + attributeName + " = github.GithubObject.NotSet")
                added = True
    newLines.append(line)
added = False
inUse = False
# Pass 3: inside _useAttributes, insert the parsing clause alphabetically;
# IndexError means we ran off the file end and insert at the very bottom.
while not added:
    try:
        line = lines[i].rstrip()
    except IndexError:
        line = ""
    i += 1
    if line == "    def _useAttributes(self, attributes):":
        inUse = True
    if inUse:
        if not line or line.endswith(" in attributes:  # pragma no branch"):
            if line:
                attrName = line[12:-36]
            if not line or attrName > attributeName:
                newLines.append("        if \"" + attributeName + "\" in attributes:  # pragma no branch")
                if attributeAssertType:
                    newLines.append("            assert attributes[\"" + attributeName + "\"] is None or isinstance(attributes[\"" + attributeName + "\"], " + attributeAssertType + "), attributes[\"" + attributeName + "\"]")
                newLines.append("            self._" + attributeName + " = " + attributeValue)
                added = True
    newLines.append(line)
# Copy whatever remains untouched.
while i < len(lines):
    line = lines[i].rstrip()
    i += 1
    newLines.append(line)
with open(fileName, "wb") as f:
    for line in newLines:
        f.write(line + "\n")
arendst/Sonoff-Tasmota | pio/http-uploader.py | Python | gpl-3.0 | 412 | 0.004854 | Import("env")
import os
# pio < 4.0.0
# from base64 import b64decode
# env.Replace(UPLOADER="pio\espupload.py")
# env.Replace(U | PLOADERFLAGS="")
# env.Replace(UPLOADCMD="$UPLOADER -u " + b64decode(ARGUMENTS.get("UPLOAD_PORT")) + " -f $SOURCES")
# pio >= 4.0.0
env.Replace(UPLOADER=os.path.join("pio", "espupload.py"))
env.Replace(UPLOADERFLAGS="")
env.Replace(UPLOADCMD="$UPLOADER -u $UPLOAD_PORT -f $S | OURCES")
|
owaiskhan/Retransmission-Combining | gnuradio-examples/python/network/vector_source.py | Python | gpl-3.0 | 2,271 | 0.004844 | #!/usr/bin/env python
#
# Copyright 2006,2010 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope th | at it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see | the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr
from gnuradio.eng_option import eng_option
from optparse import OptionParser
class vector_source(gr.top_block):
    """Flow graph streaming a repeating 1000-sample float ramp to a UDP sink."""

    def __init__(self, host, port, pkt_size, eof):
        gr.top_block.__init__(self, "vector_source")
        ramp = [0.01 * k for k in range(1000)]  # 0.00, 0.01, ..., 9.99
        src = gr.vector_source_f(ramp, True)  # True -> repeat forever
        sink = gr.udp_sink(gr.sizeof_float, host, port, pkt_size, eof=eof)
        self.connect(src, sink)
if __name__ == '__main__':
    # Python 2 script: parse CLI options and stream the ramp until Ctrl-C.
    parser = OptionParser(option_class=eng_option)
    # NOTE(review): the --host help text is missing its closing parenthesis.
    parser.add_option("", "--host", type="string", default="localhost",
                      help="Remote host name (domain name or IP address")
    parser.add_option("", "--port", type="int", default=65500,
                      help="port number to connect to")
    parser.add_option("", "--packet-size", type="int", default=1471,
                      help="packet size.")
    parser.add_option("", "--no-eof", action="store_true", default=False,
                      help="don't send EOF on disconnect")
    (options, args) = parser.parse_args()
    if len(args) != 0:
        parser.print_help()
        raise SystemExit, 1
    # Create an instance of a hierarchical block
    top_block = vector_source(options.host, options.port, options.packet_size,
                              not options.no_eof)
    try:
        # Run forever
        top_block.run()
    except KeyboardInterrupt:
        # Ctrl-C exits
        pass
|
ashwinreddy/rlg | rlg/tasks/snake.py | Python | mit | 106 | 0.009434 | fr | om task import Task
class SnakeTask(Task):
    """Task subclass fixed to the task name 'snake'."""

    def __init__(self):
        # Delegate all setup to the base Task with the hard-coded name.
        Task.__init__(self, 'snake')
|
liyueshining/moon | mocorderconverter/MocOrder.py | Python | mit | 4,028 | 0.000993 | from xml.dom.minidom import parseString
from xml.dom import minidom
from MocNode import MocNode
import os
import sys
# Radio access technologies; any other version string is treated as "ground".
radioTypes = ['GSM', 'UMTS', 'TD', 'AG', 'MCE']


def getMocOrder(path):
    """Generate a moc-order XML for every *-mocinfo-sdrm.xml file in *path*.

    :param path: directory to scan (non-recursive)
    """
    for afile in os.listdir(path):
        if afile.lower().endswith("-mocinfo-sdrm.xml"):
            # BUGFIX: open the file relative to *path*; the original opened
            # the bare listdir() name, which only worked when the script was
            # run from inside *path*.  The context manager also closes the
            # previously-leaked file handle.
            with open(os.path.join(path, afile), "r") as handle:
                mocinfoXML = handle.read()
            mocinfoXML = mocinfoXML.replace('encoding="GBK"', 'encoding="utf-8"')
            mocOrder = MocOrder(mocinfoXML, path)
            mocOrder.generateMocOrderXML()


def getType(version):
    """Return "radio" for known radio versions, "ground" for anything else."""
    if version in radioTypes:
        return "radio"
    return "ground"
class MocOrder:
    """Parses a mocinfo XML document and emits a dependency-ordered moc list.

    Python 2 code (print statements, the ``file`` builtin).
    """

    def __init__(self, mocinfoxml, path):
        # Raw mocinfo XML text to parse.
        self.mocinfoxml = mocinfoxml
        # Output directory for the generated moc-order XML.
        self.filePath = path
        # "version" attribute of the document root (e.g. 'GSM'), '' if absent.
        self.radioType = ''
        # Moc names in (re)ordered sequence.
        self.mocs = []
        # name -> MocNode with parent and referenced-moc info.
        self.mocNodes = {}

    def parsexml(self):
        """Read mocinfo elements into self.mocs / self.mocNodes (logs to stdout)."""
        domtree = parseString(self.mocinfoxml)
        domcument = domtree.documentElement
        if domcument.hasAttribute("version"):
            self.radioType = domcument.getAttribute("version")
        print "radioType is %s" % self.radioType
        mocinfos = domcument.getElementsByTagName("mocinfo")
        for mocinfo in mocinfos:
            mocNode = MocNode()
            name = mocinfo.getAttribute('name')
            parent = mocinfo.getAttribute('parent')
            self.mocs.append(name)
            mocNode.setName(name)
            mocNode.setParent(parent)
            print "mocInfo name is %s parent is %s " % (name, parent)
            fields = mocinfo.getElementsByTagName("field")
            refmocs = []
            for field in fields:
                fieldName = field.getAttribute('name')
                # fields named "ref<Moc>" or "ref<digit><Moc>" reference another moc
                if fieldName[0 : 3] == "ref":
                    # NOTE(review): str.isnumeric only exists on unicode in
                    # Python 2; this relies on minidom returning unicode — confirm.
                    if fieldName[3].isnumeric():
                        refmoc = fieldName[4:]
                    else:
                        refmoc = fieldName[3:]
                    if refmoc not in refmocs:
                        refmocs.append(refmoc)
            print "  ref field is %s" % refmocs
            mocNode.setRefmoc(refmocs)
            self.mocNodes[name] = mocNode

    def dealWithMocs(self):
        """Reorder self.mocs so referenced mocs precede their referrers, then reverse."""
        for mocNode in self.mocNodes:
            name = self.mocNodes[mocNode].name
            refmocs = self.mocNodes[mocNode].refmoc
            if len(refmocs) == 0:
                continue
            for refmoc in refmocs:
                if refmoc not in self.mocs:
                    continue
                # already ordered: the referenced moc comes first
                if self.mocs.index(refmoc) < self.mocs.index(name):
                    continue
                # a ref back to the parent does not force reordering
                if self.mocNodes[refmoc].parent == name:
                    continue
                # Net effect of the four list ops below: move *name* to just
                # after *refmoc* (valid because index(name) < index(refmoc) here).
                selfIndex = self.mocs.index(name)
                refIndex = self.mocs.index(refmoc)
                self.mocs.remove(name)
                self.mocs.insert(selfIndex, refmoc)
                self.mocs.remove(refmoc)
                self.mocs.insert(refIndex, name)
        # final output lists referrers before the mocs they reference
        self.mocs.reverse()

    def generateMocOrderXML(self):
        """Parse, order, and write <root><orderedMocList>...</orderedMocList></root>."""
        self.parsexml()
        self.dealWithMocs()
        doc = minidom.Document()
        root = doc.createElement("root")
        doc.appendChild(root)
        orderedMocList = doc.createElement("orderedMocList")
        orderedMocList.setAttribute("mocType", getType(self.radioType))
        orderedMocList.setAttribute("version", self.radioType)
        for mo in self.mocs:
            moc = doc.createElement("moc")
            moc.setAttribute("name", mo)
            orderedMocList.appendChild(moc)
        root.appendChild(orderedMocList)
        # radio versions embed the "radio" type in the file name
        if self.radioType in radioTypes:
            xmlName = self.radioType + "-" + getType(self.radioType) + "-cm-mocorder.xml"
        else:
            xmlName = self.radioType + "-cm-mocorder.xml"
        absoluteFilePath = self.filePath + "/" + xmlName
        xmlFile = file(absoluteFilePath, "w")
        doc.writexml(xmlFile, "\t", " ", "\n", "UTF-8")
        xmlFile.close()
if __name__ == "__main__":
    # Expects one argument: the directory containing *-mocinfo-sdrm.xml files.
    print 'arguments passed is: ' + sys.argv[1]
    getMocOrder(sys.argv[1])
|
foobarbazblarg/stayclean | stayclean-2019-october/update-google-chart.py | Python | mit | 8,223 | 0.002311 | #!/usr/bin/python
# TODO: issues with new oauth2 stuff. Keep using older version of Python for now.
# #!/usr/bin/env python
import json
import gspread
from oauth2client.client import SignedJwtAssertionCredentials
import datetime
from participantCollection import ParticipantCollection
# Edit Me!
participantFileNames = ['../stayclean-2014-november/participants.txt',
'../stayclean-2014-december/participants.txt',
'../stayclean-2015-january/participants.txt',
'../stayclean-2015-february/participants.txt',
'../stayclean-2015-march/participants.txt',
'../stayclean-2015-april/participants.txt',
'../stayclean-2015-may/participants.txt',
'../stayclean-2015-june/participants.txt',
'../stayclean-2015-july/participants.txt',
'../stayclean-2015-august/participants.txt',
'../stayclean-2015-september/participants.txt',
'../stayclean-2015-october/participants.txt',
'../stayclean-2015-november/participants.txt',
'../stayclean-2015-december/participants.txt',
'../stayclean-2016-january/participants.txt',
'../stayclean-2016-february/participants.txt',
'../stayclean-2016-march/participants.txt',
'../stayclean-2016-april/participants.txt',
'../stayclean-2016-may/participants.txt',
'../stayclean-2016-june/participants.txt',
| '../stayclean-2016-july/participants.txt',
'../stayclean-2016-august/participants.txt',
'../stayclean-2016-september/particip | ants.txt',
'../stayclean-2016-october/participants.txt',
'../stayclean-2016-november/participants.txt',
'../stayclean-2016-december/participants.txt',
'../stayclean-2017-january/participants.txt',
'../stayclean-2017-february/participants.txt',
'../stayclean-2017-march/participants.txt',
'../stayclean-2017-april/participants.txt',
'../stayclean-2017-may/participants.txt',
'../stayclean-2017-june/participants.txt',
'../stayclean-2017-july/participants.txt',
'../stayclean-2017-august/participants.txt',
'../stayclean-2017-september/participants.txt',
'../stayclean-2017-october/participants.txt',
'../stayclean-2017-november/participants.txt',
'../stayclean-2017-december/participants.txt',
'../stayclean-2018-january/participants.txt',
'../stayclean-2018-february/participants.txt',
'../stayclean-2018-march/participants.txt',
'../stayclean-2018-april/participants.txt',
'../stayclean-2018-may/participants.txt',
'../stayclean-2018-june/participants.txt',
'../stayclean-2018-july/participants.txt',
'../stayclean-2018-august/participants.txt',
'../stayclean-2018-september/participants.txt',
'../stayclean-2018-october/participants.txt',
'../stayclean-2018-november/participants.txt',
'../stayclean-2018-december/participants.txt',
'../stayclean-2019-january/participants.txt',
'../stayclean-2019-february/participants.txt',
'../stayclean-2019-march/participants.txt',
'../stayclean-2019-april/participants.txt',
'../stayclean-2019-may/participants.txt',
'../stayclean-2019-june/participants.txt',
'../stayclean-2019-july/participants.txt',
'../stayclean-2019-august/participants.txt',
'../stayclean-2019-september/participants.txt',
'./participants.txt']
# Collect every relapse date across all monthly challenges, sorted ascending.
sortedRelapseDates = []
for participantFileName in participantFileNames:
    participants = ParticipantCollection(fileNameString=participantFileName)
    sortedRelapseDates = sortedRelapseDates + participants.allRelapseDates()
sortedRelapseDates.sort()
earliestReportDate = sortedRelapseDates[0]
latestReportDate = sortedRelapseDates[-1]
# NOTE(review): reportDates and numberOfRelapsesPerDate appear unused below.
reportDates = []
numberOfRelapsesPerDate = []
reportDatesAndNumberOfRelapses = {}
# weekday index (0=Monday .. 6=Sunday) -> number of calendar days observed
dayOfWeekIndexesAndNumberOfInstances = {0: 0, 1: 0, 2: 0, 3: 0, 4: 0, 5: 0, 6: 0}
# Initialize a zero count for every calendar day in the observed range.
reportDate = earliestReportDate
while reportDate <= latestReportDate:
    reportDatesAndNumberOfRelapses[reportDate] = 0
    # dayOfWeekIndexesAndNumberOfInstances[reportDate.weekday()] = dayOfWeekIndexesAndNumberOfInstances[reportDate.weekday()] + 1
    dayOfWeekIndexesAndNumberOfInstances[reportDate.weekday()] += 1
    reportDate += datetime.timedelta(days=1)
# Tally relapses per calendar day.
for relapseDate in sortedRelapseDates:
    # reportDatesAndNumberOfRelapses[relapseDate] = reportDatesAndNumberOfRelapses[relapseDate] + 1
    reportDatesAndNumberOfRelapses[relapseDate] += 1
# Tally total relapses per weekday across all challenge months.
dayOfWeekIndexesAndTotalNumberOfRelapses = {0: 0, 1: 0, 2: 0, 3: 0, 4: 0, 5: 0, 6: 0}
for participantFileName in participantFileNames:
    participants = ParticipantCollection(fileNameString=participantFileName)
    # print participants.relapseDayOfWeekIndexesAndParticipants()
    for index, parts in participants.relapseDayOfWeekIndexesAndParticipants().iteritems():
        # dayOfWeekIndexesAndTotalNumberOfRelapses[index] = dayOfWeekIndexesAndTotalNumberOfRelapses[index] + len(parts)
        dayOfWeekIndexesAndTotalNumberOfRelapses[index] += len(parts)
# Average relapses per weekday = total relapses / observed instances of that weekday.
dayOfWeekIndexesAndAverageNumberOfRelapses = {0: 0, 1: 0, 2: 0, 3: 0, 4: 0, 5: 0, 6: 0}
for index, instances in dayOfWeekIndexesAndNumberOfInstances.iteritems():
    # dayOfWeekIndexesAndAverageNumberOfRelapses[index] = int(round(float(dayOfWeekIndexesAndTotalNumberOfRelapses[index]) / float(instances)))
    dayOfWeekIndexesAndAverageNumberOfRelapses[index] = float(dayOfWeekIndexesAndTotalNumberOfRelapses[index]) / float(instances)
spreadsheetTitle = "StayClean monthly challenge relapse data"
# spreadsheetTitle = "Test spreadsheet"
# Authorize against Google Sheets with a service-account key file.
json_key = json.load(open('../google-oauth-credentials.json'))
scope = ['https://spreadsheets.google.com/feeds']
credentials = SignedJwtAssertionCredentials(json_key['client_email'], json_key['private_key'].encode(), scope)
gc = gspread.authorize(credentials)
spreadSheet = None
try:
    spreadSheet = gc.open(spreadsheetTitle)
except gspread.exceptions.SpreadsheetNotFound:
    print "No spreadsheet with title " + spreadsheetTitle
    exit(1)
workSheet = spreadSheet.get_worksheet(0)
# Columns A/B: one row per calendar day; columns C/D: one row per weekday.
columnACells = workSheet.range("A2:A" + str(len(reportDatesAndNumberOfRelapses) + 1))
columnBCells = workSheet.range("B2:B" + str(len(reportDatesAndNumberOfRelapses) + 1))
columnCCells = workSheet.range("C2:C8")
columnDCells = workSheet.range("D2:D8")
# Fill date / daily-relapse-count pairs.
reportDate = earliestReportDate
rowIndex = 0
while reportDate <= latestReportDate:
    columnACells[rowIndex].value = str(reportDate)
    columnBCells[rowIndex].value = str(reportDatesAndNumberOfRelapses[reportDate])
    rowIndex += 1
    reportDate += datetime.timedelta(days=1)
# Fill weekday-name / average-relapse pairs.
for weekdayIndex in range(0, 7):
    weekdayName = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday'][weekdayIndex]
    # spreadsheetClient.UpdateCell(weekdayIndex + 2,3,weekdayName,spreadsheetId)
    # spreadsheetClient.UpdateCell(weekdayIndex + 2,4,str(dayOfWeekIndexesAndAverageNumberOfRelapses[weekdayIndex]),spreadsheetId)
    columnCCells[weekdayIndex].value = weekdayName
    columnDCells[weekdayIndex].value = str(dayOfWeekIndexesAndAverageNumberOfRelapses[weekdayIndex])
# Batch all cells for a single update call (presumably pushed right after
# this point — the remainder of the script is outside this chunk).
allCells = columnACells + columnBCells + columnCCells + columnDCells
xieyaxiongfly/Atheros_CSI_tool_OpenWRT_src | scripts/dl_github_archive.py | Python | gpl-2.0 | 14,504 | 0.001241 | #!/usr/bin/env python
#
# Copyright (c) 2018 Yousong Zhou <yszhou4tech@gmail.com>
#
# This is free software, licensed under the GNU General Public License v2.
# See /LICENSE for more information.
import argparse
import calendar
import datetime
import errno
import fcntl
import hashlib
import json
import os
import os.path
import re
import shutil
import ssl
import subprocess
import sys
import time
import urllib2
TMPDIR = os.environ.get('TMP_DIR') or '/tmp'
TMPDIR_DL = os.path.join(TMPDIR, 'dl')
# Raised by the Path helpers below for tarball/directory manipulation errors.
class PathException(Exception): pass
# Download/repack failure marker.  # NOTE(review): not raised in the visible code.
class DownloadGitHubError(Exception): pass
class Path(object):
    """Context class for preparing and cleaning up directories.

    If ``preclean`` is ``True``, ``path`` is removed on context enter.
    If ``path`` ``isdir``, then it is created on context enter.
    If ``keep`` is ``True``, then ``path`` is NOT removed on context exit.
    """
    def __init__(self, path, isdir=True, preclean=False, keep=False):
        self.path = path
        self.isdir = isdir
        self.preclean = preclean
        self.keep = keep
    def __enter__(self):
        if self.preclean:
            self.rm_all(self.path)
        if self.isdir:
            self.mkdir_all(self.path)
        return self
    def __exit__(self, exc_type, exc_value, traceback):
        if not self.keep:
            self.rm_all(self.path)
    @staticmethod
    def mkdir_all(path):
        """Same as mkdir -p.

        Note: os.path.split() returns a (head, tail) pair, so only the last
        two components are created; the grandparent must already exist.
        """
        names = os.path.split(path)
        p = ''
        for name in names:
            p = os.path.join(p, name)
            Path._mkdir(p)
    @staticmethod
    def _rmdir_dir(dir_):
        # Depth-first removal of a directory tree.
        names = Path._listdir(dir_)
        for name in names:
            p = os.path.join(dir_, name)
            Path.rm_all(p)
        Path._rmdir(dir_)
    @staticmethod
    def _mkdir(path):
        Path._os_func(os.mkdir, path, errno.EEXIST)
    @staticmethod
    def _rmdir(path):
        Path._os_func(os.rmdir, path, errno.ENOENT)
    @staticmethod
    def _remove(path):
        Path._os_func(os.remove, path, errno.ENOENT)
    @staticmethod
    def _listdir(path):
        return Path._os_func(os.listdir, path, errno.ENOENT, default=[])
    @staticmethod
    def _os_func(func, path, expected_errno, default=None):
        """Call func(path) in an idempotent way.

        On exception ``ex``, if the type is OSError and
        ``ex.errno == expected_errno``, return ``default``; otherwise re-raise.
        (Parameter renamed from ``errno`` so it no longer shadows the module.)
        """
        try:
            return func(path)
        except OSError as e:
            if e.errno == expected_errno:
                return default
            else:
                raise
    @staticmethod
    def rm_all(path):
        """Same as rm -r."""
        if os.path.islink(path):
            Path._remove(path)
        elif os.path.isdir(path):
            Path._rmdir_dir(path)
        else:
            Path._remove(path)
    @staticmethod
    def untar(path, into=None):
        """Extract tarball at ``path`` into subdir ``into``.

        Return the subdir name if and only if there is exactly one,
        otherwise raise PathException.
        """
        # (Stray dataset separator removed from this tuple.)
        args = ('tar', '-C', into, '-xzf', path, '--no-same-permissions')
        subprocess.check_call(args, preexec_fn=lambda: os.umask(0o22))
        dirs = os.listdir(into)
        if len(dirs) == 1:
            return dirs[0]
        else:
            raise PathException('untar %s: expecting a single subdir, got %s' % (path, dirs))
    @staticmethod
    def tar(path, subdir, into=None, ts=None):
        """Pack ``path``/``subdir`` into tarball ``into`` reproducibly
        (fixed owner/group, sorted names, optional fixed mtime ``ts``)."""
        # --sort=name requires a recent build of GNU tar
        args = ['tar', '--numeric-owner', '--owner=0', '--group=0', '--sort=name']
        args += ['-C', path, '-cf', into, subdir]
        envs = os.environ.copy()
        if ts is not None:
            args.append('--mtime=@%d' % ts)
        # Compression is selected from the output file extension.
        if into.endswith('.xz'):
            envs['XZ_OPT'] = '-7e'
            args.append('-J')
        elif into.endswith('.bz2'):
            args.append('-j')
        elif into.endswith('.gz'):
            args.append('-z')
            envs['GZIP'] = '-n'
        else:
            raise PathException('unknown compression type %s' % into)
        subprocess.check_call(args, env=envs)
class GitHubCommitTsCache(object):
    # Cache file name (created under TMPDIR_DL) and maximum entries retained.
    __cachef = 'github.commit.ts.cache'
    __cachen = 2048
    def __init__(self):
        Path.mkdir_all(TMPDIR_DL)
        self.cachef = os.path.join(TMPDIR_DL, self.__cachef)
        self.cache = {}
    def get(self, k):
        """Get timestamp with key ``k``."""
        # O_CREAT so a missing cache file is created empty rather than failing.
        fileno = os.open(self.cachef, os.O_RDONLY | os.O_CREAT)
        with os.fdopen(fileno) as fin:
            try:
                # Shared lock: concurrent readers may proceed together.
                fcntl.lockf(fileno, fcntl.LOCK_SH)
                self._cache_init(fin)
                if k in self.cache:
                    ts = self.cache[k][0]
                    return ts
            finally:
                fcntl.lockf(fileno, fcntl.LOCK_UN)
        return None
    def set(self, k, v):
        """Update timestamp with ``k``."""
        fileno = os.open(self.cachef, os.O_RDWR | os.O_CREAT)
        with os.fdopen(fileno, 'wb+') as f:
            try:
                # Exclusive lock: the read-modify-write below must be atomic
                # across processes sharing the cache file.
                fcntl.lockf(fileno, fcntl.LOCK_EX)
                self._cache_init(f)
                self.cache[k] = (v, int(time.time()))
                self._cache_flush(f)
            finally:
                fcntl.lockf(fileno, fcntl.LOCK_UN)
    def _cache_init(self, fin):
        # On-disk format: one "<key> <timestamp> <last-updated>" entry per line.
        for line in fin:
            k, ts, updated = line.split()
            ts = int(ts)
            updated = int(updated)
            self.cache[k] = (ts, updated)
    def _cache_flush(self, fout):
        # Keep only the most recently updated __cachen entries, then rewrite
        # the file in place.  NOTE: dict.iteritems() and sorted(cmp=...) are
        # Python 2 only, as is the rest of this file (urllib2 etc.).
        cache = sorted(self.cache.iteritems(), cmp=lambda a, b: b[1][1] - a[1][1])
        cache = cache[:self.__cachen]
        self.cache = {}
        os.ftruncate(fout.fileno(), 0)
        fout.seek(0, os.SEEK_SET)
        for k, ent in cache:
            ts = ent[0]
            updated = ent[1]
            line = '{0} {1} {2}\n'.format(k, ts, updated)
            fout.write(line)
class DownloadGitHubTarball(object):
"""Download and repack archive tarabll from GitHub.
Compared with the method of packing after cloning the whole repo, this
method is more friendly to users with fragile internet connection.
However, there are limitations with this method
- GitHub imposes a 60 reqs/hour limit for unauthenticated API access.
This affects fetching commit date for reproducible tarballs. Download
through the archive link is not affected.
- GitHub archives do not contain source codes for submodules.
- GitHub archives seem to respect .gitattributes and ignore pathes with
export-ignore attributes.
For the first two issues, the method will fail loudly to allow fallback to
clone-then-pack method.
As for the 3rd issue, to make sure that this method only produces identical
tarballs as the fallback method, we require the expected hash value to be
supplied. That means the first tarball will need to be prepared by the
clone-then-pack method
"""
__repo_url_regex = re.compile(r'^(?:https|git)://github.com/(?P<owner>[^/]+)/(?P<repo>[^/]+)')
def __init__(self, args):
self.dl_dir = args.dl_dir
self.version = args.version
self.subdir = args.subdir
self.source = args.source
self.url = args.url
self._init_owner_repo()
self.xhash = args.hash
self._init_hasher()
self.commit_ts = None # lazy load commit timestamp
self.commit_ts_cache = GitHubCommitTsCache()
self.name = 'github-tarball'
def download(self):
"""Download and repack GitHub archive tarball."""
self._init_commit_ts()
with Path(TMPDIR_DL, keep=True) as dir_dl:
# fetch tarball from GitHub
tarball_path = os.path.join(dir_dl.path, self.subdir + '.tar.gz.dl')
with Path(tarball_path, isdir=False):
self._fetch(tarball_path)
# unpack
d = os.path.join(dir_dl.path, self.subdir + '.untar')
with Path(d, preclean=True) as dir_untar:
tarball_prefix = Pa |
mindbody/API-Examples | SDKs/Python/test/test_add_client_request.py | Python | bsd-2-clause | 956 | 0 | # coding: utf-8
" | ""
MINDBODY Public API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: v6
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from swagger_client.models.add | _client_request import AddClientRequest # noqa: E501
from swagger_client.rest import ApiException
class TestAddClientRequest(unittest.TestCase):
    """Unit test stubs for the AddClientRequest model (generated scaffold)."""

    def setUp(self):
        # No fixtures are required for these stubs yet.
        pass

    def tearDown(self):
        # Nothing to clean up.
        pass

    def testAddClientRequest(self):
        """Smoke-test construction of AddClientRequest."""
        # FIXME: construct object with mandatory attributes with example values
        # model = swagger_client.models.add_client_request.AddClientRequest()  # noqa: E501
        pass


if __name__ == '__main__':
    unittest.main()
|
P4ELTE/t4p4s | src/compiler.py | Python | apache-2.0 | 22,115 | 0.004251 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# SPDX-License-Identifier: Apache-2.0
# Copyright 2016-2020 Eotvos Lorand University, Budapest, Hungary
import argparse
from hlir16.hlir import *
from compiler_log_warnings_errors import *
import compiler_log_warnings_errors
from compiler_load_p4 import load_from_p4
from compiler_exception_handling import *
import compiler_common
import re
import os
import sys
import pkgutil
# When True the compiler writes the generated code files out.
generate_code_files = True
# Inside the compiler, these variables are considered singleton.
args = []    # launch arguments; accessed like a mapping (e.g. args['hint'] below)
hlir = None  # high-level IR of the P4 program; presumably set after load_from_p4 -- confirm
def replace_insert2(insert):
    """Translate one template insert into a (format_string, expression) pair.

    '$name'                          -> ('{}', 'name')
    '$[light][t1]{expr}{t2}' forms   -> format text around a '{}' placeholder;
    the '$$' variant wraps the text in a T4LIT(...) highlight macro.
    """
    # Fast path: a bare '$identifier' insert.
    simple = re.split(r'^\$([a-zA-Z_][a-zA-Z_0-9]*)$', insert)
    if len(simple) == 3:
        return ("{}", simple[1])
    # replace $$[light][text1]{expr}{text2} inserts, where all parts except {expr} are optional
    m = re.match(r'(?P<type>\$\$?)(\[(?P<light>[^\]]+)\])?(\[(?P<text1>[^\]]+)\])?{\s*(?P<expr>[^}]*)\s*}({(?P<text2>[^}]+)})?', insert)
    light = m.group("light")
    txt1 = m.group('text1') or ''
    expr = m.group('expr')
    txt2 = m.group('text2') or ''
    # no highlighting
    if m.group("type") == '$':
        fmt = f'{escape_brace(txt1)}{{}}{escape_brace(txt2)}'
    else:
        # Highlighted: wrap in T4LIT with an optional ",<light>" colour argument.
        # (Stray dataset separator inside 'light_param' removed.)
        light_param = f",{light}" if light not in (None, "") else ""
        fmt = f'" T4LIT({escape_brace(txt1)}{{}}{escape_brace(txt2)}{light_param}) "'
    return (fmt, expr)
def replace_insert(insert):
    """Generator form of replace_insert2().

    Yields literal text pieces as plain strings and insert expressions as
    1-tuples, so callers can distinguish them.  Non-insert input is passed
    through unchanged.
    """
    simple = re.split(r'^\$([a-zA-Z_][a-zA-Z_0-9]*)$', insert)
    if len(simple) == 3:
        yield (simple[1],)
        return
    # replace $$[light][text1]{expr}{text2} inserts, where all parts except {expr} are optional
    # (Stray dataset separator removed from the regex below.)
    m = re.match(r'(?P<type>\$\$?)(\[(?P<light>[^\]]+)\])?(\[(?P<text1>[^\]]+)\])?{\s*(?P<expr>[^}]*)\s*}({(?P<text2>[^}]+)})?', insert)
    if not m:
        # Not an insert at all: pass the raw text through.
        yield insert
        return
    light = m.group("light")
    txt1 = m.group('text1') or ''
    expr = m.group('expr')
    txt2 = m.group('text2') or ''
    # no highlighting
    if m.group("type") == '$':
        yield escape_brace(txt1)
        yield (escape_brace(expr),)
        yield escape_brace(txt2)
    else:
        # Highlighted variant, emitted piecewise around the T4LIT macro.
        # (Removed an unused 'light_param' local from the original.)
        yield '" T4LIT("'
        yield escape_brace(txt1)
        yield (escape_brace(expr),)
        yield escape_brace(txt2)
        if light:
            yield f",{light}"
        yield ') "'
def adjust_indentation(indenter, line_idx, file):
    """Track the generated file's indentation for a '[', '{' or '}' marker.

    Mutates the module-global compiler_common.file_indentation_level and
    returns the indentation level the current line should be emitted at.
    """
    # marker -> (level delta, return the level as it was before the delta?)
    indent_levels = {
        "[": ( 0, True),
        "{": (+1, True),
        "}": (-1, False),
    }
    old_indent = compiler_common.file_indentation_level
    indent_change, return_old_indent = indent_levels[indenter]
    compiler_common.file_indentation_level += indent_change
    # #{ starts a new indentation level from the next line
    # also, #} unindents starting this line
    # NOTE(review): underflow is only reported on the next '{' after the level
    # has gone negative; a lone excess '}' is silent -- confirm this is intended.
    if indenter == '{' and compiler_common.file_indentation_level == 0:
        addError("Compiler", f"Too much unindent in {file}:{line_idx}")
    return old_indent if return_old_indent else compiler_common.file_indentation_level
def escape_slash(s):
    """Backslash-escape every backslash and double quote in *s*."""
    return s.translate(str.maketrans({'\\': '\\\\', '"': '\\"'}))
def escape_brace(s):
    """Double every literal brace so *s* survives str.format/f-string templating."""
    return s.replace('{', '{{').replace('}', '}}')
def split_and_translate(content, extra_content="", no_quote_allowed=False):
    """Split *content* around $name / $$[..]{..} inserts and translate it."""
    tokens = re.split(r'(\$+(?:(?:\[[^\]]*\])*(?:\{[^\}]*\})+|[a-zA-Z_][a-zA-Z_0-9]*))', content)
    return translate_line_main_content2(tokens, extra_content, no_quote_allowed)
def translate_line_main_content(parts, extra_content, no_quote_allowed):
    """Try to render the pre-split template *parts* as one Python (f-)string.

    Returns a (flag, code_text) pair; falls back to
    translate_line_main_content2() when a single quoted string is not safe
    (mixed quote characters, or inserts that contain calls).
    NOTE(review): extra_content is ignored on the success path, and this
    function is not referenced in the visible code -- confirm it is still used.
    """
    # Flatten every part through replace_insert(); tuples mark insert expressions.
    replaceds = [repl for part in parts for repl in replace_insert(part)]
    raws = [part[0] if type(part) is tuple else part for part in replaceds]
    no_apostrophes = all("'" not in raw for raw in raws)
    no_quotes = all('"' not in raw for raw in raws)
    if no_apostrophes or no_quotes:
        quote = "'" if no_apostrophes else '"'
        has_inserts = any(type(part) is tuple for part in replaceds)
        # Inserts containing '(' (function calls) are not inlined here.
        has_bad_inserts = any(type(part) is tuple and any('(' in p for p in part) for part in replaceds)
        if has_bad_inserts:
            return translate_line_main_content2(parts, extra_content, no_quote_allowed)
        esc = escape_brace if has_inserts else (lambda p: p)
        # Tuples become {placeholder} + literal tail; plain strings are copied.
        content = "".join((f'{{{part[0]}}}' if part[0] != '' else '') + "".join(esc(p) for p in part[1:]) if type(part) is tuple else esc(part) for part in replaceds)
        formatter = 'f' if has_inserts else ''
        return False, f'{formatter}{quote}{content}{quote}'
    return translate_line_main_content2(parts, extra_content, no_quote_allowed)
def translate_line_main_content2(parts, extra_content, no_quote_allowed):
    """Render pre-split template *parts* as a quoted string, using
    '...'.format(...) when insert expressions are present.

    Returns (flag, code_text); flag is False when the text is emitted
    unquoted (only possible when no_quote_allowed permits it).
    """
    # No inserts at all: emit the single literal part.
    if len(parts) == 1:
        if no_quote_allowed and '\\' not in parts[0] and '"' not in parts[0]:
            return False, parts[0]
        return True, f'"{escape_slash(parts[0])}"'
    # parts alternates literal/insert: pair each insert with its trailing text.
    match_with_rests = [(replace_insert2(parts[1+2*i]), parts[2+2*i]) for i in range((len(parts)-1)//2)]
    # Empty expressions drop their '{}' placeholder.
    all_fmt = "".join(((re.sub(r'\{\}', '', fmt) if expr == "" else fmt) + escape_brace(txt) for (fmt, expr), txt in match_with_rests))
    all_fmt = escape_slash(f'{escape_brace(parts[0])}{all_fmt}') + extra_content
    # Prefer single quotes when possible; then '"' needs no escaping.
    if "'" not in all_fmt:
        quote = "'"
        all_fmt = re.sub(r'\\"', '"', all_fmt)
    else:
        quote = '"'
    all_escapes_txt = ", ".join((escape_brace(expr) or '""' for (fmt, expr), txt in match_with_rests if expr != ""))
    if all_escapes_txt == "":
        if no_quote_allowed:
            return False, f'{all_fmt}'.strip()
        return True, f'{quote}{all_fmt}{quote}'
    else:
        return True, f'{quote}{all_fmt}{quote}.format({all_escapes_txt})'
def translate_line_with_insert(file, genfile, line_idx, line):
    """Gets a line that contains an insert
    and transforms it to a Python code section.

    Recognises '#[', '#{', '#}' markers, optionally prefixed with 'pre'/'aft'
    for prepend/append statements; file/genfile/line_idx feed the hint comment
    and indentation tracking.
    """
    # Groups: leading whitespace, optional pre/aft marker, the bracket, payload.
    _empty, indent, maybe_pre, indenter, content, _empty2 = re.split(r'^([ \t]*)#(pre|aft)?([\[\{\}])(.*)$', line)
    line_indent = adjust_indentation(indenter, line_idx, file)
    # maybe_pre is None when the (pre|aft) group did not match; both uses below
    # treat None as falsy, so no '[None]' hint can actually be emitted.
    prepend_append_funname = "prepend" if maybe_pre == "pre" else "append" if maybe_pre == "aft" else ""
    prepend_append_txt = f"[{maybe_pre}]" if maybe_pre != "" else ""
    no_hint = "nohint" in args['hint']
    # Source-location hint appended as a C comment unless hints are disabled.
    extra_content = f" // {prepend_append_txt} {file_prefix(file, genfile)}{line_idx}" if not no_hint and maybe_pre else ""
    _is_escaped, line = split_and_translate(content, extra_content)
    if maybe_pre:
        return f'{indent}{prepend_append_funname}_statement({line})'
    par_indent = f', indent_level = {line_indent}' if line_indent != 0 else ''
    par_lineno = f', lineno = {line_idx}' if line_idx is not None else ''
    return f'{indent}generated_code += add_code({line}{par_indent}{par_lineno})'
def increase(idx):
    """Return *idx* advanced by one, propagating None unchanged."""
    return None if idx is None else idx + 1
def add_empty_lines(code_lines):
    """Returns an enumerated list of the lines.
    When an empty line separates follows an escaped code part,
    an empty line is inserted into the generated list with None as line number."""
    new_lines = []
    is_block_with_sequence = False  # NOTE(review): computed but never read here
    last_indent = 0        # indentation of the last kept non-blank line
    already_added = False  # prevents two consecutive '#[' sentinels
    for idx, line in code_lines:
        if "#[" in line:
            is_block_with_sequence = True
        if not line.strip() and last_indent == 0 and not already_added:
            # Top-level blank line: keep it and add a '#[' sentinel with no
            # line number (idx None).
            new_lines.append((idx, line))
            new_lines.append((None, "#["))
            last_indent = 0
            already_added = True
        else:
            if not line.strip():
                # Indented blank lines are dropped entirely.
                continue
            new_lines.append((increase(idx), line))
            last_indent = len(line) - len(line.lstrip())
            already_added = False
    return new_lines
def add_gen_in_def(code_lines, orig_file):
"""If a function's name starts with 'gen_' in a generated file,
that function produces code.
This is a helper function that initialises and returns the appropriate variable.
Also, if "return" is encountered on a single line,
the requisite return value is inserted."""
new_lines = []
is_inside_gen = False
for idx, line in code_lines:
if is_inside_gen:
if re.match(r'^[ \t]*return[ \t]*$', line):
line = re.sub(r' |
jesusbriales/rgbd_benchmark_tools | src/rgbd_benchmark_tools/plot_graph.py | Python | bsd-2-clause | 2,247 | 0.010681 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 17 09:02:31 2015
@author: jesus
"""
import argparse
import numpy as np
import h5py
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='''
    Plot mean precision (averaged from samples) for different cases wrt sampling ratio.
    ''')
    parser.add_argument('h5file', help='HDF5 file in which the metrics are stored in the group eval for each sample')
    parser.add_argument('group', help='H5 path of the main group containing... ratioGroups')
    parser.add_argument('delta_unit', help='delta_unit of the metrics to collect')
    parser.add_argument('metric', help='metric to plot')
    args = parser.parse_args()
    # Opened in append mode ('a'), though only reads happen below.
    h5f = h5py.File(args.h5file,'a')
    unit = args.delta_unit
    metric = args.metric
    # Open the base group for all the cases to show
    main_group = h5f[args.group]
    # Pre-create/allocate
    numOfRatios = len(main_group)
    # Hard-coded method-name prefixes to plot, one line each.
    cases = ['Dell_','GMS_','Max','Meil','Ramp_']
    lines = {}
    for case in cases:
        lines[case] = np.empty(numOfRatios)
    # Iterate through the different sampling ratios
    ratios = main_group.keys()
    for x,ratio in enumerate(ratios):
        # Iterate, for the current ratio, through all cases
        ratioGroup = main_group[ratio]
        # Preallocate if not done yet (dynamic-case variant, currently disabled):
        # if cases == []:
        #     cases = ratioGroup.keys()
        #     for case in cases:
        #         lines[case] = np.empty(numOfRatios)
        # Store values in the lines dictionary
        for case in cases:
            caseGroup = ratioGroup[case]
            dset = caseGroup['eval/'+unit+'/'+metric]
            # Mean of the per-sample metric values for this (ratio, case) cell.
            lines[case][x] = np.mean( dset[:] )
    # Plot the graph
    # The 'Agg' backend must be selected before pyplot is imported (headless).
    import matplotlib
    matplotlib.use('Agg')
    import matplotlib.pyplot as plt
    import matplotlib.pylab as pylab
    fig = plt.figure()
    ax = fig.add_subplot(111)
    for key in lines:
        ax.plot(lines[key],'-',color="blue")
    #ax.plot([t for t,e in err_rot],[e for t,e in err_rot],'-',color="red")
    # NOTE(review): these axis labels look copied from a trajectory-error plot;
    # here x is the ratio index and y the averaged metric -- confirm intent.
    ax.set_xlabel('time [s]')
    ax.set_ylabel('translational error [m]')
    plt.savefig('figure',dpi=300)
|
patrys/opbeat_python | tests/instrumentation/django_tests/template_tests.py | Python | bsd-3-clause | 5,153 | 0.002911 | import pytest # isort:skip
pytest.importorskip("django") # isort:skip
from os.path import join
import django
from django.test import TestCase
import mock
import pytest
from conftest import BASE_TEMPLATE_DIR
from opbeat.contrib.django.models import get_client, opbeat
try:
# Django 1.10+
from django.urls import reverse
except ImportError:
from django.core.urlresolvers import reverse
# Testing Django 1.8+ backends
# Two template-engine configurations exercised by the tests below: the stock
# Django engine and the Jinja2 backend (templates under jinja2/).
TEMPLATES = (
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [
            BASE_TEMPLATE_DIR
        ],
    },
    {
        'BACKEND': 'django.template.backends.jinja2.Jinja2',
        'DIRS': [
            join(BASE_TEMPLATE_DIR, 'jinja2')
        ],
    },
)
# Pick the middleware settings key appropriate for the installed Django version.
if django.VERSION >= (2, 0):
    middleware_settings_name = 'MIDDLEWARE'
else:
    middleware_settings_name = 'MIDDLEWARE_CLASSES'
class TracesTest(TestCase):
    """Checks that opbeat records the expected traces when Django renders
    templates (stock Django engine and, on 1.8+, Jinja2)."""

    def setUp(self):
        self.opbeat = get_client()
        opbeat.instrumentation.control.instrument()

    @mock.patch("opbeat.traces.RequestsStore.should_collect")
    def test_template_rendering(self, should_collect):
        """Three requests to the Django-template view yield one transaction and
        three trace kinds, each with three duration samples."""
        should_collect.return_value = False
        with self.settings(**{middleware_settings_name: [
                'opbeat.contrib.django.middleware.OpbeatAPMMiddleware']}):
            self.client.get(reverse('render-heavy-template'))
            self.client.get(reverse('render-heavy-template'))
            self.client.get(reverse('render-heavy-template'))
        transactions, traces = self.opbeat.instrumentation_store.get_all()
        self.assertEqual(len(transactions), 1)
        self.assertEqual(len(traces), 3, [t['signature'] for t in traces])
        kinds = ['transaction', 'code', 'template.django']
        self.assertEqual(set([t['kind'] for t in traces]),
                         set(kinds))
        # Reorder according to the kinds list so we can just test them
        kinds_dict = dict([(t['kind'], t) for t in traces])
        traces = [kinds_dict[k] for k in kinds]
        self.assertEqual(traces[0]['kind'], 'transaction')
        self.assertEqual(traces[0]['signature'], 'transaction')
        self.assertEqual(traces[0]['transaction'], 'GET tests.contrib.django.testapp.views.render_template_view')
        self.assertEqual(len(traces[0]['durations']), 3)
        self.assertEqual(len(traces[0]['parents']), 0)
        self.assertEqual(traces[1]['kind'], 'code')
        self.assertEqual(traces[1]['signature'], 'something_expensive')
        self.assertEqual(traces[1]['transaction'],
                         'GET tests.contrib.django.testapp.views.render_template_view')
        self.assertEqual(len(traces[1]['durations']), 3)
        self.assertEqual(traces[1]['parents'], ('transaction', 'list_users.html'))
        self.assertEqual(traces[2]['kind'], 'template.django')
        self.assertEqual(traces[2]['signature'], 'list_users.html')
        self.assertEqual(traces[2]['transaction'],
                         'GET tests.contrib.django.testapp.views.render_template_view')
        self.assertEqual(len(traces[2]['durations']), 3)
        self.assertEqual(traces[2]['parents'], ('transaction',))

    @pytest.mark.skipif(django.VERSION < (1, 8),
                        reason='Jinja2 support introduced with Django 1.8')
    @mock.patch("opbeat.traces.RequestsStore.should_collect")
    def test_template_rendering_django18_jinja2(self, should_collect):
        """Same scenario against the Jinja2 backend: one transaction and two
        trace kinds (no 'code' trace)."""
        should_collect.return_value = False
        with self.settings(**{middleware_settings_name: [
                'opbeat.contrib.django.middleware.OpbeatAPMMiddleware'],
                'TEMPLATES': TEMPLATES
                }):
            self.client.get(reverse('render-jinja2-template'))
            self.client.get(reverse('render-jinja2-template'))
            self.client.get(reverse('render-jinja2-template'))
        transactions, traces = self.opbeat.instrumentation_store.get_all()
        self.assertEqual(len(transactions), 1)
        self.assertEqual(len(traces), 2, [t['signature'] for t in traces])
        kinds = ['transaction', 'template.jinja2']
        self.assertEqual(set([t['kind'] for t in traces]),
                         set(kinds))
        # Reorder according to the kinds list so we can just test them
        kinds_dict = dict([(t['kind'], t) for t in traces])
        traces = [kinds_dict[k] for k in kinds]
        self.assertEqual(traces[0]['kind'], 'transaction')
        self.assertEqual(traces[0]['signature'], 'transaction')
        self.assertEqual(traces[0]['transaction'],
                         'GET tests.contrib.django.testapp.views.render_jinja2_template')
        self.assertEqual(len(traces[0]['durations']), 3)
        self.assertEqual(len(traces[0]['parents']), 0)
        self.assertEqual(traces[1]['kind'], 'template.jinja2')
        self.assertEqual(traces[1]['signature'], 'jinja2_template.html')
        self.assertEqual(traces[1]['transaction'],
                         'GET tests.contrib.django.testapp.views.render_jinja2_template')
        self.assertEqual(len(traces[1]['durations']), 3)
        self.assertEqual(traces[1]['parents'], ('transaction',))
|
ericmjl/bokeh | tests/unit/bokeh/core/property/test_instance.py | Python | bsd-3-clause | 3,539 | 0.006499 | #-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2020, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
import pytest ; pytest
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Bokeh imports
from _util_property import _TestHasProps, _TestModel, _TestModel2
from bokeh._testing.util.api import verify_all
from bokeh.core.has_props import HasProps
from bokeh.core.properties import Float, Int
# Module under test
import bokeh.core.property.instance as bcpi # isort:skip
#-----------------------------------------------------------------------------
# Setup
#-----------------------------------------------------------------------------
ALL = (
'Instance',
)
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
class Test_Instance(object):
    """Tests for bokeh.core.property.instance.Instance."""

    def test_init(self) -> None:
        # Instance requires the target type argument.
        with pytest.raises(TypeError):
            bcpi.Instance()

    def test_serialized(self) -> None:
        prop = bcpi.Instance(_TestModel)
        assert prop.serialized == True

    def test_readonly(self) -> None:
        prop = bcpi.Instance(_TestModel)
        assert prop.readonly == False

    def test_valid(self) -> None:
        prop = bcpi.Instance(_TestModel)
        assert prop.is_valid(None)
        assert prop.is_valid(_TestModel())

    def test_invalid(self) -> None:
        prop = bcpi.Instance(_TestModel)
        assert not prop.is_valid(False)
        assert not prop.is_valid(True)
        assert not prop.is_valid(0)
        assert not prop.is_valid(1)
        assert not prop.is_valid(0.0)
        assert not prop.is_valid(1.0)
        assert not prop.is_valid(1.0+1.0j)
        assert not prop.is_valid("")
        assert not prop.is_valid(())
        assert not prop.is_valid([])
        assert not prop.is_valid({})
        # Wrong model type / non-model HasProps are rejected too.
        assert not prop.is_valid(_TestModel2())
        assert not prop.is_valid(_TestHasProps())

    def test_from_json(self) -> None:
        class MapOptions(HasProps):
            lat = Float
            lng = Float
            zoom = Int(12)
        v1 = bcpi.Instance(MapOptions).from_json(dict(lat=1, lng=2))
        v2 = MapOptions(lat=1, lng=2)
        assert v1.equals(v2)

    def test_has_ref(self) -> None:
        prop = bcpi.Instance(_TestModel)
        assert prop.has_ref

    def test_str(self) -> None:
        prop = bcpi.Instance(_TestModel)
        assert str(prop) == "Instance(_TestModel)"
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
Test___all__ = verify_all(bcpi, ALL)
|
StefanKjartansson/Cloudstack-Python-Client | cloudstack/baseclient.py | Python | apache-2.0 | 6,613 | 0.00378 | import urllib2
import urllib
import hashlib
import json
import time
import socket
import os
import logging
import hmac
import base64
from cloud_exceptions import CloudException
from dataobject import *
__all__ = ['BaseClient', 'DataObject', 'CloudException']
logger = logging.getLogger('cloud_com.baseclient')
class BaseClient(object):
    """Minimal CloudStack API client (Python 2 era: urllib2/urllib).

    Authenticates either with a username/password pair (cookie-based login)
    or with an apiKey/secretKey pair (per-request HMAC-SHA1 signatures).
    """
    def __init__(self, url, username=None, password=None,
            apiKey=None, secretKey=None, password_encode=True):
        '''
        url and either a username and password or apiKey and secretKey
        '''
        self.use_login = False
        if username and password:
            self.use_login = True
        if self.use_login:
            self.username = username
            self.password = password
        elif (apiKey and secretKey):
            self.apiKey = apiKey
        else:
            raise Exception('''Either provide a valid set of
                username--password or apiKey--secretKey''')
        self.url = url
        if self.url.endswith('?'):
            self.url = self.url[:-1]
        if self.use_login:
            # Cookie-based session: log in once, reuse the opener afterwards.
            self.caller = urllib2.build_opener(
                urllib2.HTTPCookieProcessor())
            urllib2.install_opener(self.caller)
            #Will throw error if login fails.
            self.login_response = self.__execute__('login', {
                'username': self.username,
                'password': (hashlib.md5(self.password).hexdigest()
                    if password_encode else self.password)})
            self.is_connected = True
        else:
            # Key-based access: every request is signed with the secret key.
            class SignatureCaller(object):
                def __init__(self, secretKey):
                    self.secretKey = secretKey
                def open(self, *args, **kwargs):
                    url = args[0]
                    base_url, query_string = url.split('?')
                    # CloudStack signs the lower-cased, sorted query string.
                    msg = '&'.join(sorted([s.lower() for s in
                        query_string.split('&')]))
                    logging.debug('unsignedRequest: %s' % msg)
                    signature = base64.b64encode(
                        hmac.new(self.secretKey,
                            msg=msg, digestmod=hashlib.sha1).digest())
                    logging.debug('Signature: %s' % signature)
                    url = '%s?%s&signature=%s' % (base_url,
                        query_string, urllib.quote(signature))
                    logging.debug('Calling API: %s' % url)
                    return urllib2.urlopen(url, **kwargs)
            self.caller = SignatureCaller(secretKey)
            self.is_connected = True
    @classmethod
    def loadFromProperties(klass, properties_file):
        """Build a client from a key=value properties file ('~' is expanded)."""
        if properties_file.startswith('~'):
            p = os.path.expanduser('~') + properties_file[1:]
        else:
            p = properties_file
        logger.info('Using properties file: %s' % p)
        with open(p, 'r') as f:
            # Skip comment lines and blanks.  (Fixed: was "l is not ''",
            # an identity comparison against a literal.)
            properties = dict([l.split('=')
                for l in f.read().split('\n')
                if not l.startswith('#') and l != ''])
        url = properties.pop('url', None)
        if not url:
            raise Exception('Missing url from properties')
        return klass(url, **properties)
    def __execute__(self, command, kwargs, is_async=False):
        """Issue an API command and return the decoded JSON response.

        When is_async is true, the initial response is treated as a job id
        which is polled until completion.  ('is_async' was renamed from
        'async', a reserved word since Python 3.7; every in-class call
        passes it positionally.)
        """
        try:
            if not command:
                raise Exception('Missing command!')
            kwargs = kwargs or {}
            # Drop None-valued parameters before building the query string.
            params = dict([(k, v) for (k, v)
                in kwargs.items()
                if v is not None])
            if not self.use_login:
                params.update({'apiKey': self.apiKey})
            params.update({'command': command, 'response': 'json'})
            logger.debug('Executing command %s with arguments: %s' % (
                command, str(params)))
            if is_async:
                logger.debug('Command is asynchronous')
                jobid = self.process(command.lower() + 'response',
                    json.loads(self.caller.open(
                    self.url + '?' + urllib.urlencode(params)).read())).jobid
                logger.debug('Async jobid: %d' % jobid)
                # NOTE(review): queryAsyncJobResult is not defined in this
                # class; presumably provided by a subclass -- confirm.
                job = self.queryAsyncJobResult(jobid)
                logger.debug('Async Job Info: %s' % job)
                # Poll until the job leaves the "in progress" state ('0').
                while job.jobstatus == '0':
                    time.sleep(2)
                    job = self.queryAsyncJobResult(jobid)
                    logger.debug('Async Job Info: %s' % job)
                if job.jobstatus == '1':
                    # Success: fetch the final job result.
                    return self.__execute__('queryAsyncJobResult',
                        {'jobid': jobid})
                elif job.jobstatus == '2':
                    raise Exception('Asynchronous exception %s: %s.' % (
                        job.jobresultcode, job.jobresult))
            return json.loads(self.caller.open(
                self.url + '?' + urllib.urlencode(params)).read())
        except urllib2.HTTPError as errno:
            raise CloudException(errno.code)
    def process_async(self, command, kwargs, _class=DataObject):
        """Run an asynchronous command and wrap its result in _class."""
        logger.debug(
            'Processing asynchronous command %s with arguments: %s' % (
            command, str(kwargs)))
        data = self.__execute__(command, kwargs, True)
        if data['queryasyncjobresultresponse']['jobresulttype'] == u'object':
            # The single non-"job*" key holds the result object.
            obj = [v[0] for (k, v)
                in data['queryasyncjobresultresponse'].items()
                if not k.startswith('job')][0]
            obj['api_client'] = self
            logging.debug('Creating %s with params: %s' % (
                str(_class), str(obj)))
            return _class(**dict([(str(k), v) for (k, v)
                in obj.items()]))
        # FIXME: 'selector' is undefined on this path (NameError at runtime);
        # the intended selector for non-object job results must be determined.
        return self.process_list(selector, data, _class)
    def process(self, selector, data, _class=DataObject):
        """Wrap data[selector] (a dict) in _class, injecting api_client."""
        logger.debug('Processing result with selector: %s and data: %s' % (
            selector, str(data)))
        return _class(**dict([(str(k), v) for (k, v) in
            data.get(selector).items()] +
            [('api_client', self)]))
    def process_list(self, selector, data, _class=DataObject):
        """Wrap each item under the '>'-separated selector path in _class."""
        logger.debug(
            'Processing list results with selector: %s and data: %s' % (
            selector, str(data)))
        n = data
        for i in selector.split('>'):
            n = n.get(i)
        if not n:
            logger.error(
                'Expected list, got null. Selector: %s, Data: %s' % (selector,
                str(data)))
            return []
        return [_class(**dict([(str(k), v) for (k, v)
            in d.items()] + [('api_client', self)]))
            for d in n]
|
xtream1101/web-scraper | main.py | Python | mit | 1,737 | 0.001727 | import os
import sys
import argparse
import configparser
from utils.process import Process
# They are imported as all lowercase
# so it is case insensitive in the config file
from modules.tuebl import Tuebl as tuebl
from modules.itebooks import ItEbooks as itebooks
from modules.wallhaven import Wallhaven as wallhaven
# Command-line interface: one optional positional argument naming the config
# file, defaulting to ./config.ini.  Note this runs at import time.
parser = argparse.ArgumentParser()
parser.add_argument('config', help='custom config file', nargs='?', default='./config.ini')
args = parser.parse_args()
config = configparser.ConfigParser()
def stop():
    """Announce shutdown for every scraper, stop each one, then exit."""
    for scraper in scrape.values():
        print(scraper.log("Exiting..."))
        scraper.stop()
    sys.exit(0)
if __name__ == "__main__":
    # Read config file
    if not os.path.isfile(args.config):
        print("Invalid config file")
        sys.exit(0)
    config.read(args.config)
    # Build a Process wrapper for every enabled [section] of the config file.
    scrape = {}
    for site in config.sections():
        if config[site]['enabled'].lower() == 'true':
            try:  # Section name must match an imported scraper class; skip otherwise.
                site_class = getattr(sys.modules[__name__], site.lower())
            except AttributeError as e:
                print("\nThere is no module named " + site + "\n")
                continue
            dl_path = os.path.expanduser(config[site]['download_path'])
            num_files = int(config[site]['number_of_files'])
            threads = int(config[site]['threads'])
            scrape[site] = Process(site_class, dl_path, num_files, threads)
    # Start site parser
    try:
        for site in scrape:
            print("#### Scrapeing: " + site)
            scrape[site].start()
    except Exception as e:
        # Any failure aborts every scraper; stop() also exits the process.
        print("Exception [main]: " + str(e))
        stop()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.