repo_name string (lengths 5-100) | path string (lengths 4-231) | language string (1 class) | license string (15 classes) | size int64 (6-947k) | score float64 (0-0.34) | prefix string (lengths 0-8.16k) | middle string (lengths 3-512) | suffix string (lengths 0-8.17k) |
|---|---|---|---|---|---|---|---|---|
julien-hadleyjack/genrss-py | src/genrss/tracklist.py | Python | bsd-2-clause | 1,857 | 0.003231 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from lxml import html
from tinydb import TinyDB, Query
import requests
from . import get_logger, CONFIG
class TracklistManager(object):
db = TinyDB(CONFIG["technical"]["tracklist-db"], indent=2, separators=(',', ': '))
get_logger().info("Starting tracklist manager with database at %s", CONFIG["technical"]["tracklist-db"])
@classmethod
def get_tracklist(cls, pid):
result = cls.db.get(Query().pid == pid)
if not result:
get_logger().debug("Getting tracklist for: %s", pid)
tracklist = Tracklist(pid).listing
cls.db.insert({"pid": pid, "tracklist": tracklist})
else:
tracklist = result["tracklist"]
return tracklist
class Tracklist(object):
def __init__(self, pid):
"""
See also https://github.com/StevenMaude/bbc_radio_tracklisting_downloader.
:param pid: the unique pid of the episode
"""
self.pid = pid
self.listing = []
url = "http://www.bbc.co.uk/programmes/{}/segments.inc".format(self.pid)
page = requests.get(url)
        tree = html.fromstring(page.text)
for track in tree.xpath('//div[@class="segment__track"]'):
try:
                artist_names = track.xpath('.//span[@property="byArtist"]//span[@class="artist"]/text()')
except ValueError:
artist_names = []
artist = ', '.join(artist_names)
try:
title, = track.xpath('.//p/span[@property="name"]/text()')
except ValueError:
title = ''
self.listing.append([artist, title])
def __repr__(self):
return "Tracklist[pid={self.pid}, len={amount}]".format(amount=len(self.listing), **locals())
|
fsalamero/pilas | pilas/actores/__init__.py | Python | lgpl-3.0 | 2,442 | 0.02297 | # -*- encoding: utf-8 -*-
# Pilas engine - A video game framework.
#
# Copyright 2010 - Hugo Ruscitti
# License: LGPLv3 (see http://www.gnu.org/licenses/lgpl.html)
#
# Website - http://www.pilas-engine.com.ar
import math
import inspect
import pilas
from . import utils
from .actor import Actor
todos = []
__doc__ = """
pilas.actores module
====================
The actores module contains a series of classes
that represent video game characters.
To create actors in a game scene you simply
create a new object from one of these
classes.
For example, to create a penguin you could
write the following statement:
>>> p = pilas.actores.Pingu()
"""
from .mono import Mono
from .ejes import Ejes
from .animado import Animado
from .animacion import Animacion
from .explosion import Explosion
from .bomba import Bomba
from .pingu import Pingu
from .banana import Banana
from .texto import Texto
from .temporizador import Temporizador
from .moneda import Moneda
from .pizarra import Pizarra
from .pelota import Pelota
from .puntaje import Puntaje
from .estrella import Estrella
from .caja import Caja
from .nave import Nave
from .navekids import NaveKids
from .cursordisparo import CursorDisparo
from .piedra import Piedra
from .menu import Menu
from .opcion import Opcion
from .tortuga import Tortuga
from .mapa import Mapa
from .mapatiled import MapaTiled
from .martian import Martian
from .boton import Boton
from .aceituna import Aceituna
from .globo import Globo
from .dialogo import Dialogo
from .globoelegir import GloboElegir
from .pausa import Pausa
from .mano import CursorMano
from .cooperativista import Cooperativista
from .zanahoria import Zanahoria
from .energia import Energia
from .texto_inferior import TextoInferior
from .sonido import Sonido
from .personajes_rpg import Calvo
from .personajes_rpg import Maton
from .pacman import Pacman
from .fantasma import Fantasma
from .humo import Humo
from .proyectil import Bala
from .proyectil import Misil
from .proyectil import Dinamita
from .proyectil import EstrellaNinja
from .torreta import Torreta
from .ovni import Ovni
from .manzana import Manzana
from .robot import Robot
from .robot import Board
def listar_actores():
"""Devuelve una lista con todos los actores disponibles para crear en pilas
"""
return [k for k, v in vars(pilas.actores).items()
if inspect.isclass(v) and issubclass(v, Actor)]
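A small usage sketch, assuming pilas is importable; listar_actores() simply reflects over the classes imported above:
import pilas

# e.g. ['Mono', 'Pingu', 'Bomba', ...]; order is not guaranteed.
print(pilas.actores.listar_actores())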
|
xavfernandez/pip | src/pip/_internal/utils/wheel.py | Python | mit | 7,302 | 0 | """Support functions for working with wheel files.
"""
from __future__ import absolute_import
import logging
from email.parser import Parser
from zipfile import ZipFile
from pip._vendor.packaging.utils import canonicalize_name
from pip._vendor.pkg_resources import DistInfoDistribution
from pip._vendor.six import PY2, ensure_str
from pip._internal.exceptions import UnsupportedWheel
from pip._internal.utils.pkg_resources import DictMetadata
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
if MYPY_CHECK_RUNNING:
from email.message import Message
from typing import Dict, Tuple
from pip._vendor.pkg_resources import Distribution
if PY2:
from zipfile import BadZipfile as BadZipFile
else:
from zipfile import BadZipFile
VERSION_COMPATIBLE = (1, 0)
logger = logging.getLogger(__name__)
class WheelMetadata(DictMetadata):
"""Metadata provider that maps metadata decoding exceptions to our
internal exception type.
"""
def __init__(self, metadata, wheel_name):
# type: (Dict[str, bytes], str) -> None
super(WheelMetadata, self).__init__(metadata)
self._wheel_name = wheel_name
def get_metadata(self, name):
# type: (str) -> str
try:
return super(WheelMetadata, self).get_metadata(name)
except UnicodeDecodeError as e:
# Augment the default error with the origin of the file.
raise UnsupportedWheel(
"Error decoding metadata for {}: {}".format(
self._wheel_name, e
)
            )
def pkg_resources_distribution_for_wheel(wheel_zip, name, location):
# type: (ZipFile, str, str) -> Distribution
"""Get a pkg_resources distribution given a wheel.
:raises UnsupportedWheel: on any errors
"""
info_dir, _ = parse_wheel(wheel_zip, name)
metadata_files = [
p for p in wheel_zip.namelist() if p.startswith("{}/".format(info_dir))
]
metadata_text = {} # type: Dict[str, bytes]
for path in metadata_files:
# If a flag is set, namelist entries may be unicode in Python 2.
# We coerce them to native str type to match the types used in the rest
# of the code. This cannot fail because unicode can always be encoded
# with UTF-8.
full_path = ensure_str(path)
_, metadata_name = full_path.split("/", 1)
try:
metadata_text[metadata_name] = read_wheel_metadata_file(
wheel_zip, full_path
)
except UnsupportedWheel as e:
raise UnsupportedWheel(
"{} has an invalid wheel, {}".format(name, str(e))
)
metadata = WheelMetadata(metadata_text, location)
return DistInfoDistribution(
location=location, metadata=metadata, project_name=name
)
def parse_wheel(wheel_zip, name):
# type: (ZipFile, str) -> Tuple[str, Message]
"""Extract information from the provided wheel, ensuring it meets basic
standards.
Returns the name of the .dist-info directory and the parsed WHEEL metadata.
"""
try:
info_dir = wheel_dist_info_dir(wheel_zip, name)
metadata = wheel_metadata(wheel_zip, info_dir)
version = wheel_version(metadata)
except UnsupportedWheel as e:
raise UnsupportedWheel(
"{} has an invalid wheel, {}".format(name, str(e))
)
check_compatibility(version, name)
return info_dir, metadata
def wheel_dist_info_dir(source, name):
# type: (ZipFile, str) -> str
"""Returns the name of the contained .dist-info directory.
Raises AssertionError or UnsupportedWheel if not found, >1 found, or
it doesn't match the provided name.
"""
# Zip file path separators must be /
subdirs = list(set(p.split("/")[0] for p in source.namelist()))
info_dirs = [s for s in subdirs if s.endswith('.dist-info')]
if not info_dirs:
raise UnsupportedWheel(".dist-info directory not found")
if len(info_dirs) > 1:
raise UnsupportedWheel(
"multiple .dist-info directories found: {}".format(
", ".join(info_dirs)
)
)
info_dir = info_dirs[0]
info_dir_name = canonicalize_name(info_dir)
canonical_name = canonicalize_name(name)
if not info_dir_name.startswith(canonical_name):
raise UnsupportedWheel(
".dist-info directory {!r} does not start with {!r}".format(
info_dir, canonical_name
)
)
# Zip file paths can be unicode or str depending on the zip entry flags,
# so normalize it.
return ensure_str(info_dir)
def read_wheel_metadata_file(source, path):
# type: (ZipFile, str) -> bytes
try:
return source.read(path)
# BadZipFile for general corruption, KeyError for missing entry,
# and RuntimeError for password-protected files
except (BadZipFile, KeyError, RuntimeError) as e:
raise UnsupportedWheel(
"could not read {!r} file: {!r}".format(path, e)
)
def wheel_metadata(source, dist_info_dir):
# type: (ZipFile, str) -> Message
"""Return the WHEEL metadata of an extracted wheel, if possible.
Otherwise, raise UnsupportedWheel.
"""
path = "{}/WHEEL".format(dist_info_dir)
# Zip file path separators must be /
wheel_contents = read_wheel_metadata_file(source, path)
try:
wheel_text = ensure_str(wheel_contents)
except UnicodeDecodeError as e:
raise UnsupportedWheel("error decoding {!r}: {!r}".format(path, e))
# FeedParser (used by Parser) does not raise any exceptions. The returned
# message may have .defects populated, but for backwards-compatibility we
# currently ignore them.
return Parser().parsestr(wheel_text)
def wheel_version(wheel_data):
# type: (Message) -> Tuple[int, ...]
"""Given WHEEL metadata, return the parsed Wheel-Version.
Otherwise, raise UnsupportedWheel.
"""
version_text = wheel_data["Wheel-Version"]
if version_text is None:
raise UnsupportedWheel("WHEEL is missing Wheel-Version")
version = version_text.strip()
try:
return tuple(map(int, version.split('.')))
except ValueError:
raise UnsupportedWheel("invalid Wheel-Version: {!r}".format(version))
def check_compatibility(version, name):
# type: (Tuple[int, ...], str) -> None
"""Raises errors or warns if called with an incompatible Wheel-Version.
Pip should refuse to install a Wheel-Version that's a major series
    ahead of what it's compatible with (e.g. 2.0 > 1.1); and warn when
    installing a version only a minor version ahead (e.g. 1.2 > 1.1).
version: a 2-tuple representing a Wheel-Version (Major, Minor)
name: name of wheel or package to raise exception about
:raises UnsupportedWheel: when an incompatible Wheel-Version is given
"""
if version[0] > VERSION_COMPATIBLE[0]:
raise UnsupportedWheel(
"%s's Wheel-Version (%s) is not compatible with this version "
"of pip" % (name, '.'.join(map(str, version)))
)
elif version > VERSION_COMPATIBLE:
logger.warning(
'Installing from a newer Wheel-Version (%s)',
'.'.join(map(str, version)),
)
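A hedged sketch of how the helpers above compose; the wheel filename and location are illustrative, not real artifacts:
from zipfile import ZipFile

# parse_wheel validates the .dist-info layout and Wheel-Version first;
# the distribution then wraps the extracted metadata for pkg_resources.
with ZipFile("example_pkg-1.0-py2.py3-none-any.whl") as wheel_zip:
    info_dir, wheel_message = parse_wheel(wheel_zip, name="example-pkg")
    dist = pkg_resources_distribution_for_wheel(
        wheel_zip, name="example-pkg", location="/tmp/unpacked"
    )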
|
cloudanswers/ccp-integrations | webhooks/helpscout.py | Python | agpl-3.0 | 265 | 0 | from | flask import Blueprint, request, jsonify
import time
app = Blueprint('helpscout_webhook', __name__)
@app.route("/webhooks/helpscout_app_callback", methods=['POST'])
def helpscout_webhook():
return jsonify({"html": "<b>test</b> test %s" % time.gmtime()})
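A sketch of mounting the blueprint on an application; the surrounding Flask app is assumed and not part of this file:
from flask import Flask
from webhooks.helpscout import app as helpscout_blueprint

flask_app = Flask(__name__)
flask_app.register_blueprint(helpscout_blueprint)
# POST /webhooks/helpscout_app_callback now returns the JSON payload above.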
|
tensorflow/examples | tensorflow_examples/lite/model_maker/core/data_util/object_detector_dataloader.py | Python | apache-2.0 | 15,144 | 0.00383 | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Dataloader for object detection."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
from typing import Collection, Dict, List, Optional, Tuple, TypeVar, Union
import tensorflow as tf
from tensorflow_examples.lite.model_maker.core.api.api_util import mm_export
from tensorflow_examples.lite.model_maker.core.data_util import dataloader
from tensorflow_examples.lite.model_maker.core.data_util import object_detector_dataloader_util as util
import yaml
from tensorflow_examples.lite.model_maker.third_party.efficientdet import dataloader as det_dataloader
from tensorflow_examples.lite.model_maker.third_party.efficientdet.keras import label_util
DetectorDataLoader = TypeVar('DetectorDataLoader', bound='DataLoader')
# Csv lines with the label map.
CsvLines = Tuple[List[List[List[str]]], Dict[int, str]]
def _get_label_map(label_map):
"""Gets the label map dict."""
if isinstance(label_map, list):
label_map_dict = {}
for i, label in enumerate(label_map):
      # 0 is reserved for background.
label_map_dict[i + 1] = label
label_map = label_map_dict
label_map = label_util.get_label_map(label_map)
if 0 in label_map and label_map[0] != 'background':
    raise ValueError('0 must be reserved for background.')
label_map.pop(0, None)
name_set = set()
for idx, name in label_map.items():
if not isinstance(idx, int):
raise ValueError('The key (label id) in label_map must be integer.')
if not isinstance(name, str):
raise ValueError('The value (label name) in label_map must be string.')
if name in name_set:
raise ValueError('The value: %s (label name) can\'t be duplicated.' %
name)
name_set.add(name)
return label_map
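A small sketch of the normalization above, assuming label_util.get_label_map passes a dict input through unchanged:
# A list becomes a 1-based id -> name dict; 0 stays reserved for background.
assert _get_label_map(['person', 'notperson']) == {1: 'person', 2: 'notperson'}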
def _group_csv_lines(csv_file: str,
set_prefixes: List[str],
delimiter: str = ',',
quotechar: str = '"') -> CsvLines:
"""Groups csv_lines for different set_names and label_map.
Args:
csv_file: filename of the csv file.
set_prefixes: Set prefix names for training, validation and test data. e.g.
['TRAIN', 'VAL', 'TEST'].
delimiter: Character used to separate fields.
quotechar: Character used to quote fields containing special characters.
Returns:
[training csv lines, validation csv lines, test csv lines], label_map
"""
# Dict that maps integer label ids to string label names.
label_map = {}
with tf.io.gfile.GFile(csv_file, 'r') as f:
reader = csv.reader(f, delimiter=delimiter, quotechar=quotechar)
# `lines_list` = [training csv lines, validation csv lines, test csv lines]
# Each csv line is a list of strings separated by delimiter. e.g.
# row 'one,two,three' in the csv file will be ['one', two', 'three'].
lines_list = [[], [], []]
for line in reader:
# Groups lines by the set_name.
set_name = line[0].strip()
for i, set_prefix in enumerate(set_prefixes):
if set_name.startswith(set_prefix):
lines_list[i].append(line)
label = line[2].strip()
# Updates label_map if it's a new label.
if label not in label_map.values():
label_map[len(label_map) + 1] = label
return lines_list, label_map
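An illustrative call with a hypothetical filename; the layout it assumes (set name in column 0, label in column 2) is exactly what the loop above reads:
lines_list, label_map = _group_csv_lines(
    'annotations.csv', set_prefixes=['TRAIN', 'VAL', 'TEST'])
train_lines, val_lines, test_lines = lines_list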
@mm_export('object_detector.DataLoader')
class DataLoader(dataloader.DataLoader):
"""DataLoader for object detector."""
def __init__(self,
tfrecord_file_patten,
size,
label_map,
annotations_json_file=None):
"""Initialize DataLoader for object detector.
Args:
tfrecord_file_patten: Glob for tfrecord files. e.g. "/tmp/coco*.tfrecord".
size: The size of the dataset.
      label_map: Variable that maps label integer ids to string label
        names. 0 is the reserved key for `background` and doesn't need to be
        included in label_map. Label names can't be duplicated. Supported
        formats are:
        1. Dict mapping label integer ids to string label names, such as
           {1: 'person', 2: 'notperson'}.
        2. List of label names such as ['person', 'notperson'], which is
           the same as setting label_map={1: 'person', 2: 'notperson'}.
        3. String, the name of a known dataset. Accepted values are: 'coco',
           'voc' and 'waymo'.
        4. String, a yaml filename that stores label_map.
annotations_json_file: JSON with COCO data format containing golden
bounding boxes. Used for validation. If None, use the ground truth from
the dataloader. Refer to
https://towardsdatascience.com/coco-data-format-for-object-detection-a4c5eaf518c5
for the description of COCO data format.
"""
super(DataLoader, self).__init__(dataset=None, size=size)
self.tfrecord_file_patten = tfrecord_file_patten
self.label_map = _get_label_map(label_map)
self.annotations_json_file = annotations_json_file
@classmethod
def from_pascal_voc(
cls,
images_dir: str,
annotations_dir: str,
label_map: Union[List[str], Dict[int, str], str],
annotation_filenames: Optional[Collection[str]] = None,
ignore_difficult_instances: bool = False,
num_shards: int = 100,
max_num_images: Optional[int] = None,
cache_dir: Optional[str] = None,
cache_prefix_filename: Optional[str] = None) -> DetectorDataLoader:
"""Loads from dataset with PASCAL VOC format.
Refer to
https://towardsdatascience.com/coco-data-format-for-object-detection-a4c5eaf518c5
for the description of PASCAL VOC data format.
LabelImg Tool (https://github.com/tzutalin/labelImg) can annotate the image
and save annotations as XML files in PASCAL VOC data format.
Annotations are in the folder: `annotations_dir`.
    Raw images are in the folder: `images_dir`.
Args:
images_dir: Path to directory that store raw images.
annotations_dir: Path to the annotations directory.
      label_map: Variable that maps label integer ids to string label
        names. 0 is the reserved key for `background`. Label names can't be
        duplicated. Supported formats:
        1. Dict mapping label integer ids to string label names, e.g.
           {1: 'person', 2: 'notperson'}.
        2. List of label names, e.g. ['person', 'notperson'], which is
           the same as setting label_map={1: 'person', 2: 'notperson'}.
        3. String, the name of a known dataset. Accepted values are: 'coco',
           'voc' and 'waymo'.
        4. String, a yaml filename that stores label_map.
annotation_filenames: Collection of annotation filenames (strings) to be
        loaded. For instance, if there are 3 annotation files [0.xml, 1.xml,
2.xml] in `annotations_dir`, setting annotation_filenames=['0', '1']
makes this method only load [0.xml, 1.xml].
ignore_difficult_instances: Whether to ignore difficult instances.
`difficult` can be set inside `object` item in the annotation xml file.
num_shards: Number of shards for output file.
      max_num_images: Max number of images to process.
cache_dir: The cache directory to save TFRecord, metadata and json file.
        When cache_dir is not set, a temporary folder will be created and
        will not be removed automatically after training, so it can still
        be used later.
cache_prefix_filename: The cache prefix filename. If not set, will
automatically generate it based on `image_dir`, `annotations_dir` and
`annotation_file |
mscuthbert/abjad | abjad/tools/selectiontools/test/test_selectiontools_Selection__all_are_components_in_same_logical_voice.py | Python | gpl-3.0 | 46,366 | 0.001229 | # -*- encoding: utf-8 -*-
import pytest
from abjad import *
Component = scoretools.Component
Selection = selectiontools.Selection
def test_selectiontools_Selection__all_are_components_in_same_logical_voice_01():
    r'''Unincorporated leaves do not share a logical voice.
    Unincorporated leaves do not share a root component.
    False if orphans are not allowed; True if they are.
'''
notes = [Note("c'8"), Note("d'8"), Note("e'8"), Note("f'8")]
assert Selection._all_are_components_in_same_logical_voice(notes)
    assert not Selection._all_are_components_in_same_logical_voice(
notes, allow_orphans=False)
def test_selectiontools_Selection__all_are_components_in_same_logical_voice_02():
r'''Container and leaves all logical voice.
'''
container = Container("c'8 d'8 e'8 f'8")
r'''
{
c'8
d'8
e'8
f'8
}
'''
assert Selection._all_are_components_in_same_logical_voice(
list(iterate(container).by_class()))
def test_selectiontools_Selection__all_are_components_in_same_logical_voice_03():
r'''Tuplet and leaves all logical voice.
'''
tuplet = scoretools.FixedDurationTuplet(Duration(2, 8), "c'8 d'8 e'8")
r'''
\times 2/3 {
c'8
d'8
e'8
}
'''
assert Selection._all_are_components_in_same_logical_voice(
list(iterate(tuplet).by_class()))
def test_selectiontools_Selection__all_are_components_in_same_logical_voice_04():
r'''Voice and leaves all appear in same logical voice.
'''
voice = Voice("c'8 d'8 e'8 f'8")
r'''
\new Voice {
c'8
d'8
e'8
f'8
}
'''
assert Selection._all_are_components_in_same_logical_voice(
list(iterate(voice).by_class()))
def test_selectiontools_Selection__all_are_components_in_same_logical_voice_05():
r'''Anonymous staff and leaves all appear in same logical voice.
'''
staff = Staff("c'8 d'8 e'8 f'8")
r'''
\new Staff {
c'8
d'8
e'8
f'8
}
'''
assert Selection._all_are_components_in_same_logical_voice(
list(iterate(staff).by_class()))
def test_selectiontools_Selection__all_are_components_in_same_logical_voice_06():
r'''Voice, sequential and leaves all appear in same logical voice.
'''
voice = Voice(r'''
{
c'8
d'8
e'8
f'8
}
{
g'8
a'8
b'8
c''8
}
''')
assert systemtools.TestManager.compare(
voice,
r'''
\new Voice {
{
c'8
d'8
e'8
f'8
}
{
g'8
a'8
b'8
c''8
}
}
'''
)
assert Selection._all_are_components_in_same_logical_voice(
list(iterate(voice).by_class()))
def test_selectiontools_Selection__all_are_components_in_same_logical_voice_07():
r'''Anonymous voice, tuplets and leaves all appear in same logical voice.
'''
voice = Voice(r'''
\times 2/3 {
c'8
d'8
e'8
}
\times 2/3 {
f'8
g'8
a'8
}
''')
assert systemtools.TestManager.compare(
voice,
r'''
\new Voice {
\times 2/3 {
c'8
d'8
e'8
}
\times 2/3 {
f'8
g'8
a'8
}
}
'''
)
assert Selection._all_are_components_in_same_logical_voice(
list(iterate(voice).by_class()))
def test_selectiontools_Selection__all_are_components_in_same_logical_voice_08():
r'''Logical voice does not extend across anonymous voices.
'''
staff = Staff(r'''
\new Voice {
c'8
d'8
e'8
f'8
}
\new Voice {
g'8
a'8
b'8
c''8
}
''')
assert systemtools.TestManager.compare(
staff,
r'''
\new Staff {
\new Voice {
c'8
d'8
e'8
f'8
}
\new Voice {
g'8
a'8
b'8
c''8
}
}
'''
)
assert Selection._all_are_components_in_same_logical_voice(
staff.select_leaves(allow_discontiguous_leaves=True)[:4])
assert Selection._all_are_components_in_same_logical_voice(
staff.select_leaves(allow_discontiguous_leaves=True)[4:])
assert not Selection._all_are_components_in_same_logical_voice(
staff.select_leaves(allow_discontiguous_leaves=True))
assert not Selection._all_are_components_in_same_logical_voice(
staff[:])
def test_selectiontools_Selection__all_are_components_in_same_logical_voice_09():
    r'''Logical voice extends across like-named voices.
'''
staff = Staff(r'''
\context Voice = "foo" {
c'8
d'8
e'8
f'8
}
\context Voice = "foo" {
g'8
a'8
b'8
c''8
}
''')
assert systemtools.TestManager.compare(
staff,
r'''
\new Staff {
\context Voice = "foo" {
c'8
d'8
e'8
f'8
}
\context Voice = "foo" {
g'8
a'8
b'8
c''8
}
}
'''
)
assert Selection._all_are_components_in_same_logical_voice(
staff.select_leaves(allow_discontiguous_leaves=True))
def test_selectiontools_Selection__all_are_components_in_same_logical_voice_10():
r'''Logical voice does not extend across differently named voices.
'''
staff = Staff(r'''
\context Voice = "foo" {
c'8
d'8
}
\context Voice = "bar" {
e'8
f'8
}
''')
assert systemtools.TestManager.compare(
staff,
r'''
\new Staff {
\context Voice = "foo" {
c'8
d'8
}
\context Voice = "bar" {
e'8
f'8
}
}
'''
)
assert not Selection._all_are_components_in_same_logical_voice(
staff.select_leaves(allow_discontiguous_leaves=True))
def test_selectiontools_Selection__all_are_components_in_same_logical_voice_11():
    r'''Logical voice does not extend across anonymous voices.
Logical voice does not extend across anonymous staves.
'''
container = Container(r'''
\new Staff {
\new Voice {
c'8
d'8
}
}
\new Staff {
\new Voice {
e'8
f'8
}
}
''')
assert systemtools.TestManager.compare(
container,
r'''
{
\new Staff {
\new Voice {
c'8
d'8
}
}
\new Staff {
\new Voice {
e'8
f'8
}
}
}
'''
)
assert not Selection._all_are_components_in_same_logical_voice(
container.select_leaves(allow_discontiguous_leaves=True))
def test_selectiontools_Selection__all_are_components_in_same_logical_voice_12():
r'''Logical voice does not extend across anonymous voices.
Logical voice does not extend across anonymous staves.
'''
container = Container(r'''
\new Staff <<
\new Voice {
c'8
d'8
}
\new Voice {
e'8
f'8
}
>>
\new Staff < |
Mellthas/quodlibet | quodlibet/library/file.py | Python | gpl-2.0 | 15,337 | 0.000326 | # This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
import time
from pathlib import Path
from typing import Generator, Set, Iterable
from quodlibet import print_d, print_w, _, formats
from quodlibet.formats import AudioFileError, AudioFile
from quodlibet.library.base import iter_paths, Library, PicklingMixin
from quodlibet.qltk.notif import Task
from quodlibet.util.path import ismount, unexpand, normalize_path
from senf import fsn2text, fsnative
class FileLibrary(Library[fsnative, AudioFile], PicklingMixin):
"""A library containing items on a local(-ish) filesystem.
These must support the valid, exists, mounted, and reload methods,
and have a mountpoint attribute.
"""
def __init__(self, name=None):
super().__init__(name)
self._masked = {}
def _load_init(self, items):
"""Add many items to the library, check if the
mountpoints are available and mark items as masked if not.
Does not check if items are valid.
"""
mounts = {}
contents = self._contents
masked = self._masked
for item in items:
mountpoint = item.mountpoint
if mountpoint not in mounts:
is_mounted = ismount(mountpoint)
# In case mountpoint is mounted through autofs we need to
# access a sub path for it to mount
# https://github.com/quodlibet/quodlibet/issues/2146
if not is_mounted:
item.exists()
is_mounted = ismount(mountpoint)
mounts[mountpoint] = is_mounted
# at least one not mounted, make sure masked has an entry
if not is_mounted:
masked.setdefault(mountpoint, {})
if mounts[mountpoint]:
contents[item.key] = item
else:
masked[mountpoint][item.key] = item
def _load_item(self, item, force=False):
"""Add an item, or refresh it if it's already in the library.
No signals will be fired.
Return a tuple of booleans: (changed, removed)
"""
print_d(f"Loading {item.key!r}", self._name)
valid = item.valid()
# The item is fine; add it if it's not present.
if not force and valid:
print_d(f"{item.key!r} is valid.", self._name)
self._contents[item.key] = item
return False, False
else:
# Either we should force a load, or the item is not okay.
# We're going to reload; this could change the key. So
# remove the item if it's currently in.
try:
del self._contents[item.key]
except KeyError:
present = False
else:
present = True
# If the item still exists, reload it.
if item.exists():
try:
item.reload()
except AudioFileError:
print_w(f"Error reloading {item.key!r}", self._name)
return False, True
else:
print_d(f"Reloaded {item.key!r}.", self._name)
self._contents[item.key] = item
return True, False
elif not item.mounted():
# We don't know if the item is okay or not, since
                # it's not mounted. If the item was present
# we need to mark it as removed.
print_d(f"Masking {item.key!r}", self._name)
self._masked.setdefault(item.mountpoint, {})
self._masked[item.mountpoint][item.key] = item
return False, present
else:
# The item doesn't exist at all anymore. Mark it as
# removed if it was present, otherwise nothing.
print_d(f"Ignoring (so removing) {item.key!r}.", self._name)
return False, present
def reload(self, item, changed=None, removed=None):
"""Reload a song, possibly noting its status.
If sets are given, it assumes the caller will handle signals,
and only updates the sets. Otherwise, it handles signals
itself. It *always* handles library contents, so do not
try to remove (again) a song that appears in the removed set.
"""
was_changed, was_removed = self._load_item(item, force=True)
assert not (was_changed and was_removed)
if was_changed:
if changed is None:
self.emit('changed', {item})
else:
changed.add(item)
elif was_removed:
if removed is None:
self.emit('removed', {item})
else:
removed.add(item)
def rebuild(self, paths, force=False, exclude=[], cofuncid=None):
"""Reload or remove songs if they have changed or been deleted.
This generator rebuilds the library over the course of iteration.
Any paths given will be scanned for new files, using the 'scan'
method.
Only items present in the library when the rebuild is started
will be checked.
If this function is copooled, set "cofuncid" to enable pause/stop
buttons in the UI.
"""
print_d(f"Rebuilding, force is {force}", self._name)
task = Task(_("Library"), _("Checking mount points"))
if cofuncid:
task.copool(cofuncid)
for i, (point, items) in task.list(enumerate(self._masked.items())):
if ismount(point):
self._contents.update(items)
del self._masked[point]
self.emit('added', list(items.values()))
yield True
task = Task(_("Library"), _("Scanning library"))
if cofuncid:
task.copool(cofuncid)
changed, removed = set(), set()
for i, (key, item) in task.list(enumerate(sorted(self.items()))):
if key in self._contents and force or not item.valid():
self.reload(item, changed, removed)
# These numbers are pretty empirical. We should yield more
# often than we emit signals; that way the main loop stays
# interactive and doesn't get bogged down in updates.
if len(changed) >= 200:
self.emit('changed', changed)
changed = set()
if len(removed) >= 200:
self.emit('removed', removed)
removed = set()
if len(changed) > 20 or i % 200 == 0:
yield True
        print_d(f"Removing {len(removed)}, changing {len(changed)}.", self._name)
if removed:
self.emit('removed', removed)
if changed:
self.emit('changed', changed)
for value in self.scan(paths, exclude, cofuncid):
yield value
def add_filename(self, filename, add=True):
"""Add a file based on its filename.
Subclasses must override this to open the file correctly.
"""
raise NotImplementedError
def contains_filename(self, filename):
"""Returns if a song for the passed filename is in the library.
Retur | ns:
bool
"""
raise NotImplementedError
def scan(self, paths, exclude=[], cofuncid=None):
def need_yield(last_yield=[0]):
current = time.time()
if abs(current - last_yield[0]) > 0.015:
last_yield[0] = current
return True
return False
def need_added(last_added=[0]):
            current = time.time()
if abs(current - last_added[0]) > 1.0:
last_added[0] = current
return True
return False
# first scan each path for new files
paths_to_load = []
for scan_path in paths:
print_d(f"Scanning {s |
persandstrom/home-assistant | tests/components/image_processing/test_microsoft_face_detect.py | Python | apache-2.0 | 5,410 | 0 | """The tests for the microsoft face detect platform."""
from unittest.mock import patch, PropertyMock
from homeassistant.core import callback
from homeassistant.const import ATTR_ENTITY_PICTURE
from homeassistant.setup import setup_component
import homeassistant.components.image_processing as ip
import homeassistant.components.microsoft_face as mf
from tests.common import (
get_test_home_assistant, assert_setup_component, load_fixture, mock_coro)
class TestMicrosoftFaceDetectSetup:
"""Test class for image processing."""
def setup_method(self):
"""Set up things to be run when tests are started."""
self.hass = get_test_home_assistant()
def teardown_method(self):
"""Stop everything that was started."""
self.hass.stop()
@patch('homeassistant.components.microsoft_face.'
'MicrosoftFace.update_store', return_value=mock_coro())
def test_setup_platform(self, store_mock):
"""Set up platform with one entity."""
config = {
ip.DOMAIN: {
'platform': 'microsoft_face_detect',
'source': {
'entity_id': 'camera.demo_camera'
},
'attributes': ['age', 'gender'],
},
'camera': {
'platform': 'demo'
},
mf.DOMAIN: {
'api_key': '12345678abcdef6',
}
}
with assert_setup_component(1, ip.DOMAIN):
setup_component(self.hass, ip.DOMAIN, config)
assert self.hass.states.get(
'image_processing.microsoftface_demo_camera')
@patch('homeassistant.components.microsoft_face.'
'MicrosoftFace.update_store', return_value=mock_coro())
def test_setup_platform_name(self, store_mock):
"""Set up platform with one entity and set name."""
config = {
ip.DOMAIN: {
'platform': 'microsoft_face_detect',
'source': {
'entity_id': 'camera.demo_camera',
'name': 'test local'
},
},
'camera': {
'platform': 'demo'
},
mf.DOMAIN: {
'api_key': '12345678abcdef6',
}
}
with assert_setup_component(1, ip.DOMAIN):
setup_component(self.hass, ip.DOMAIN, config)
assert self.hass.states.get('image_processing.test_local')
class TestMicrosoftFaceDetect:
"""Test class for image processing."""
def setup_method(self):
"""Set up things to be run when tests are started."""
self.hass = get_test_home_assistant()
self.config = {
ip.DOMAIN: {
'platform': 'microsoft_face_detect',
'source': {
'entity_id': 'camera.demo_camera',
'name': 'test local'
},
'attributes': ['age', 'gender'],
},
'camera': {
'platform': 'demo'
},
mf.DOMAIN: {
'api_key': '12345678abcdef6',
}
}
self.endpoint_url = "https://westus.{0}".format(mf.FACE_API_URL)
def teardown_method(self):
"""Stop everything that was started."""
self.hass.stop()
@patch('homeassistant.components.image_processing.microsoft_face_detect.'
'MicrosoftFaceDetectEntity.should_poll',
new_callable=PropertyMock(return_value=False))
def test_ms_detect_process_image(self, poll_mock, aioclient_mock):
"""Set up and scan a picture and test plates from event."""
aioclient_mock.get(
self.endpoint_url.format("persongroups"),
text=load_fixture('microsoft_face_persongroups.json')
)
aioclient_mock.get(
self.endpoint_url.format("persongroups/test_group1/persons"),
text=load_fixture('microsoft_face_persons.json')
)
aioclient_mock.get(
self.endpoint_url.format("persongroups/test_group2/persons"),
text=load_fixture('microsoft_face_persons.json')
)
setup_component(self.hass, ip.DOMAIN, self.config)
state = self.hass.states.get('camera.demo_camera')
url = "{0}{1}".format(
self.hass.config.api.base_url,
state.attributes.get(ATTR_ENTITY_PICTURE))
face_events = []
@callback
        def mock_face_event(event):
"""Mock event."""
face_events.append(event)
        self.hass.bus.listen('image_processing.detect_face', mock_face_event)
aioclient_mock.get(url, content=b'image')
aioclient_mock.post(
self.endpoint_url.format("detect"),
text=load_fixture('microsoft_face_detect.json'),
params={'returnFaceAttributes': "age,gender"}
)
ip.scan(self.hass, entity_id='image_processing.test_local')
self.hass.block_till_done()
state = self.hass.states.get('image_processing.test_local')
assert len(face_events) == 1
assert state.attributes.get('total_faces') == 1
assert state.state == '1'
assert face_events[0].data['age'] == 71.0
assert face_events[0].data['gender'] == 'male'
assert face_events[0].data['entity_id'] == \
'image_processing.test_local'
|
googleapis/python-appengine-admin | google/cloud/appengine_admin_v1/services/firewall/__init__.py | Python | apache-2.0 | 745 | 0 | # -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .client import FirewallClient
from .async_client import FirewallAsyncClient
__all__ = (
"FirewallClient",
"FirewallAsyncClient",
)
|
rootio/rootio_web | alembic/versions/initial_added_tables.py | Python | agpl-3.0 | 13,787 | 0.015014 | """Added initial tables
Revision ID: initial
Revises: None
Create Date: 2013-10-25 16:52:12.150570
"""
# revision identifiers, used by Alembic.
revision = 'initial'
down_revision = None
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_table(u'radio_language',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=100), nullable=True),
sa.Column('iso639_1', sa.String(length=2), nullable=True),
sa.Column('iso639_2', sa.String(length=3), nullable=True),
sa.Column('locale_code', sa.String(length=10), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_table('radio_network',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=100), nullable=False),
sa.Column('about', sa.Text(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_table('radio_recording',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('url', sa.String(length=160), nullable=True),
sa.Column('local_file', sa.String(length=160), nullable=True),
sa.Column('created_time', sa.DateTime(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_table('user_details',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('age', sa.Integer(), nullable=True),
sa.Column('phone', sa.String(length=100), nullable=True),
sa.Column('url', sa.String(length=100), nullable=True),
sa.Column('location', sa.String(length=100), nullable=True),
sa.Column('bio', sa.String(length=100), nullable=True),
sa.Column('gender_code', sa.Integer(), nullable=True),
sa.Column('created_time', sa.DateTime(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_table(u'telephony_phonenumber',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('carrier', sa.String(length=100), nullable=True),
sa.Column('countrycode', sa.String(length=3), nullable=True),
sa.Column('number', sa.String(length=20), nullable=False),
sa.Column('raw_number', sa.String(length=20), nullable=False),
sa.Column('number_type', sa.Integer(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_table(u'radio_programtype',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=100), nullable=False),
sa.Column('description', sa.Text(), nullable=False),
sa.Column('definition', sa.PickleType(), nullable=False),
sa.PrimaryKeyConstraint('id')
)
op.create_table(u'radio_location',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=100), nullable=True),
sa.Column('municipality', sa.String(length=100), nullable=True),
sa.Column('district', sa.String(length=100), nullable=True),
sa.Column('modifieddate', sa.Date(), nullable=True),
sa.Column('country', sa.String(length=100), nullable=True),
sa.Column('addressline1', sa.String(length=100), nullable=True),
sa.Column('addressline2', sa.String(length=100), nullable=True),
sa.Column('latitude', sa.Float(), nullable=True),
sa.Column('longitude', sa.Float(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_table('user_user',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=100), nullable=False),
sa.Column('email', sa.String(length=100), nullable=False),
sa.Column('openid', sa.String(length=100), nullable=True),
sa.Column('activation_key', sa.String(length=100), nullable=True),
sa.Column('created_time', sa.DateTime(), nullable=True),
sa.Column('last_accessed', sa.DateTime(), nullable=True),
sa.Column('avatar', sa.String(length=100), nullable=True),
sa.Column('password', sa.String(length=300), nullable=False),
sa.Column('role_code', sa.SmallInteger(), nullable=True),
sa.Column('status_code', sa.SmallInteger(), nullable=True),
sa.Column('user_detail_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['user_detail_id'], ['user_details.id'], ),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('email'),
sa.UniqueConstraint('name'),
sa.UniqueConstraint('openid')
)
op.create_table('radio_person',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('title', sa.String(length=8), nullable=True),
sa.Column('firstname', sa.String(length=100), nullable=True),
sa.Column('middlename', sa.String(length=100), nullable=True),
sa.Column('lastname', sa.String(length=100), nullable=True),
sa.Column('email', sa.String(length=100), nullable=True),
sa.Column('additionalcontact', sa.String(length=100), nullable=True),
sa.Column('phone_id', sa.Integer(), nullable=True),
sa.Column('gender_code', sa.Integer(), nullable=True),
sa.Column('privacy_code', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['phone_id'], ['telephony_phonenumber.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table('radio_program',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=100), nullable=False),
sa.Column('duration', sa.Time(), nullable=True),
sa.Column('update_recurrence', sa.Text(), nullable=True),
sa.Column('language_id', sa.Integer(), nullable=True),
sa.Column('program_type_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['language_id'], ['radio_language.id'], ),
sa.ForeignKeyConstraint(['program_type_id'], ['radio_programtype.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table('radio_station',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=100), nullable=False),
sa.Column('about', sa.Text(), nullable=True),
sa.Column('frequency', sa.Float(), nullable=True),
sa.Column('owner_id', sa.Integer(), nullable=True),
sa.Column('network_id', sa.Integer(), nullable=True),
sa.Column('location_id', sa.Integer(), nullable=True),
sa.Column('cloud_phone_id', sa.Integer(), nullable=True),
sa.Column('transmitter_phone_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['cloud_phone_id'], ['telephony_phonenumber.id'], ),
sa.ForeignKeyConstraint(['location_id'], ['radio_location.id'], ),
sa.ForeignKeyConstraint(['network_id'], ['radio_network.id'], ),
sa.ForeignKeyConstraint(['owner_id'], ['user_user.id'], ),
sa.ForeignKeyConstraint(['transmitter_phone_id'], ['telephony_phonenumber.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table(u'radio_networkadmins',
sa.Column(u'user_id', sa.Integer(), nullable=True),
    sa.Column(u'network_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['network_id'], ['radio_network.id'], ),
sa.ForeignKeyConstraint(['user_id'], ['user_user.id'], ),
sa.PrimaryKeyConstraint()
)
op.create_table(u'radio_personlanguage',
sa.Column(u'language_id', sa.Integer(), nullable=True),
sa.Column(u'person_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['language_id'], ['radio_language.id'], ),
    sa.ForeignKeyConstraint(['person_id'], ['radio_person.id'], ),
sa.PrimaryKeyConstraint()
)
op.create_table('radio_episode',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('program_id', sa.Integer(), nullable=False),
sa.Column('recording_id', sa.Integer(), nullable=True),
sa.Column('created_time', sa.DateTime(), nullable=True),
sa.ForeignKeyConstraint(['program_id'], ['radio_program.id'], ),
sa.ForeignKeyConstraint(['recording_id'], ['radio_recording.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table('radio_scheduledepisode',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('station_id', sa.Integer(), nullable=True),
sa.Column('episode_id', sa.Integer(), nullable=True),
sa.Column('start', sa.DateTime(), nullable=True),
sa.Column('end', sa.DateTime(), nullable=True),
sa.ForeignKeyConstraint(['episode_id'], ['radio_episode.id'], ),
sa.ForeignKeyConstraint(['station_id'], ['radio_station.id'], ),
    sa.PrimaryKeyConstraint('
|
tokyo-jesus/university | src/python/koans/python3/runner/writeln_decorator.py | Python | unlicense | 473 | 0.012685 | #!/usr/bin/env python
# encoding: utf-8
import sys
import os
# Taken from legacy python unittest
class WritelnDecorator:
"""Used to decorate file-like objects with a handy 'writeln' method"""
    def __init__(self, stream):
self.stream = stream
def __getattr__(self, attr):
return getattr(self.stream,attr)
def writeln(self, arg=None):
if arg: self.write(arg)
self.write('\n') # text-mode streams translate to \r\n if needed
| |
apache/incubator-systemml | src/main/python/tests/matrix/test_transpose.py | Python | apache-2.0 | 2,059 | 0.002428 | # -------------------------------------------------------------
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# -------------------------------------------------------------
import unittest
import random
import numpy as np
from systemds.context import SystemDSContext
np.random.seed(7)
shape = (random.randrange(1, 25), random.randrange(1, 25))
m = np.random.rand(shape[0], shape[1])
mx = np.random.rand(1, shape[1])
my = np.random.rand(shape[0], 1)
class TestTranspose(unittest.TestCase):
sds: SystemDSContext = None
@classmethod
def setUpClass(cls):
cls.sds = SystemDSContext()
@classmethod
def tearDownClass(cls):
cls.sds.close()
def test_basic(self):
trans = self.sds.from_numpy( m).t().compute()
self.assertTrue(np.allclose(trans, np.transpose(m)))
def test_empty(self):
trans = self.sds.from_numpy( np.asarray([])).t().compute()
        self.assertTrue(np.allclose(trans, np.asarray([])))
def test_row(self):
        trans = self.sds.from_numpy( mx).t().compute()
self.assertTrue(np.allclose(trans, np.transpose(mx)))
def test_col(self):
trans = self.sds.from_numpy( my).t().compute()
self.assertTrue(np.allclose(trans, np.transpose(my)))
if __name__ == "__main__":
unittest.main(exit=False)
|
NekBox/nekpy | nekpy/dask/subgraph.py | Python | mit | 1,652 | 0.004843 | from copy import deepcopy
from .tasks import prepare, update_config, run, analyze, prepare_
from dask.base import tokenize
from dask.delayed import delayed
from math import ceil
from ..config import config as cfg
nekmpi_path = cfg.nekmpi
load_path = cfg.load
def series(base_in, tusr, job_step = 0, job_time = 0.):
base = deepcopy(base_in)
if job_step > 0:
njob = int((base["num_steps"]-1) / job_step) + 1
base["io_step"] = min(base["io_step"], job_step)
nio = int(job_step / base["io_step"])
elif job_time > 0:
njob = int(ceil(base["end_time"] / job_time))
nio = int(ceil(job_time / base["io_time"]))
else:
njob = 1
nio = 0 # not used
restart = 0
    out_index = 0
nres = max(3, base["torder"])
end_time = job_time
res = {}
data = deepcopy(base)
base["job_name"] = base["name"]
base["job_time"] = job_time
data = prepare(base, tusr)
for i in range(njob):
diff = {"restart": restart, "outind": out_index}
if i == 0:
restart += 1
out_index += nio + 1
else:
            restart += nres
out_index += nio
if job_step > 0:
diff["num_steps"] = min(job_step, base["num_steps"] - i*job_step)
if job_time > 0:
diff["end_time"] = end_time
end_time += job_time
diff["job_name"] = "{}-{}".format(base["name"], i)
config = update_config(base, diff)
inp = prepare(config, tusr, make=False, dep=data)
data = run(config, nekmpi_path, dep=inp)
res = analyze(config, res, dep=data)
return res
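An illustrative call of series() with made-up values; the keys mirror what the function reads when job_step > 0:
# Hypothetical config and tusr contents, for illustration only.
base = {"name": "run", "num_steps": 5000, "io_step": 500, "torder": 3}
res = series(base, tusr="...", job_step=1000)  # five chained 1000-step jobs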
|
pankajanand18/python-tests | linkedlists/reverserlist.py | Python | mit | 1,457 | 0.021277 |
from linkedlist import SinglyLinkedListNode
def reverseList(head):
    # Reverse the whole list in one pass: peel nodes off the front and
    # push them onto 'tail', which becomes the new head.
    tail = None
    tempNode = head
    while tempNode is not None:
        currentNode, tempNode = tempNode, tempNode.next
        currentNode.next = tail
        tail = currentNode
    return tail
def reverseListKNode(head, k):
    # Reverse the list in groups of k nodes (the final group may be shorter).
    tempHead = None   # head of the fully rebuilt list
    tempTail = None   # tail of the last group already linked in
    while head is not None:
        tempNode = head
        last = None
        tk = k
        # Reverse up to k nodes; 'last' ends up as the group's new head.
        while tempNode is not None and tk > 0:
            currentNode, nextNode = tempNode, tempNode.next
            currentNode.next = last
            last = currentNode
            tempNode = nextNode
            tk -= 1
        if tempHead is not None:
            # Splice the reversed group after the previous one.
            tempTail.next = last
            head.next = nextNode
        else:
            # First group: its new head is the head of the result.
            tempHead = last
            head.next = nextNode
        tempTail = head   # 'head' is now the tail of this reversed group
        head = nextNode
    return tempHead
def printLinkedList(head):
while head is not None:
        print head.data,
head=head.next
print ''
def createList(list):
lastNode=None
head=None
for i in list:
node= SinglyLinkedListNode(i)
if lastNode == None:
lastNode = node
head = node
else:
lastNode.next = node
lastNode=node
return head
a=(i for i in xrange(1,11))
list = createList(a)
printLinkedList(list)
newList=reverseListKNode(list,2)
printLinkedList(newList)
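For reference, the run above should print the original list and then the k=2 (pairwise) reversal:
# Expected output:
# 1 2 3 4 5 6 7 8 9 10
# 2 1 4 3 6 5 8 7 10 9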
|
skylifewww/pangolin_new | content/templatetags/content_tags.py | Python | mit | 4,762 | 0.00441 | from django import template
# from django.contrib.admin.util import lookup_field
# from django.core.exceptions import ObjectDoesNotExist
# from django.core.urlresolvers import NoReverseMatch, reverse
# from django.db.models import ForeignKey
# from django.template.defaulttags import NowNode
# from django.utils.safestring import mark_safe
from django.shortcuts import render_to_response, redirect, get_object_or_404
from content.models import *
from product.models import Category, Support
register = template.Library()
@register.inclusion_tag('meta/title.html')
def meta_title():
meta = get_object_or_404(Meta, published=1)
return {'title': meta.meta_title}
@register.inclusion_tag('meta/author.html')
def meta_author():
meta = get_object_or_404(Meta, published=1)
return {'author': meta.meta_author}
@register.inclusion_tag('meta/description.html')
def meta_description():
meta = get_object_or_404(Meta, published=1)
return {'description': meta.meta_description}
@register.inclusion_tag('meta/keywords.html')
def meta_keywords():
meta = get_object_or_404(Meta, published=1)
return {'keywords': meta.meta_keywords}
@register.inclusion_tag('top/image_back.html')
def top_image_back():
top = get_object_or_404(Top, published=1)
# return {'image_back': top.image_back}
return {'image_back': top.slug}
@register.inclusion_tag('top/text_small.html')
def top_text_small():
top = get_object_or_404(Top, published=1)
return {'text_small': top.text_small}
@register.inclusion_tag('top/text_big.html')
def top_text_big():
top = get_object_or_404(Top, published=1)
return {'text_big': top.text_big}
@register.inclusion_tag('footer/category_footer.html')
def category_footer():
categories = Category.objects.filter(published=1).order_by('ordering')
return {'categories': categories}
@register.inclusion_tag('footer/support_footer.html')
def support_footer():
supports = Support.objects.filter(published=1).order_by('ordering')
return {'supports': supports}
# @register.inclusion_tag('slides/slides.html')
# def slides():
# slides = Slide.objects.filter(published=1).order_by('ordering')
# return {'slides': slides}
# @register.inclusion_tag('menu/main_menu.html')
# def main_menu():
# menu = Menu.objects.get(pk=1)
# items = MenuItem.objects.filter(menu=menu, published=1).order_by('ordering')
# return {'items': items}
# @register.inclusion_tag('comments/comments.html')
# def comments(paket, item_model, item_id):
# from comments.models import Comments
# nodes = Comments.objects.filter(paket=paket, item_model=item_model,item_id=item_id, published=1)
# return {'nodes':nodes, 'paket':paket, 'item_model':item_model, 'item_id':item_id}
# @register.filter(name='suit_conf')
# def suit_conf(name):
# value = get_config(name)
# return mark_safe(value) if isinstance(value, str) else value
# @register.tag
# def suit_date(parser, token):
# return NowNode(get_config('HEADER_DATE_FORMAT'))
# @register.tag
# def suit_time(parser, token):
# return NowNode(get_config('HEADER_TIME_FORMAT'))
# @register.filter
# def field_contents_foreign_linked(admin_field):
# """Return the .contents attribute of the admin_field, and if it
# is a foreign key, wrap it in a link to the admin page for that
# object.
# Use by replacing '{{ field.contents }}' in an admin template (e.g.
# fieldset.html) with '{{ field|field_contents_foreign_linked }}'.
# """
# fieldname = admin_field.field['field']
# displayed = admin_field.contents()
# obj = admin_field.form.instance
# if not hasattr(admin_field.model_admin,
# 'linked_readonly_fields') or fieldname not in admin_field \
# .model_admin \
# .linked_readonly_fields:
# return displayed
# try:
# fieldtype, attr, value = lookup_field(fieldname, obj,
# admin_field.model_admin)
# except ObjectDoesNotExist:
# fieldtype = None
# if isinstance(fieldtype, ForeignKey):
# try:
# url = admin_url(value)
# except NoReverseMatch:
# url = None
# if url:
# displayed = "<a href='%s'>%s</a>" % (url, displayed)
# return mark_safe(displayed)
# @register.filter
# def admin_url(obj):
# info = (obj._meta.app_label, obj._meta.module_name)
# return reverse("admin:%s_%s_change" % info, args=[obj.pk])
# @register.simple_tag
# def suit_bc(*args):
# return utils.value_by_version(args)
# @register.assignment_tag
# def suit_bc_value(*args):
# return utils.value_by_version(args)
|
vecnet/vnetsource | lib/views/error_views.py | Python | mpl-2.0 | 3,043 | 0.008544 | from django import http
from django.shortcuts import render, redirect
from django.template import (loader, TemplateDoesNotExist)
from django.views.decorators.csrf import requires_csrf_token
from django.core.urlresolvers import reverse
from lib.templatetags.base_extras import set_notification
import smtplib
from datetime import datetime
from django.conf import settings
import json
@requires_csrf_token
def view_server_error(request, template_name='500.html'):
"""
500 error handler.
Templates: :template:`500.html` by default, others expected: '403.html','404.html'
Context: None
"""
try:
        loader.get_template(template_name)
except TemplateDoesNotExist:
return http.HttpResponseServerError('<h1>Server Error - Template Not Found</h1>')
context = {'STATIC_URL': '/static/'}
    status = 500  # default; also covers the "500.html" case
    if template_name == "403.html":
        status = 403
    elif template_name == "404.html":
        status = 404
return render(request, template_name, context, status = status)
@requires_csrf_token
def error_submit(request):
try:
sender_email = settings.SERVER_EMAIL
server = smtplib.SMTP(settings.EMAIL_HOST, settings.EMAIL_PORT)
if getattr(settings, 'EMAIL_USE_TLS', True):
server.starttls()
to = ''
for recipient in settings.ADMINS:
to += recipient[1] + ','
header = 'To:' + to + '\n' + 'From: ' + sender_email + '\n' + 'Subject:VecNet Server Error (' + request.POST['error_code'] + ') report from ' + request.POST['name'] + '\n\n'
msg = header + 'Server Error Form Results: \n\n'
msg = msg + 'Error Type: ' + request.POST['error_code'] + '\n'
msg = msg + 'Error Reported By: ' + request.POST['name'] + '\n'
msg = msg + 'Reporter\'s Email: ' + request.POST['email'] + '\n\n'
msg = msg + 'Description of problem: ' + request.POST['rant'] + '\n\n'
msg = msg + 'Report timestamp: ' + str(datetime.today()) + '\n\n'
try:
msg = msg + 'Username: ' + request.user.username + '\n\n'
except:
pass
try:
msg = msg + "User agent: " + request.META['HTTP_USER_AGENT'] + "\n\n"
except:
pass
try:
msg = msg + "URL: " + request.path_info + "?" + request.META['QUERY_STRING'] + "\n\n"
except:
pass
try:
msg = msg + 'Session data: ' + "%s" % request.session.load() + '\n\n'
except:
pass
server.sendmail(sender_email, to, msg)
server.close()
set_notification('alert-success', '<strong>Thank you for your submission!</strong> We will resolve this issue as soon as possible.', request.session)
except:
set_notification('alert-error', '<strong>Error!</strong> Your response wasn\'t submitted. Please contact us directly at support@vecnet.org for further assistance.', request.session)
    return redirect(reverse('index'))
|
pawelkalinowski/project-python | Fibonacci n-th number (iterative).py | Python | mit | 1,745 | 0.005731 | """
The MIT License (MIT)
Copyright (c) 2013 Pawel Kalinowski
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
def fibonacci (number): # defining fibonacci function with parameter
fibo = [0,1] # creating two first elements of fibo list
i = 2 # starting at the 3rd number in Fibonacci sequence
while i <= number: # loop; checking the iteration count
fibo.append(fibo[i-1]+fibo[i-2]) # computing & adding new fibonacci numbers to already created fibo list
i = i + 1 # incrementing iteration count
return fibo[number] # returning result
user_input = input("Enter the n-th number of the Fibonacci sequence you would like to learn: ") # user input; no validation
print fibonacci(user_input) # printing result
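# Worked example (Python 2 semantics assumed, since input() above relies on
# the implicit eval of the typed text): entering 10 builds
# [0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55] and prints fibo[10], i.e. 55.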
|
spectralDNS/spectralDNS | demo/Vortices2D.py | Python | lgpl-3.0 | 2,458 | 0.004475 | """
2D test case with three vortices
"""
from numpy import zeros, exp, pi, loadtxt, allclose, where
import matplotlib.pyplot as plt
from shenfun import Array
from spectralDNS import config, get_solver, solve
def initialize(U, X, U_hat, K_over_K2, T, **context):
w = exp(-((X[0]-pi)**2+(X[1]-pi+pi/4)**2)/(0.2)) \
+ exp(-((X[0]-pi)**2+(X[1]-pi-pi/4)**2)/(0.2)) \
- 0.5*exp(-((X[0]-pi-pi/4)**2+(X[1]-pi-pi/4)**2)/(0.4))
w_hat = U_hat[0].copy()
w_hat = T.forward(w, w_hat)
U[0] = T.backward(1j*K_over_K2[1]*w_hat, U[0])
U[1] = T.backward(-1j*K_over_K2[0]*w_hat, U[1])
U_hat = U.forward(U_hat)
def regression_test(context):
if config.solver.num_processes > 1:
return
U_ref = loadtx | t('vortices.txt')
U = context.U_hat.backward(context.U)
assert allclose(U[0], U_ref)
im = None
def update(context):
global im
params = config.params
solver = config.solver
# initialize plot
if params.tstep == 1:
| im = plt.imshow(zeros((params.N[0], params.N[1])))
plt.colorbar(im)
plt.draw()
if params.tstep % params.plot_result == 0 and params.plot_result > 0:
curl = solver.get_curl(**context)
im.set_data(curl[:, :])
im.autoscale()
plt.pause(1e-6)
if __name__ == '__main__':
config.update(
{'nu': 0.001,
'dt': 0.005,
'T': 50,
'write_result': 100,
'M': [6, 6]}, 'doublyperiodic')
config.doublyperiodic.add_argument('--plot_result', type=int, default=10) # required to allow overloading through commandline
solver = get_solver(update=update, regression_test=regression_test, mesh='doublyperiodic')
assert config.params.solver == 'NS2D'
context = solver.get_context()
context.stream = Array(context.T)
context.hdf5file.results['data'].update({'curl': [context.curl],
'stream': [context.stream]})
def update_components(**context):
"""Overload default because we want to store the curl as well"""
solver.get_velocity(**context)
solver.get_pressure(**context)
solver.get_curl(**context)
K2 = context['K2']
context['stream'] = context['T'].backward(-context['W_hat']/where(K2 == 0, 1, K2), context['stream'])
context.hdf5file.update_components = update_components
initialize(**context)
context.hdf5file.filename = "NS2D_stream"
solve(solver, context)
|
PearsonIOKI/compose-forum | askbot/models/signals.py | Python | gpl-3.0 | 3,977 | 0.004023 | """Custom django signals defined for the askbot forum application.
"""
import django.dispatch
from django.db.models.signals import pre_save, post_save, pre_delete, post_delete, post_syncdb
try:
from django.db.models.signals import m2m_changed
except ImportError:
pass
tags_updated = django.dispatch.Signal(
providing_args=['tags', 'user', 'timestamp']
)
#todo: this one seems to be unused
edit_question_or_answer = django.dispatch.Signal(
providing_args=['instance', 'modified_by']
)
delete_question_or_answer = django.dispatch.Signal(
providing_args=['instance', 'deleted_by']
)
flag_offensive = django.dispatch.Signal(providing_args=['instance', 'mark_by'])
remove_flag_offensive = django.dispatch.Signal(providing_args=['instance', 'mark_by'])
user_updated = django.dispatch.Signal(providing_args=['instance', 'updated_by'])
user_registered = django.dispatch.Signal(providing_args=['user',])
#todo: move this to authentication app
user_logged_in = django.dispatch.Signal(providing_args=['session'])
new_answer_posted = django.dispatch.Signal(
providing_args=['answer', 'user', 'form_data']
)
new_question_posted = django.dispatch.Signal(
providing_args=['question', 'user', 'form_data']
)
new_comment_posted = django.dispatch.Signal(
providing_args=['comment', 'user', 'form_data']
)
answer_edited = django.dispatch.Signal(
providing_args=['answer', | 'user', 'form_data']
)
post_updated = django.dispatch.Signal(
providing_args=[
'post',
| 'updated_by',
'newly_mentioned_users'
]
)
post_revision_published = django.dispatch.Signal(
providing_args = [
'revision',
'was_approved'
]
)
site_visited = django.dispatch.Signal(providing_args=['user', 'timestamp'])
def pop_signal_receivers(signal):
"""disables a given signal by removing listener functions
and returns the list
"""
receivers = signal.receivers
signal.receivers = list()
return receivers
def set_signal_receivers(signal, receiver_list):
"""assigns a value of the receiver_list
to the signal receivers
"""
signal.receivers = receiver_list
def pop_all_db_signal_receivers():
"""loops through all relevant signals
pops their receivers and returns a
dictionary where signals are keys
and lists of receivers are values
"""
#this is the only askbot signal that is not defined here
#must use this to avoid a circular import
from askbot.models.badges import award_badges_signal
signals = (
#askbot signals
tags_updated,
edit_question_or_answer,
delete_question_or_answer,
flag_offensive,
remove_flag_offensive,
user_updated,
user_logged_in,
user_registered,
post_updated,
award_badges_signal,
#django signals
pre_save,
post_save,
pre_delete,
post_delete,
post_syncdb,
)
if 'm2m_changed' in globals():
signals += (m2m_changed, )
receiver_data = dict()
for signal in signals:
receiver_data[signal] = pop_signal_receivers(signal)
return receiver_data
def set_all_db_signal_receivers(receiver_data):
"""takes receiver data as an argument
where the argument is as returned by the
pop_all_db_signal_receivers() call
and sets the receivers back to the signals
"""
for (signal, receivers) in receiver_data.items():
signal.receivers = receivers
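# Usage sketch for the pop/set pair above (the surrounding code is
# hypothetical): temporarily silence all DB-related signals around a bulk
# operation, restoring them even if the operation raises.
#
#     receiver_data = pop_all_db_signal_receivers()
#     try:
#         ...  # e.g. a bulk import that must not fire handlers
#     finally:
#         set_all_db_signal_receivers(receiver_data)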
|
toopy/django-toopy-website | src/toopy/admin/admin_.py | Python | mit | 638 | 0.001567 | from django.contrib import admin
from django.utils.translation import ugettext_lazy as _
def _date(obj):
return obj.date.strftime('%Y-%m-%d %H:%M:%S')
_date.short_description = _(u'Date')
class AdminContact(admin.ModelAdmin):
fields = (
'email',
'subject',
'message',
'website',
)
list_filter = (
)
list_display = ( |
'email',
'subject',
'website',
_date,
)
search_fields = (
'email',
'subject',
| 'message',
'website',
)
|
brahul90/cheapflights | cheapflights/main.py | Python | apache-2.0 | 1,721 | 0.024404 | ## Adding ./getresults to the Python path so that the modules in folder can be imported
import sys
sys.path.insert(0, './getresults')
import datetime
from flightsearch import flightsearch, flightresult
import os
import uuid
import time
from pprint import pprint
def main():
flyfrom = 'YYZ' #input("Enter departure city or airport code, e.g. Toronto or YYZ:\n")
datefrom = '2017-04-26' #input("Enter departure date and time, e.g. 2017-03-31 12:00:\n")
flyto = 'LHR' #input("Enter arrival city or airport code, e.g. London or LHR:\n")
dateto = '2017-05-26' #input("Enter arrival date and time, e.g. 2017-03-31 20:00:\n")
searchuuid = uuid.uuid4()
searchbegintime = time.time()
search = flightsearch(searchuuid = searchuuid, searchbegintime = searchbegintime, flyfrom = flyfrom,
datefrom = datefrom, flyto = flyto, dateto = dateto)
results = aggregatedflights(search)
search.searchendtime = time.time()
for key, value in results.items():
for item in value:
| pprint(vars(item))
## This function aggregates the various results obtained from the modules in the ./getresults folder
def aggregatedflights(flightsearch):
getresultsdir = './getresults'
resultdict = {}
for filename in os.listdir(getresultsdir):
if filename.startswith("get") and filename.endswith(".py"):
modulename = filename.split('.')[0]
mod = | __import__(modulename)
resultdict[modulename] = mod.getresult(flightsearch)
else:
continue
return sortbyprice(resultdict)
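## Contract sketch inferred from the loop above (module name is illustrative):
## each ./getresults/get*.py module exposes a getresult() function returning
## an iterable of flightresult objects.
##
##     # getresults/getexampleair.py
##     def getresult(flightsearch):
##         return [flightresult(...)]  # one entry per matching flight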
def sortbyprice(flightresult):
## Coming soon
return flightresult
if __name__ == '__main__':
main()
|
RulersOfAsgard/ALAMO-scheduler | alamo_scheduler/aioweb.py | Python | apache-2.0 | 3,647 | 0 | # -*- coding: utf-8 -*-
import asyncio
import logging
from alamo_scheduler.conf import settings
from pip import get_installed_distributions
from aiohttp.client import ClientSession
from aiohttp.web import Application, Response, json_response
logger = logging.getLogger(__name__)
__all__ = ['ClientSession', 'json_response', 'Response',
'SchedulerServerApplication']
class SchedulerServerApplication(object):
_app = None
_srv = _handler = None
def init(self, loop=None):
loop = loop or self.app.loop
backlog = 128
self.configure_ | contract()
self._handler = self.app.make_handler()
self._server = loop.create_server(
self._handler, settings.SERVER_HOST,
settings.SERVER_PORT, backlog=backlog
)
self._srv, self._startup_res = loop.run_until_complete(
asyncio.gather(self._server, self.app.startup(), loop=loop)
)
logger.info('Server | started at http://{}:{}'.format(
settings.SERVER_HOST, settings.SERVER_PORT
))
def finish_connections(self):
loop = self.app.loop
self._srv.close()
loop.run_until_complete(self._srv.wait_closed())
loop.run_until_complete(self.app.shutdown())
loop.run_until_complete(self._handler.finish_connections(60))
loop.run_until_complete(self.app.cleanup())
self._app = None
@property
def app(self):
if self._app is None:
self.setup()
return self._app
def setup(self):
if self._app is None:
self._app = Application(loop=asyncio.get_event_loop())
async def ping(self, request):
return Response(body=b'pong')
async def dependencies(self, request):
installed_packages = get_installed_distributions(local_only=True)
dependencies = [dict(name=i.key, version=i.version)
for i in installed_packages]
return json_response(data=dict(dependencies=dependencies))
async def info(self, request):
package = 'alamo-scheduler'
version = next(iter([p.version for p in
get_installed_distributions(local_only=True)
if p.key == package]), '<unknown>')
return json_response(data=dict(title=package, version=version))
async def endpoints(self, request):
endpoints = []
router = request.app.router
for resource in router.resources():
path = getattr(resource, '_path', None)
if path is None:
path = getattr(resource, '_formatter')
routes = resource._routes
if '/status/' in path:
continue
for route in routes:
endpoints.append(dict(
path=path, method=route.method,
accept='application/json', tags=[]
))
return json_response(data=dict(endpoints=endpoints))
def add_route(self, *args, **kwargs):
"""Add route to server."""
self.app.router.add_route(*args, **kwargs)
def add_get(self, *args, **kwargs):
self.app.router.add_get(*args, **kwargs)
def add_post(self, *args, **kwargs):
self.app.router.add_post(*args, **kwargs)
def configure_contract(self):
self.add_route('GET', '/status/dependencies', self.dependencies)
self.add_route('GET', '/status/ping', self.ping)
self.add_route('GET', '/status/info', self.info)
self.add_route('GET', '/status/public-endpoints', self.endpoints)
self.add_route('GET', '/', self.endpoints)
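# Minimal run sketch (hedged; the event-loop setup lives outside this module):
#
#     server = SchedulerServerApplication()
#     server.init()
#     try:
#         server.app.loop.run_forever()
#     finally:
#         server.finish_connections()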
|
jaety/image-river | indexer/logutil.py | Python | apache-2.0 | 584 | 0.006849 | imp | ort logging as log
import time
log.basicConfig(level=log.INFO)
warning = log.warning
info = log.info
def timed(show_args = True):
def timed_decorator(f):
def _timed(*args, **kw):
ts = time.time()
result = f(*args, **kw)
te = time.time()
if show_args:
log.info('func:%r args: [%r, %r] took: %2.4f sec' % (f.__name__, args, kw, te-ts))
else:
| log.info('func:%r took: %2.4f sec' % (f.__name__, te-ts))
return result
return _timed
return timed_decorator
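# Usage sketch (the decorated function is illustrative):
#
#     @timed(show_args=False)
#     def slow_add(a, b):
#         time.sleep(0.1)
#         return a + b
#
#     slow_add(1, 2)  # logs: func:'slow_add' took: 0.1xxx sec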
|
ryota-sugimoto/hackerrank | dynamic_programming/grid_walking/tmp.py | Python | gpl-2.0 | 1,371 | 0.05981 | #!/usr/bin/env python
def n_ways(x, d, M):
memo = {}
def | loop(x,M):
if memo.has_key((x,M)):
return memo[(x,M)]
if M == 0:
return 1
| if x == d and x == 1:
return 0
if x == 1:
memo[(x,M)] = loop(x+1, M-1)
elif x == d:
memo[(x,M)] = loop(x-1, M-1)
else:
memo[(x,M)] = loop(x-1, M-1) + loop(x+1, M-1)
return memo[(x,M)]
return loop(x,M)
from math import factorial
def gen_ncr():
d = {}
def ncr(n,r):
if d.has_key((n,r)):
return d[(n,r)]
else:
d[(n,r)] = factorial(n)/(factorial(r)*factorial(n-r))
return d[(n,r)]
return ncr
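# e.g. ncr = gen_ncr(); ncr(5, 2) -> 10, and repeated calls with the same
# (n, r) pair are served from the memo dict instead of recomputing factorials.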
def main(x, D, M):
N = len(D)
n_way_memo = {}
for i,(xi,di) in enumerate(zip(x,D)):
for k in range(M+1):
n_way_memo[(i,k)] = n_ways(xi,di,k)
print "memo done"
ncr = gen_ncr()
memo = {}
def loop(i, n):
if n == 0:
return 1
if i == N-1:
return n_way_memo[(i,n)]
if memo.has_key((i,n)):
return memo[(i,n)]
l = []
for k in range(n+1):
l.append(ncr(n,k) * n_way_memo[(i,k)] * loop(i+1,n-k))
memo[(i,n)] = sum(l)
return memo[(i,n)]
return loop(0,M)
import sys
T = int(sys.stdin.next())
for i in range(T):
N,M = map(int,sys.stdin.next().split())
x = map(int,sys.stdin.next().split())
D = map(int,sys.stdin.next().split())
print main(x,D,M)%1000000007
|
Guerillero/ArcExtractor | Extractor.py | Python | mit | 2,156 | 0.001855 | ##################################################################
# Extract a dbtable for every instance of a value in a geo-file #
# Written by Thomas Fish #
# Released under the MIT License #
# #
# FOR ARCMAP 10.X ONLY #
##################################################################
__version__ = "1.0.0"
import arcpy
import FileTransformations
# declare map
mxd = arcpy.mapping.MapDocument("CURRENT")
# Import data from ArcMap
fileToConvert = arcpy.GetParameterAsText(0) # shapefile or GeoDatabase
listFieldValues = arcpy.GetParameterAsText(1) # text file
outLocation = arcpy.GetParameterAsText(2) # folder
fieldName = arcpy.GetParameterAsText(3) # text field
spreadsheetType = arcpy.GetParameterAsText(4) # text field -- Valid responses are dbf, csv, or tsv
# translate file into layer
lyr = arcpy.mapping.Layer(fileToConvert)
lyr.name = "MyFile"
# declare arrays for this script
fieldValues = []
fileName = []
filePath = []
# declare the file to open
fin = open(listFieldValues)
# tell end user if there is an error
if fin.closed:
arcpy.AddMessage("ERROR: File failed to open")
for line in fin:
# Turn each line in the file into an entry to the array
lineClear = str.strip(line)
fieldValues.append(lineClear)
# make file names from UIDs
for i in range(len(fieldValues)):
fileName.append(fieldValues[i] + ".dbf")
for y in range(len( | fileName)):
filePath.append(outLocation + "\\" + fileName[y])
for z in range(len(fieldValues)):
# Extract out only the entries that are wanted
arcpy.AddMessage("Currently extracting " + fieldValues[z] + " in " + fieldName)
if lyr.name == "MyFile | ":
lyr.definitionQuery = fieldName + " =" + "'" + fieldValues[z] + "'"
arcpy.CopyRows_management(lyr, filePath[z])
fin.close()
if spreadsheetType == "csv":
FileTransformations.dbf2csv(outLocation, fieldValues, filePath)
elif spreadsheetType == "tsv":
FileTransformations.dbf2tsv(outLocation, fieldValues, filePath)
|
ergonomica/ergonomica | ergonomica/lib/lib/ergo_del.py | Python | gpl-2.0 | 481 | 0.008316 | #!/ | usr/bin/env python
# -*- coding: utf-8 -*-
"""
[lib/lib/ergo_del.py]
Defines the "del" command.
"""
from ergonomica import ErgonomicaError
def _del(argc):
"""del: Delete variables.
Usage:
del [<variable>VARS...]
"""
for var in argc.args['VARS']:
try:
del argc.ns[var]
except KeyError:
raise ErgonomicaError("[ergo: del]: No such variable '{}'.".format(var))
exports = {'del': _ | del}
|
dkopecek/amplify | third-party/quex-0.65.2/quex/engine/generator/base.py | Python | gpl-2.0 | 9,154 | 0.010269 | # (C) Frank Rene Schaefer
import quex.engine.generator.state_machine_coder as state_machine_coder
import quex.engine.generator.state_router as state_router_generator
from quex.engine.generator.languages.variable_db import variable_db
from quex.engine.analyzer.door_id_address_label import DoorID, \
dial_db
import quex.engine.generator.reload_state as reload_state_coder
import quex.engine.analyzer.engine_supply_factory as engine
from quex.engine.analyzer.terminal.core import Terminal
import quex.engine.analyzer.core as analyzer_generator
from quex.engine.tools import typed
from quex.blackboard import E_IncidenceIDs, \
setup as Setup, \
Lng
# MAIN: sm --> analyzer
# sm_txt --> code_analyzer
# terminal_txt --> code_terminals
#
# PRE_SM: pre_sm --> analyzer
# pre_sm_txt --> code_analyzer
# terminal = begin of core
#
# BIPD_SM-s: bipd_sm -> analyzer
# bipd_sm_txt -> code_analyzer
# termina = terminal for which BIPD operated
#
# COUNT_SM: count_db --> count_sm
# analyzer = get_analyzer
# modify analyzer
# terminal = exit_door_id
#
# SKIPER_SM:
#
# RANGE_SKIPPER_SM:
#
# NESTER_RANGE_SKIPPER_SM:
#
# INDENTATION_DETECTOR_SM:
def do_main(SM):
"""Main pattern matching state machine (forward).
---------------------------------------------------------------------------
Micro actions are: line/column number counting, position set/reset,
last acceptance setting/reset, lexeme start pointer set/reset, setting
terminating zero for lexeme/reset, setting last character.
DropOut --> FAILURE
BLC --> ReloadStateForward
EndOfStream --> END_OF_STREAM
Variables (potentially) required:
position, PositionRegisterN, last_acceptance, input.
"""
txt, analyzer = do_state_machine(SM, engine.Class_FORWARD())
if analyzer.last_acceptance_variable_required():
variable_db.require("last_acceptance")
return txt, analyzer
def do_pre_context(SM, PreContextSmIdList):
"""Pre-context detecting state machine (backward).
---------------------------------------------------------------------------
Micro actions are: pre-context fullfilled_f
DropOut --> Begin of 'main' state machine.
BLC --> ReloadStateBackward
EndOfStream --> 'error'
Variables (potentially) required:
pre_context_fulfilled_f[N] --> Array of flags for pre-context
indication.
RETURNS: [0] generated code text
[1] reload state BACKWARD, to be generated later.
"""
if SM is None:
return [], None
txt, analyzer = do_state_machine(SM, engine.BACKWARD_PRE_CONTEXT)
txt.append("\n%s:" % dial_db.get_label_by_door_id(DoorID.global_end_of_pre_context_check()))
# -- set the input stream back to the real current position.
# during backward lexing the analyzer went backwards, so it needs to be reset.
txt.append(" %s\n" % Lng.INPUT_P_TO_LEXEME_START())
for sm_id in PreContextSmIdList:
variable_db.require("pre_context_%i_fulfilled_f", Index = sm_id)
variable_db.require("input")
return txt, analyzer
def do_backward_input_position_detectors(BipdDb):
"""RETURNS: [0] Code for BIPD analyzer
[1] map: acceptance_id --> DoorID of entry into BIPD
The 'bipd_entry_door_id_db' is used by 'do_main()' later.
"""
result = []
for incidence_id, bipd_sm in BipdDb.iteritems():
txt, analyzer = do_state_machine(bipd_sm,
engine.Class_BACKWARD_INPUT_POSITION(incidence_id))
result.extend(txt)
return result
def do_reload_procedure(TheAnalyzer):
"""Lazy (delayed) code generation of the forward and backward reloaders.
Any state who needs reload may 'register' in a reloader. This registering may
happen after the code generation of forward or backward state machine.
"""
# Variables that tell where to go after reload success and reload failure
if TheAnalyzer is None: return []
elif not TheAnalyzer.engine_type.subject_to_reload(): return []
elif TheAnalyzer.reload_state is None: return []
elif TheAnalyzer.reload_state_extern_f: return []
variable_db.require("target_state_else_index") # upon reload failure
variable_db.require("target_state_index") # upon reload success
require_position_registers(TheAnalyzer)
return reload_state_coder.do(TheAnalyzer.reload_state)
def require_position_registers(TheAnalyzer):
"""Require an array to store input positions. This has later to be
implemented as 'variables'. Position registers are exclusively used
for post-context restore. No other engine than FORWARD would require
those.
"""
if not TheAnalyzer.engine_type.is_FORWARD():
return
if TheAnalyzer.position_register_map is None:
position_register_n = 0
else:
position_register_n = len(set(TheAnalyzer.position_register_map.itervalues()))
if position_register_n != 0:
initial_array = "{ " + ("0, " * (position_register_n - 1) + "0") + "}"
else:
# Implement a dummy array (except that there is no reload)
if Setup.buffer_based_analyzis_f: return
initial_array = "(void*)0"
variable_db.require_array("position", ElementN = position_register_n,
Initial = initial_array)
variable_db.require("PositionRegisterN",
Initial = "(size_t)%i" % position_register_n)
def do_state_router():
routed_address_set = dial_db.routed_addre | ss_set()
# If there is only one address subject to state routing, then the
# state router needs to be implemented.
#if len(routed_address_set) == 0:
# return []
# Add the address of 'terminal_end_of_file()' if it is not there, already.
# (It should not be there, if we are working on a fixed chunk, as in 'counting'.
# When counti | ng is webbed into analysis:: assert address_eof in routed_address_set)
if False:
address_eof = dial_db.get_address_by_door_id(DoorID.incidence(E_IncidenceIDs.END_OF_STREAM))
routed_address_set.add(address_eof)
dial_db.mark_label_as_gotoed(dial_db.get_label_by_address(address_eof))
routed_state_info_list = state_router_generator.get_info(routed_address_set)
return state_router_generator.do(routed_state_info_list)
def do_variable_definitions():
# Following function refers to the global 'variable_db'
return Lng.VARIABLE_DEFINITIONS(variable_db)
def do_state_machine(sm, EngineType):
"""Generates code for state machine 'sm' and the 'EngineType'.
RETURNS: list of strings
"""
assert len(sm.get_orphaned_state_index_list()) == 0
txt = []
# -- [optional] comment state machine transitions
if Setup.comment_state_machine_f:
Lng.COMMENT_STATE_MACHINE(txt, sm)
# -- Analyze state machine --> optimized version
analyzer = analyzer_generator.do(sm, EngineType)
# -- Generate code for analyzer
txt.extend(
do_analyzer(analyzer)
)
return txt, analyzer
def do_analyzer(analyzer):
state_machine_code = state_machine_coder.do(analyzer)
Lng.REPLACE_INDENT(state_machine_code)
# Variable to store the current input
variable_db.require("input")
return state_machine_code
@typed(TerminalList=[Terminal])
def do_terminals(TerminalList, TheAnalyzer):
return Lng.TERMINAL_CODE(TerminalList, TheAnalyzer)
def do_reentry_preparation(PreContextSmIdList, OnAfterMatchCode):
return Lng.REENTRY_PREPARATION(PreContextSmIdList, OnAfterMatchCode)
_increment_actions_for_utf8 = [
1, "if ( ((*iterator) & 0x80) == 0 ) { itera |
FabriceSalvaire/PyValentina | Patro/GeometryEngine/Rectangle.py | Python | gpl-3.0 | 3,505 | 0.002282 | ####################################################################################################
#
# Patro - A Python library to make patterns for fashion design
# Copyright (C) 2017 Fabrice Salvaire
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
####################################################################################################
"""Module to implement rectangle.
"""
####################################################################################################
__all__ = ['Rectangle2D']
####################################################################################################
import math
from .Path import Path2D
from .Primitive import Primitive2P, ClosedPrimitiveMixin, PathMixin, PolygonMixin, Primitive2DMixin
from .Segment import Segment2D
####################################################################################################
class Rectangle2D(Primitive2DMixin, ClosedPrimitiveMixin, PathMixin, PolygonMixin, Primitive2P):
"""Class to implements 2D Rectangle."""
##############################################
def __init__(self, p0, p1):
# if p1 == p0:
# raise ValueError('Rectangle reduced to a point')
Primitive2P.__init__(self, p0, p1)
##############################################
    @classmethod
    def from_point_and_offset(cls, p0, v):
        return cls(p0, p0+v)
    @classmethod
    def from_point_and_radius(cls, p0, v):
        return cls(p0-v, p0+v)
##############################################
@property
def is_closed(self):
return True
##############################################
@property
def p01(self):
return self.__vector_cls__(self._p0.x, self._p1.y)
@property
def p10(self):
return self.__vector_cls__(self._p1.x, self._p0.y)
@property
def edges(self):
p0 = self._p0
p1 = self.p01
p2 = self._p1
p3 = self.p10
return (
| Segment2D(p0, p1),
Segment2D(p1, p2),
Segment2D(p2, p3),
Segment2D(p3, p0),
| )
##############################################
@property
def diagonal(self):
return self._p1 - self._p0
##############################################
@property
def perimeter(self):
d = self.diagonal
return 2*(abs(d.x) + abs(d.y))
##############################################
@property
def area(self):
d = self.diagonal
return abs(d.x * d.y)
##############################################
def is_point_inside(self, point):
bounding_box = self.bounding_box
return (point.x in bounding_box.x and
point.y in bounding_box.y)
##############################################
def distance_to_point(self, point):
raise NotImplementedError
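# Worked example (assumes __vector_cls__ is a 2D vector type with .x/.y
# components, e.g. a hypothetical Vector2D):
#
#     r = Rectangle2D(Vector2D(0, 0), Vector2D(4, 3))
#     r.perimeter  # -> 2 * (4 + 3) == 14
#     r.area       # -> 4 * 3 == 12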
|
manhhomienbienthuy/scikit-learn | sklearn/datasets/tests/test_openml.py | Python | bsd-3-clause | 53,604 | 0.000597 | """Test the openml loader.
"""
import gzip
import warnings
import json
import os
import re
from importlib import resources
from io import BytesIO
import numpy as np
import scipy.sparse
import sklearn
import pytest
from sklearn import config_context
from sklearn.datasets import fetch_openml as fetch_openml_orig
from sklearn.datasets._openml import (
_open_openml_url,
_arff,
_DATA_FILE,
_OPENML_PREFIX,
_get_data_description_by_id,
_get_local_path,
_retry_with_clean_cache,
)
from sklearn.datasets._arff_parser import (
_convert_arff_data,
_convert_arff_data_dataframe,
_feature_to_dtype,
)
from sklearn.utils import is_scalar_nan
from sklearn.utils._testing import assert_allclose, assert_array_equal
from urllib.error import HTTPError
from sklearn.datasets.tests.test_common import check_return_X_y
from sklearn.externals._arff import ArffContainerType
from functools import partial
from sklearn.utils._testing import fails_if_pypy
OPENML_TEST_DATA_MODULE = "sklearn.datasets.tests.data.openml"
# if True, urlopen will be monkey patched to only use local files
test_offline = True
# Do not use a cache for `fetch_openml` to avoid concurrent writing
# issues with `pytest-xdist`.
# Furthermore sklearn/datasets/tests/data/openml/ is not always consistent
# with the version on openml.org. If one were to load the dataset outside of
# the tests, it may result in data that does not represent openml.org.
fetch_openml = partial(fetch_openml_orig, data_home=None)
def _test_features_list(data_id):
# XXX Test is intended to verify/ensure correct decoding behavior
# Not usable with sparse data or datasets that have columns marked as
# {row_identifier, ignore}
def decode_column(data_bunch, col_idx):
col_name = data_bunch.feature_names[col_idx]
if col_name in data_bunch.categories:
# XXX: This would be faster with np.take, although it does not
# handle missing values fast (also not with mode='wrap')
cat = data_bunch.categ | ories[col_name]
result = [
None if is_scalar_nan(idx) else cat[int(idx)]
| for idx in data_bunch.data[:, col_idx]
]
return np.array(result, dtype="O")
else:
# non-nominal attribute
return data_bunch.data[:, col_idx]
data_bunch = fetch_openml(
data_id=data_id, cache=False, target_column=None, as_frame=False
)
# also obtain decoded arff
data_description = _get_data_description_by_id(data_id, None)
sparse = data_description["format"].lower() == "sparse_arff"
if sparse is True:
raise ValueError(
"This test is not intended for sparse data, to keep code relatively simple"
)
url = _DATA_FILE.format(data_description["file_id"])
with _open_openml_url(url, data_home=None) as f:
data_arff = _arff.load(
(line.decode("utf-8") for line in f),
return_type=(_arff.COO if sparse else _arff.DENSE_GEN),
encode_nominal=False,
)
data_downloaded = np.array(list(data_arff["data"]), dtype="O")
for i in range(len(data_bunch.feature_names)):
# XXX: Test per column, as this makes it easier to avoid problems with
# missing values
np.testing.assert_array_equal(
data_downloaded[:, i], decode_column(data_bunch, i)
)
def _fetch_dataset_from_openml(
data_id,
data_name,
data_version,
target_column,
expected_observations,
expected_features,
expected_missing,
expected_data_dtype,
expected_target_dtype,
expect_sparse,
compare_default_target,
):
# fetches a dataset in three various ways from OpenML, using the
# fetch_openml function, and does various checks on the validity of the
# result. Note that this function can be mocked (by invoking
# _monkey_patch_webbased_functions before invoking this function)
data_by_name_id = fetch_openml(
name=data_name, version=data_version, cache=False, as_frame=False
)
assert int(data_by_name_id.details["id"]) == data_id
# Please note that cache=False is crucial, as the monkey patched files are
# not consistent with reality
with warnings.catch_warnings():
# See discussion in PR #19373
# Catching UserWarnings about multiple versions of dataset
warnings.simplefilter("ignore", category=UserWarning)
fetch_openml(name=data_name, cache=False, as_frame=False)
# without specifying the version, there is no guarantee that the data id
# will be the same
# fetch with dataset id
data_by_id = fetch_openml(
data_id=data_id, cache=False, target_column=target_column, as_frame=False
)
assert data_by_id.details["name"] == data_name
assert data_by_id.data.shape == (expected_observations, expected_features)
if isinstance(target_column, str):
# single target, so target is vector
assert data_by_id.target.shape == (expected_observations,)
assert data_by_id.target_names == [target_column]
elif isinstance(target_column, list):
# multi target, so target is array
assert data_by_id.target.shape == (expected_observations, len(target_column))
assert data_by_id.target_names == target_column
assert data_by_id.data.dtype == expected_data_dtype
assert data_by_id.target.dtype == expected_target_dtype
assert len(data_by_id.feature_names) == expected_features
for feature in data_by_id.feature_names:
assert isinstance(feature, str)
# TODO: pass in a list of expected nominal features
for feature, categories in data_by_id.categories.items():
feature_idx = data_by_id.feature_names.index(feature)
# TODO: Remove when https://github.com/numpy/numpy/issues/19300 gets fixed
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore",
category=DeprecationWarning,
message="elementwise comparison failed",
)
values = np.unique(data_by_id.data[:, feature_idx])
values = values[np.isfinite(values)]
assert set(values) <= set(range(len(categories)))
if compare_default_target:
# check whether the data by id and data by id target are equal
data_by_id_default = fetch_openml(data_id=data_id, cache=False, as_frame=False)
np.testing.assert_allclose(data_by_id.data, data_by_id_default.data)
if data_by_id.target.dtype == np.float64:
np.testing.assert_allclose(data_by_id.target, data_by_id_default.target)
else:
assert np.array_equal(data_by_id.target, data_by_id_default.target)
if expect_sparse:
assert isinstance(data_by_id.data, scipy.sparse.csr_matrix)
else:
assert isinstance(data_by_id.data, np.ndarray)
# np.isnan doesn't work on CSR matrix
assert np.count_nonzero(np.isnan(data_by_id.data)) == expected_missing
# test return_X_y option
fetch_func = partial(
fetch_openml,
data_id=data_id,
cache=False,
target_column=target_column,
as_frame=False,
)
check_return_X_y(data_by_id, fetch_func)
return data_by_id
class _MockHTTPResponse:
def __init__(self, data, is_gzip):
self.data = data
self.is_gzip = is_gzip
def read(self, amt=-1):
return self.data.read(amt)
def close(self):
self.data.close()
def info(self):
if self.is_gzip:
return {"Content-Encoding": "gzip"}
return {}
def __iter__(self):
return iter(self.data)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
return False
def _monkey_patch_webbased_functions(context, data_id, gzip_response):
# monkey patches the urlopen function. Important note: Do NOT use this
# in combination with a regular cache directory, as the files that are
# stored as cache should not be mixed up with real openml datasets
url_prefix_data_description = "https://openml.org/api/v1/json/data/"
|
jhseu/tensorflow | tensorflow/python/debug/lib/debug_events_monitors_test.py | Python | apache-2.0 | 18,786 | 0.005217 | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the debug events writer Python class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.core.protobuf import debug_event_pb2
from tensorflow.python.debug.lib import debug_events_monitors
from tensorflow.python.debug.lib import debug_events_reader
from tensorflow.python.debug.lib import dumping_callback
from tensorflow.python.debug.lib import dumping_callback_test_lib
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import googletest
from tensorflow.python.platform import test
class TestMonitor(debug_events_monitors.BaseMonitor):
def __init__(self, debug_data_reader):
super(TestMonitor, self).__init__(debug_data_reader)
# Mapping execution index to Execution data objects.
self.executions = dict()
# Mapping graph execution trace index to GraphExecutionTrace data objects.
self.graph_execution_traces = dict()
def on_executio | n(self, execution_index, execution):
if execution_index in self.executions:
raise ValueError("Duplicate execution index: %d" | % execution_index)
self.executions[execution_index] = execution
def on_graph_execution_trace(self, graph_execution_trace_index,
graph_execution_trace):
if graph_execution_trace_index in self.graph_execution_traces:
raise ValueError("Duplicate graph-execution-trace index: %d" %
graph_execution_trace_index)
self.graph_execution_traces[
graph_execution_trace_index] = graph_execution_trace
class DebugEventsMonitorTest(dumping_callback_test_lib.DumpingCallbackTestBase,
parameterized.TestCase):
@parameterized.named_parameters(
("NoTensor", "NO_TENSOR"),
("ConciseHealth", "CONCISE_HEALTH"),
("FullTensor", "FULL_TENSOR"),
)
def testOnExecutionIsCalled(self, tensor_debug_mode):
writer = dumping_callback.enable_dump_debug_info(
self.dump_root, tensor_debug_mode=tensor_debug_mode)
x = constant_op.constant([[1, 2], [3, 4]], dtype=dtypes.float32)
y = constant_op.constant([[-1], [1]], dtype=dtypes.float32)
math_ops.matmul(x, y)
writer.FlushNonExecutionFiles()
writer.FlushExecutionFiles()
with debug_events_reader.DebugDataReader(self.dump_root) as reader:
test_monitor = TestMonitor(reader)
reader.update()
self.assertLen(test_monitor.executions, 1)
self.assertEmpty(test_monitor.graph_execution_traces)
execution = test_monitor.executions[0]
self.assertTrue(execution.wall_time)
self.assertEqual(execution.op_type, "MatMul")
self.assertLen(execution.output_tensor_device_ids, 1)
self.assertLen(execution.input_tensor_ids, 2)
self.assertLen(execution.output_tensor_ids, 1)
self.assertEqual(execution.num_outputs, 1)
self.assertEqual(execution.graph_id, "")
if tensor_debug_mode == "NO_TENSOR":
self.assertIsNone(execution.debug_tensor_values)
elif tensor_debug_mode == "CONCISE_HEALTH":
self.assertLen(execution.debug_tensor_values, 1)
# [tensor_id, element_count, neg_inf_count, pos_inf_count, nan_count].
self.assertLen(execution.debug_tensor_values[0], 5)
elif tensor_debug_mode == "FULL_TENSOR":
# Full tensor values are not stored in the debug_tensor_values field.
self.assertIsNone(execution.debug_tensor_values)
self.assertAllClose(
reader.execution_to_tensor_values(execution), [[[1.], [1.]]])
@parameterized.named_parameters(
("ConciseHealth", "CONCISE_HEALTH"),
("FullTensor", "FULL_TENSOR"),
)
def testOnGraphExecutionTraceIsCalled(self, tensor_debug_mode):
writer = dumping_callback.enable_dump_debug_info(
self.dump_root, tensor_debug_mode=tensor_debug_mode)
@def_function.function
def unique_sum(xs):
"""Sum over the unique values, for testing."""
unique_xs, indices = array_ops.unique(xs)
return math_ops.reduce_sum(unique_xs), indices
xs = constant_op.constant([2., 6., 8., 1., 2.], dtype=dtypes.float32)
unique_sum(xs)
writer.FlushNonExecutionFiles()
writer.FlushExecutionFiles()
with debug_events_reader.DebugDataReader(self.dump_root) as reader:
test_monitor = TestMonitor(reader)
reader.update()
self.assertLen(test_monitor.executions, 1)
execution = test_monitor.executions[0]
self.assertTrue(execution.wall_time)
self.assertStartsWith(execution.op_type, "__inference_unique_sum")
self.assertLen(execution.output_tensor_device_ids, 2)
self.assertLen(execution.input_tensor_ids, 1)
self.assertLen(execution.output_tensor_ids, 2)
self.assertEqual(execution.num_outputs, 2)
self.assertTrue(execution.graph_id)
traces = test_monitor.graph_execution_traces
if tensor_debug_mode == "CONCISE_HEALTH":
self.assertLen(traces, 3) # [Placeholder:0, Unique:0 , Sum:0].
self.assertEqual(traces[0].op_type, "Placeholder")
self.assertEqual(traces[0].output_slot, 0)
self.assertEqual(traces[1].op_type, "Unique")
self.assertEqual(traces[1].output_slot, 0)
# Unique:1 is not traced under CONCISE_HEALTH mode, as it's int-dtype.
self.assertEqual(traces[2].op_type, "Sum")
self.assertEqual(traces[2].output_slot, 0)
# [tensor_id, element_count, neg_inf_count, pos_inf_count, nan_count].
self.assertLen(traces[0].debug_tensor_value, 5)
self.assertLen(traces[1].debug_tensor_value, 5)
self.assertLen(traces[2].debug_tensor_value, 5)
elif tensor_debug_mode == "FULL_TENSOR":
self.assertLen(traces, 4) # [Placeholder:0, Unique:0, Unique:1, Sum:0].
self.assertEqual(traces[0].op_type, "Placeholder")
self.assertEqual(traces[0].output_slot, 0)
self.assertIsNone(traces[0].debug_tensor_value)
self.assertAllEqual(
reader.graph_execution_trace_to_tensor_value(traces[0]),
[2., 6., 8., 1., 2.])
self.assertEqual(traces[1].op_type, "Unique")
self.assertEqual(traces[1].output_slot, 0)
self.assertIsNone(traces[1].debug_tensor_value)
self.assertAllEqual(
reader.graph_execution_trace_to_tensor_value(traces[1]),
[2., 6., 8., 1.])
self.assertEqual(traces[2].op_type, "Unique")
self.assertEqual(traces[2].output_slot, 1)
self.assertIsNone(traces[2].debug_tensor_value)
self.assertAllEqual(
reader.graph_execution_trace_to_tensor_value(traces[2]),
[0, 1, 2, 3, 0])
self.assertEqual(traces[3].op_type, "Sum")
self.assertEqual(traces[3].output_slot, 0)
self.assertIsNone(traces[3].debug_tensor_value)
self.assertAllClose(
reader.graph_execution_trace_to_tensor_value(traces[3]), 17.)
class AlertDataObjectsTest(test_util.TensorFlowTestCase):
"""Unit tests for alert-class objects."""
def testInfNanMonitor(self):
alert = debug_events_monitors.InfNanAlert(
1234,
"FooOp",
1,
size=1000, |
pinry/pinry | django_images/models.py | Python | bsd-2-clause | 4,263 | 0 | import hashlib
import os.path
from django.db import models
from django.core.files.uploadedfile import InMemoryUploadedFile
from django.dispatch import receiver
from importlib import import_module
from django.urls import reverse
from . import utils
from .settings import IMAGE_SIZES, IMAGE_PATH, IMAGE_AUTO_DELETE
def hashed_upload_to(instance, filename, **kwargs):
image_type = 'original' if isinstance(instance, Image) else 'thumbnail'
prefix = 'image/%s/by-md5/' % (image_type,)
hasher = hashlib.md5()
for chunk in instance.image.chunks():
hasher.update(chunk)
hash_ = hasher.hexdigest()
base, ext = os.path.splitext(filename)
return '%(prefix)s%(first)s/%(second)s/%(hash)s/%(base)s%(ext)s' % {
'prefix': prefix,
'first': hash_[0],
'second': hash_[1],
'hash': hash_,
'base': base,
'ext': ext,
}
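# Path layout sketch: an upload named "photo.jpg" on an Image instance whose
# content hashes to an MD5 digest starting "ab..." is stored under
#     image/original/by-md5/a/b/ab.../photo.jpg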
if IMAGE_PATH is None:
upload_to = hashed_upload_to
else:
if callable(IMAGE_PATH):
upload_to = IMAGE_PATH
else:
parts = IMAGE_PATH.split('.')
module_name = '.'.join(parts[:-1])
module = import_module(module_name)
upload_to = getattr(module, parts[-1])
class Image(models.Model):
image = models.ImageField(upload_to=upload_to,
height_field='height', width_field='width',
max_length=255)
height = models.PositiveIntegerField(default=0, editable=False)
width = models.PositiveIntegerField(default=0, editable=False)
def get_by_size(self, size):
return self.thumbnail_set.get(size=size)
def get_absolute_url(self, size=None):
if not size:
return self.image.url
try:
return self.get_by_size(size).image.url
except Thumbnail.DoesNotExist:
return reverse('image-thumbnail', args=(self.id, size))
class ThumbnailManager(models.Manager):
def get_or_create_at_sizes(self, image, sizes):
sizes_to_create = list(sizes)
sized = {}
for size in sizes:
if size not in IMAGE_SIZES:
raise ValueError("Received unknown size: %s" % size)
try:
sized[size] = image.get_by_size(size)
except Thumbnail.DoesNotExist:
pass
else:
sizes_to_create.remove(size)
if sizes_to_create:
bufs = [
utils.write_image_in_memory(img)
for img in utils.scale_and_crop_iter(
image.image,
[IMAGE_SIZES[size] for size in sizes_to_create])
]
for size, buf in zip(sizes_to_create, bufs):
# and save to storage
original_dir, original_file = os.path.split(image.image.name)
thumb_file = InMemoryUploadedFile(buf, "image", original_file,
None, buf.tell(), None)
sized[size], created = image.thumbnail_set.get_or_create(
size=size, defaults={'image': thumb_file})
# Make sure this is in the correct order
return [sized[size] for size in sizes]
class Thumbnail(models.Model | ):
original = models.ForeignKey(Image, on_delete=models.CASCADE)
image = models.ImageField(upload_to=upload_to,
height_field='height', width_field='width',
max_length=255)
size = models.CharField(max_length=100)
height = models.PositiveIntegerField(default=0, editable=False)
width = models.PositiveIntegerField(default=0, editable=False)
| objects = ThumbnailManager()
class Meta:
unique_together = ('original', 'size')
def get_absolute_url(self):
return self.image.url
@receiver(models.signals.post_save)
def original_changed(sender, instance, created, **kwargs):
if isinstance(instance, Image):
instance.thumbnail_set.all().delete()
@receiver(models.signals.post_delete)
def delete_image_files(sender, instance, **kwargs):
if isinstance(instance, (Image, Thumbnail)) and IMAGE_AUTO_DELETE:
if instance.image.storage.exists(instance.image.name):
instance.image.delete(save=False)
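# Usage sketch (assumes an IMAGE_SIZES entry named "small" in settings):
#
#     img = Image.objects.get(pk=1)
#     thumb, = Thumbnail.objects.get_or_create_at_sizes(img, ['small'])
#     thumb.get_absolute_url()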
|
greencoder/hopefullysunny-django | registrations/management/commands/registration_worker.py | Python | mit | 1,481 | 0.006752 | import zmq
import datetime
import pytz
from django.core.management.base import BaseCommand, CommandError
from django.conf import settings
from registrations.models import Registration
from registrations import handlers
from registrations import tasks
class Command(BaseCommand):
def log(self, message):
f = open(settings | .TASK_LOG_PATH, 'a')
now = datetime.datetime.utcnow().replace(tzinfo=pytz.utc)
log_message = "%s\t%s\n" % (now, message)
self.stdout.write(log_message)
f.write(log_message)
f.close()
def handle(self, *args, **options):
context = zmq.Context()
pull_socket = context.socket(zmq.PULL)
pull_socket.bind('tcp://*:7002')
| self.log("Registration Worker ZMQ Socket Bound to 7002")
while True:
try:
data = pull_socket.recv_json()
task_name = data.pop('task')
task_kwargs = data.pop('kwargs')
self.log("Got task '%s' with kwargs: %s" % (task_name, task_kwargs))
if hasattr(tasks, task_name):
result = getattr(tasks, task_name)(**task_kwargs)
self.log("Task '%s' result: %s" % (task_name, result))
else:
self.log("Received unknown task: %s", task_name)
except Exception, e:
self.log("Error: %s" % e)
pull_socket.close()
context.term()
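# Producer-side sketch matching the JSON shape recv_json() expects above
# (task name and kwargs are illustrative):
#
#     push = zmq.Context().socket(zmq.PUSH)
#     push.connect('tcp://localhost:7002')
#     push.send_json({'task': 'some_task', 'kwargs': {'registration_id': 42}})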
|
chienlieu2017/it_management | odoo/addons/point_of_sale/wizard/pos_box.py | Python | gpl-3.0 | 1,816 | 0.002203 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import api, _
from odoo.exceptions import UserError
from odoo.addons.account.wizard.pos_box import CashBox
class PosBox(CashBox):
_register = False
@api.multi
def run(self):
active_model = self.env.context.get('active_model', False)
active_ids = self.env.context.get('active_ids', [])
if active_model == 'pos.session':
bank_statements = [session.cash_register_id for session in self.env[active_model].browse(active_ids) if session.cash_register_id]
if not bank_statements:
raise UserError(_("There is no cash register for this PoS Session"))
return self._run(bank_statements)
else:
return super(PosBox, self).run()
class PosBoxIn(PosBox):
_inherit = 'cash.box.in'
def _calculate_values_for_statement_line(self, record):
values = super(PosBoxIn, self)._calculate_values_for_statement_line(record=record)
active_model = self.env.context.get('active_model', False)
active_ids = self.env.context.get('active_ids', [])
if active_model == 'pos.session' and active_ids:
values['ref'] = self.env[active_model].browse(active_ids)[0].name
return values
class PosBoxOut(PosBox):
_inherit = 'cash.box.out'
def _calculate_values_for_statement_line(self, record):
values = super(PosBoxOut, self)._calculate_values_for_statement_line(record)
active_model = self.env.context.get('active_model', False)
active_ids = self.env.context.get('active_ids', [])
if active_model == 'pos.session' and active_ids:
values['ref'] = self.env[active_model].brows | e(active_ids)[0].name
return valu | es
|
arielisidro/myprograms | python/6.00.1x Files/ps4/ps4a.py | Python | gpl-2.0 | 9,787 | 0.011239 | # 6.00x Problem Set 4A Template
#
# The 6.00 Word Game
# Created by: Kevin Luu <luuk> and Jenna Wiens <jwiens>
# Modified by: Sarina Canelake <sarina>
#
import random
import string
import os
VOWELS = 'aeiou'
CONSONANTS = 'bcdfghjklmnpqrstvwxyz'
HAND_SIZE = 13
SCRABBLE_LETTER_VALUES = {
    'a': 1, 'b': 3, 'c': 3, 'd': 2, 'e': 1, 'f': 4, 'g': 2, 'h': 4,
    'i': 1, 'j': 8, 'k': 5, 'l': 1, 'm': 3, 'n': 1, 'o': 1, 'p': 3,
    'q': 10, 'r': 1, 's': 1, 't': 1, 'u': 1, 'v': 4, 'w': 4, 'x | ': 8,
    'y': 4, 'z': 10
}
# ------------ | -----------------------
# Helper code
# (you don't need to understand this helper code)
WORDLIST_FILENAME = os.path.dirname(__file__)+"/words.txt"
print WORDLIST_FILENAME
def loadWords():
"""
Returns a list of valid words. Words are strings of lowercase letters.
Depending on the size of the word list, this function may
take a while to finish.
"""
print "Loading word list from file..."
# inFile: file
inFile = open(WORDLIST_FILENAME, 'r', 0)
# wordList: list of strings
wordList = []
for line in inFile:
wordList.append(line.strip().lower())
print " ", len(wordList), "words loaded."
return wordList
def getFrequencyDict(sequence):
"""
Returns a dictionary where the keys are elements of the sequence
and the values are integer counts, for the number of times that
an element is repeated in the sequence.
sequence: string or list
return: dictionary
"""
# freqs: dictionary (element_type -> int)
freq = {}
for x in sequence:
freq[x] = freq.get(x,0) + 1
return freq
# (end of helper code)
# -----------------------------------
#
# Problem #1: Scoring a word
#
def getWordScore(word, n):
"""
Returns the score for a word. Assumes the word is a valid word.
The score for a word is the sum of the points for letters in the
word, multiplied by the length of the word, PLUS 50 points if all n
letters are used on the first turn.
Letters are scored as in Scrabble; A is worth 1, B is worth 3, C is
worth 3, D is worth 2, E is worth 1, and so on (see SCRABBLE_LETTER_VALUES)
word: string (lowercase letters)
n: integer (HAND_SIZE; i.e., hand size required for additional points)
returns: int >= 0
"""
score=0
for letter in word:
score+=SCRABBLE_LETTER_VALUES[letter]
length=len(word)
score*=length
if length==n:
score+=50
return score
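# Worked example against SCRABBLE_LETTER_VALUES above:
# getWordScore('weed', 6) == (4 + 1 + 1 + 2) * 4 == 32; no +50 bonus since
# len('weed') != 6.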
#
# Problem #2: Make sure you understand how this function works and what it does!
#
def displayHand(hand):
"""
Displays the letters currently in the hand.
For example:
>>> displayHand({'a':1, 'x':2, 'l':3, 'e':1})
Should print out something like:
a x x l l l e
The order of the letters is unimportant.
hand: dictionary (string -> int)
"""
for letter in hand.keys():
for j in range(hand[letter]):
print letter, # print all on the same line
print # print an empty line
#
# Problem #2: Make sure you understand how this function works and what it does!
#
def dealHand(n):
"""
Returns a random hand containing n lowercase letters.
At least n/3 the letters in the hand should be VOWELS.
Hands are represented as dictionaries. The keys are
letters and the values are the number of times the
particular letter is repeated in that hand.
n: int >= 0
returns: dictionary (string -> int)
"""
hand={}
numVowels = n / 3
for i in range(numVowels):
x = VOWELS[random.randrange(0,len(VOWELS))]
hand[x] = hand.get(x, 0) + 1
for i in range(numVowels, n):
x = CONSONANTS[random.randrange(0,len(CONSONANTS))]
hand[x] = hand.get(x, 0) + 1
return hand
#
# Problem #2: Update a hand by removing letters
#
def updateHand(hand, word):
"""
Assumes that 'hand' has all the letters in word.
In other words, this assumes that however many times
a letter appears in 'word', 'hand' has at least as
many of that letter in it.
Updates the hand: uses up the letters in the given word
and returns the new hand, without those letters in it.
Has no side effects: does not modify hand.
word: string
hand: dictionary (string -> int)
returns: dictionary (string -> int)
"""
# TO DO ... <-- Remove this comment when you code this function
handCopy=hand.copy()
for letter in word:
handCopy[letter]-=1
if handCopy[letter]==0:
del handCopy[letter]
return handCopy
#
# Problem #3: Test word validity
#
def isValidWord(word, hand, wordList):
"""
Returns True if word is in the wordList and is entirely
composed of letters in the hand. Otherwise, returns False.
Does not mutate hand or wordList.
word: string
hand: dictionary (string -> int)
wordList: list of lowercase strings
"""
# TO DO ... <-- Remove this comment when you code this function
handCopy=hand.copy()
for letter in word:
if letter in handCopy:
handCopy[letter]-=1
if handCopy[letter]<0:
return False
else:
return False
if word in wordList:
return True
return False
#
# Problem #4: Playing a hand
#
def calculateHandlen(hand):
"""
Returns the length (number of letters) in the current hand.
hand: dictionary (string-> int)
returns: integer
"""
# TO DO... <-- Remove this comment when you code this function
length=0
for key in hand.keys():
length+=hand[key]
return length
def playHand(hand, wordList, n):
"""
Allows the user to play the given hand, as follows:
* The hand is displayed.
* The user may input a word or a single period (the string ".")
to indicate they're done playing
* Invalid words are rejected, and a message is displayed asking
the user to choose another word until they enter a valid word or "."
* When a valid word is entered, it uses up letters from the hand.
* After every valid word: the score for that word is displayed,
the remaining letters in the hand are displayed, and the user
is asked to input another word.
* The sum of the word scores is displayed when the hand finishes.
* The hand finishes when there are no more unused letters or the user
inputs a "."
hand: dictionary (string -> int)
wordList: list of lowercase strings
n: integer (HAND_SIZE; i.e., hand size required for additional points)
"""
# BEGIN PSEUDOCODE <-- Remove this comment when you code this function; do your coding within the pseudocode (leaving those comments in-place!)
# Keep track of the total score
totalScore=0
# As long as there are still letters left in the hand:
while calculateHandlen(hand)>0:
# Display the hand
print "\nCurrent Hand: ",
displayHand(hand)
# Ask user for input
word=raw_input('Enter word, or a "." to indicate you are finished: ')
# If the input is a single period:
if word=='.':
# End the game (break out of the loop)
break
# Otherwise (the input is not a single period):
else:
# If the word is not valid:
if not isValidWord(word, hand, wordList):
# Reject invalid word (print a message followed by a blank line)
print "Invalid word, please try again."
# Otherwise (the word is valid):
else:
# Tell the user how many points the word earned, and the updated total score, in one line followed by a blank line
score=getWordScore(word, n)
totalScore+=score
print '"'+word+'" earned ',str(score), " points. Total: ",str(totalScore)+" points"
|
lkorigin/laniakea | src/rubicon/rubicon/fileimport.py | Python | gpl-3.0 | 6,906 | 0.001593 | # Copyright (C) 2018-2019 Matthias Klumpp <matthias@tenstral.net>
#
# Licensed under the GNU Lesser General Public License Version 3
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the license, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
import os
import sys
import logging as log
from glob import glob
from laniakea import LkModule
from laniakea.dud import Dud
from laniakea.utils import get_dir_shorthand_for_uuid, random_string
from laniakea.db import session_scope, Job, JobResult, JobKind, SourcePackage
from laniakea.msgstream import EventEmitter
from .rubiconfig import RubiConfig
from .utils import safe_rename
def accept_upload(conf, dud, event_emitter):
'''
Accept the upload and move its data to the right places.
'''
job_success = dud.get('X-Spark-Success') == 'Yes'
job_id = dud.get('X-Spark-Job')
# mark job as accepted and done
with session_scope() as session:
job = session.query(Job).filter(Job.uuid == job_id).one_or_none()
if not job:
log.error('Unable to mark job \'{}\' as done: The Job was not found.'.format(job_id))
# this is a weird situation, there is no proper way to handle it as this indicates a bug
# in the Laniakea setup or some other oddity.
# The least harmful thing to do is to just leave the upload alone and try again later.
return
job.result = JobResult.SUCCESS if job_success else JobResult.FAILURE
job.latest_log_excerpt = None
# move the log file and Firehose reports to the log storage
log_target_dir = os.path.join(conf.log_storage_dir, get_dir_shorthand_for_uuid(job_id))
firehose_target_dir = os.path.join(log_target_dir, 'firehose')
for fname in dud.get_files():
if fname.endswith('.log'):
os.makedirs(log_target_dir, exist_ok=True)
# move the logfile to its destination and ensure it is named correctly
target_fname = os.path.join(log_target_dir, job_id + '.log')
safe_rename(fname, target_fname)
elif fname.endswith('.firehose.xml'):
os.makedirs(firehose_target_dir, exist_ok=True)
# move the firehose report to its own directory and rename it
fh_target_fname = os.path.join(firehose_target_dir, job_id + '.firehose.xml')
safe_rename(fname, fh_target_fname)
# handle different job data
if job.module == LkModule.ISOTOPE:
from .import_isotope import handle_isotope_upload
handle_isotope_upload(session,
success=job_success,
conf=conf,
dud=dud,
job=job,
event_emitter=event_emitter)
elif job.kind == JobKind.PACKAGE_BUILD:
# the package has been imported by Dak, so we just announce this
# event to the world
spkg = session.query(SourcePackage) \
.filter(SourcePackage.source_uuid == job.trigger) \
.filter(SourcePackage.version == job.version) \
.one_or_none()
| if spkg:
suite_target_name = '?'
if job.data:
suite_target_name = job.data.get('suite', '?')
event_data = {'pkgname': spkg.name,
'version': job.version,
'architecture': job.architecture,
'suite': suite_target_name,
'job_id': job_id}
if job_success:
event_emitter.s | ubmit_event_for_mod(LkModule.ARCHIVE, 'package-build-success', event_data)
else:
event_emitter.submit_event_for_mod(LkModule.ARCHIVE, 'package-build-failed', event_data)
else:
event_emitter.submit_event('upload-accepted', {'job_id': job_id, 'job_failed': not job_success})
# remove the upload description file from incoming
os.remove(dud.get_dud_file())
log.info("Upload {} accepted.", dud.get_filename())
def reject_upload(conf, dud, reason='Unknown', event_emitter=None):
'''
If a file has issues, we reject it and put it into the rejected queue.
'''
os.makedirs(conf.rejected_dir, exist_ok=True)
# move the files referenced by the .dud file
random_suffix = random_string(4)
for fname in dud.get_files():
target_fname = os.path.join(conf.rejected_dir, os.path.basename(fname))
if os.path.isfile(target_fname):
target_fname = target_fname + '+' + random_suffix
# move the file to the rejected dir
safe_rename(fname, target_fname)
# move the .dud file itself
target_fname = os.path.join(conf.rejected_dir, dud.get_filename())
if os.path.isfile(target_fname):
target_fname = target_fname + '+' + random_suffix
safe_rename(dud.get_dud_file(), target_fname)
# also store the reject reason for future reference
with open(target_fname + '.reason', 'w') as f:
f.write(reason + '\n')
    log.info('Upload %s rejected.', dud.get_filename())
if event_emitter:
event_emitter.submit_event('upload-rejected', {'dud_filename': dud.get_filename(), 'reason': reason})
def import_files_from(conf, incoming_dir):
'''
Import files from an untrusted incoming source.
IMPORTANT: We assume that the uploader can not edit their files post-upload.
If they could, we would be vulnerable to timing attacks here.
'''
emitter = EventEmitter(LkModule.RUBICON)
for dud_file in glob(os.path.join(incoming_dir, '*.dud')):
dud = Dud(dud_file)
try:
dud.validate(keyrings=conf.trusted_gpg_keyrings)
except Exception as e:
reason = 'Signature validation failed: {}'.format(str(e))
reject_upload(conf, dud, reason, emitter)
continue
# if we are here, the file is good to go
accept_upload(conf, dud, emitter)
def import_files(options):
conf = RubiConfig()
if not options.incoming_dir:
print('No incoming directory set. Can not process any files.')
sys.exit(1)
import_files_from(conf, options.incoming_dir)
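# Typical flow (paths here are illustrative, not from the Laniakea docs): with
# an incoming queue such as /srv/incoming containing foo.dud plus the files it
# references, import_files_from() validates each .dud signature against
# conf.trusted_gpg_keyrings and then accepts or rejects the whole upload.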
|
agrc/deq-enviro | scripts/nightly/databases/test.py | Python | mit | 187 | 0.02139 | import arcpy, os
db = os.path.join(os.path.dirname(os.path.abs | path(__file__)), r'eqedocsp.sde\AGRC.VW_DSHW_FACILITY')
if arcpy | .Exists(db):
print('pass')
else:
print('fail')
|
sinkap/trappy | trappy/wa/results.py | Python | apache-2.0 | 5,153 | 0.00097 | # Copyright 2015-2016 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Parse the results from a Workload Automation run and show it in a
"pretty" table
"""
import os
import collections, csv, re
import pandas as pd
from matplotlib import pyplot as plt
class Result(pd.DataFrame):
"""A DataFrame-like class for storing benchmark results"""
def __init__(self, *args, **kwargs):
super(Result, self).__init__(*args, **kwargs)
self.ax = None
def init_fig(self):
_, self.ax = plt.subplots()
def enlarge_axis(self, data):
"""Make sure that the axis don't clobber some of the data"""
(_, _, plot_y_min, plot_y_max) = plt.axis()
concat_data = pd.concat(data[s] for s in data)
data_min = min(concat_data)
data_max = max(concat_data)
# A good margin can be 10% of the data range
margin = (data_max - data | _min) / 10
if margin < 1:
margin = 1
update_axis = False
if data_min <= plot_y_min:
plot_y_min = data_min - margin
update_axis = True
if data_max >= plot_y_max:
plot_y_max = data_max + margin
update_axis = True
if update_axis:
self.ax.set_ylim(plot_y_min, plot_y_max)
def plot_results_benchmark(self, benchmark, title=None):
"""Plot the results of the execution of a given benchmark
|
        If no title is supplied, one is derived from the benchmark name.
"""
if title is None:
title = benchmark.replace('_', ' ')
title = title.title()
self[benchmark].plot(ax=self.ax, kind="bar", title=title)
plt.legend(bbox_to_anchor=(1.05, .5), loc=6)
def plot_results(self):
for bench in self.columns.levels[0]:
self.plot_results_benchmark(bench)
def get_run_number(metric):
found = False
run_number = None
if re.match("Overall_Score|score|FPS", metric):
found = True
match = re.search(r"(.+)[ _](\d+)", metric)
if match:
run_number = int(match.group(2))
if match.group(1) == "Overall_Score":
run_number -= 1
else:
run_number = 0
return (found, run_number)
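# A few illustrative metric names and what the parser above makes of them
# (names invented to match the regexes, not taken from a real WA run):
#   get_run_number("score 3")         -> (True, 3)
#   get_run_number("Overall_Score 2") -> (True, 1)   # Overall_Score is 1-based
#   get_run_number("FPS")             -> (True, 0)   # matched, no run suffix
#   get_run_number("exec_time")       -> (False, None)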
def get_results(path=".", name=None):
"""Return a pd.DataFrame with the results
The DataFrame's rows are the scores. The first column is the
benchmark name and the second the id within it. For benchmarks
that have a score result, that's what's used. For benchmarks with
    FPS_* result, that's the score. E.g. glbenchmark's "score" is its
    fps.
An optional name argument can be passed. If supplied, it overrides
the name in the results file.
"""
bench_dict = collections.OrderedDict()
if os.path.isdir(path):
path = os.path.join(path, "results.csv")
with open(path) as fin:
results = csv.reader(fin)
for row in results:
(is_result, run_number) = get_run_number(row[3])
if is_result:
if name:
run_id = name
else:
run_id = re.sub(r"_\d+", r"", row[0])
bench = row[1]
try:
result = int(row[4])
except ValueError:
result = float(row[4])
if bench in bench_dict:
if run_id in bench_dict[bench]:
if run_number not in bench_dict[bench][run_id]:
bench_dict[bench][run_id][run_number] = result
else:
bench_dict[bench][run_id] = {run_number: result}
else:
bench_dict[bench] = {run_id: {run_number: result}}
bench_dfrs = {}
for bench, run_id_dict in bench_dict.iteritems():
bench_dfrs[bench] = pd.DataFrame(run_id_dict)
return Result(pd.concat(bench_dfrs.values(), axis=1,
keys=bench_dfrs.keys()))
def combine_results(data):
"""Combine two DataFrame results into one
The data should be an array of results like the ones returned by
get_results() or have the same structure. The returned DataFrame
has two column indexes. The first one is the benchmark and the
second one is the key for the result.
"""
res_dict = {}
for benchmark in data[0].columns.levels[0]:
concat_objs = [d[benchmark] for d in data]
res_dict[benchmark] = pd.concat(concat_objs, axis=1)
combined = pd.concat(res_dict.values(), axis=1, keys=res_dict.keys())
return Result(combined)
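# Minimal usage sketch (directory names are hypothetical):
#   before = get_results("wa_output_before", name="before")
#   after = get_results("wa_output_after", name="after")
#   combined = combine_results([before, after])
#   combined.init_fig()
#   combined.plot_results()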
|
saltstack/salt | tests/pytests/unit/modules/test_rpm_lowpkg.py | Python | apache-2.0 | 11,207 | 0.000803 | """
:codeauthor: Jayesh Kariya <jayeshk@saltstack.com>
"""
import pytest
import salt.modules.cmdmod
import salt.modules.rpm_lowpkg as rpm
import salt.utils.path
from tests.support.mock import MagicMock, patch
# pylint: disable=unused-import
try:
import rpm as rpm_lib
HAS_RPM = True
except ImportError:
HAS_RPM = False
try:
import rpm_vercmp
HAS_PY_RPM = True
except ImportError:
HAS_PY_RPM = False
# pylint: enable=unused-import
def _called_with_root(mock):
cmd = " ".join(mock.call_args[0][0])
return cmd.startswith("rpm --root /")
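# e.g. a call recorded as mock(["rpm", "--root", "/", "-qa"]) joins to
# "rpm --root / -qa", so the helper reports whether the command was scoped
# to an alternative root.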
@pytest.fixture
def configure_loader_modules():
return {rpm: {"rpm": MagicMock(return_value=MagicMock)}}
# 'list_pkgs' function tests: 2
def test_list_pkgs():
"""
Test if it list the packages currently installed in a dict
"""
mock = MagicMock(return_value="")
with patch.dict(rpm.__salt__, {"cmd.run": mock}):
assert rpm.list_pkgs() == {}
assert not _called_with_root(mock)
def test_list_pkgs_root():
"""
Test if it list the packages currently installed in a dict,
called with root parameter
"""
mock = MagicMock(return_value="")
with patch.dict(rpm.__salt__, {"cmd.run": mock}):
rpm.list_pkgs(root="/")
assert _called_with_root(mock)
# 'verify' function tests: 2
def test_verify():
"""
Test if it runs an rpm -Va on a system, and returns the
results in a dict
"""
mock = MagicMock(
return_value={"stdout": "", "stderr": "", "retcode": 0, "pid": 12345}
)
with patch.dict(rpm.__salt__, {"cmd.run_all": mock}):
assert rpm.verify("httpd") == {}
assert not _called_with_root(mock)
def test_verify_root():
"""
Test if it runs an rpm -Va on a system, and returns the
results in a dict, called with root parameter
"""
mock = MagicMock(
return_value={"stdout": "", "stderr": "", "retcode": 0, "pid": 12345}
)
with patch.dict(rpm.__salt__, {"cmd.run_all": mock}):
rpm.verify("httpd", root="/")
assert _called_with_root(mock)
# 'file_list' function tests: 2
def test_file_list():
"""
Test if it list the files that belong to a package.
"""
mock = MagicMock(return_value="")
with patch.dict(rpm.__salt__, {"cmd.run": mock}):
assert rpm.file_list("httpd") == {"errors": [], "files": []}
assert not _called_with_root(mock)
def test_file_list_root():
"""
Test if it list the files that belong to a package, using the
root parameter.
"""
mock = MagicMock(return_value="")
with patch.dict(rpm.__salt__, {"cmd.run": mock}):
rpm.file_list("httpd", root="/")
assert _called_with_root(mock)
# 'file_dict' function tests: 2
def test_file_dict():
"""
Test if it list the files that belong to a package
"""
mock = MagicMock(return_value="")
with patch.dict(rpm.__salt__, {"cmd.run": mock}):
assert rpm.file_dict("httpd") == {"errors": [], "packages": {}}
assert not _called_with_root(mock)
def test_file_dict_root():
"""
Test if it list the files that belong to a package
"""
mock = MagicMock(return_value="")
with patch.dict(rpm.__salt__, {"cmd.run": mock}):
rpm.file_dict("httpd", root="/")
assert _called_with_root(mock)
# 'owner' function tests: 1
def test_owner():
"""
Test if it return the name of the package that owns the file.
"""
assert rpm.owner() == ""
ret = "file /usr/bin/salt-jenkins-build is not owned by any pack | age"
mock = MagicMock(return_value=ret)
with patch.dict(rpm.__salt__, {"cmd.run_stdout": mock}):
assert rpm.owner("/usr/bin/salt-jenkins-build") == ""
assert not _c | alled_with_root(mock)
ret = {
"/usr/bin/vim": "vim-enhanced-7.4.160-1.e17.x86_64",
"/usr/bin/python": "python-2.7.5-16.e17.x86_64",
}
mock = MagicMock(
side_effect=[
"python-2.7.5-16.e17.x86_64",
"vim-enhanced-7.4.160-1.e17.x86_64",
]
)
with patch.dict(rpm.__salt__, {"cmd.run_stdout": mock}):
assert rpm.owner("/usr/bin/python", "/usr/bin/vim") == ret
assert not _called_with_root(mock)
def test_owner_root():
"""
Test if it return the name of the package that owns the file,
using the parameter root.
"""
assert rpm.owner() == ""
ret = "file /usr/bin/salt-jenkins-build is not owned by any package"
mock = MagicMock(return_value=ret)
with patch.dict(rpm.__salt__, {"cmd.run_stdout": mock}):
rpm.owner("/usr/bin/salt-jenkins-build", root="/")
assert _called_with_root(mock)
# 'checksum' function tests: 2
def test_checksum():
"""
Test if checksum validate as expected
"""
ret = {
"file1.rpm": True,
"file2.rpm": False,
"file3.rpm": False,
}
mock = MagicMock(side_effect=[True, 0, True, 1, False, 0])
with patch.dict(rpm.__salt__, {"file.file_exists": mock, "cmd.retcode": mock}):
assert rpm.checksum("file1.rpm", "file2.rpm", "file3.rpm") == ret
assert not _called_with_root(mock)
def test_checksum_root():
"""
Test if checksum validate as expected, using the parameter
root
"""
mock = MagicMock(side_effect=[True, 0])
with patch.dict(rpm.__salt__, {"file.file_exists": mock, "cmd.retcode": mock}):
rpm.checksum("file1.rpm", root="/")
assert _called_with_root(mock)
@pytest.mark.parametrize("rpm_lib", ["HAS_RPM", "HAS_PY_RPM", "rpmdev-vercmp"])
def test_version_cmp_rpm_all_libraries(rpm_lib):
"""
Test package version when each library is installed
"""
rpmdev = salt.utils.path.which("rpmdev-vercmp")
patch_cmd = patch.dict(rpm.__salt__, {"cmd.run_all": salt.modules.cmdmod.run_all})
if rpm_lib == "rpmdev-vercmp":
if rpmdev:
patch_rpm = patch("salt.modules.rpm_lowpkg.HAS_RPM", False)
patch_py_rpm = patch("salt.modules.rpm_lowpkg.HAS_PY_RPM", False)
else:
pytest.skip("The rpmdev-vercmp binary is not installed")
elif rpm_lib == "HAS_RPM":
if HAS_RPM:
patch_rpm = patch("salt.modules.rpm_lowpkg.HAS_RPM", True)
patch_py_rpm = patch("salt.modules.rpm_lowpkg.HAS_PY_RPM", False)
else:
pytest.skip("The RPM lib is not installed, skipping")
elif rpm_lib == "HAS_PY_RPM":
if HAS_PY_RPM:
patch_rpm = patch("salt.modules.rpm_lowpkg.HAS_RPM", False)
patch_py_rpm = patch("salt.modules.rpm_lowpkg.HAS_PY_RPM", True)
else:
pytest.skip("The Python RPM lib is not installed, skipping")
with patch_rpm, patch_py_rpm, patch_cmd:
assert -1 == rpm.version_cmp("1", "2")
assert -1 == rpm.version_cmp("2.9.1-6.el7_2.3", "2.9.1-6.el7.4")
assert 1 == rpm.version_cmp("3.2", "3.0")
assert 0 == rpm.version_cmp("3.0", "3.0")
assert 1 == rpm.version_cmp("1:2.9.1-6.el7_2.3", "2.9.1-6.el7.4")
assert -1 == rpm.version_cmp("1:2.9.1-6.el7_2.3", "1:2.9.1-6.el7.4")
assert 1 == rpm.version_cmp("2:2.9.1-6.el7_2.3", "1:2.9.1-6.el7.4")
assert 0 == rpm.version_cmp("3:2.9.1-6.el7.4", "3:2.9.1-6.el7.4")
assert -1 == rpm.version_cmp("3:2.9.1-6.el7.4", "3:2.9.1-7.el7.4")
assert 1 == rpm.version_cmp("3:2.9.1-8.el7.4", "3:2.9.1-7.el7.4")
def test_version_cmp_rpm():
"""
Test package version if RPM-Python is installed
:return:
"""
mock_label = MagicMock(return_value=-1)
mock_log = MagicMock()
patch_label = patch("salt.modules.rpm_lowpkg.rpm.labelCompare", mock_label)
patch_log = patch("salt.modules.rpm_lowpkg.log", mock_log)
patch_rpm = patch("salt.modules.rpm_lowpkg.HAS_RPM", True)
with patch_label, patch_rpm, patch_log:
assert -1 == rpm.version_cmp("1", "2")
assert not mock_log.warning.called
assert mock_label.called
def test_version_cmp_rpmutils():
"""
Test package version if rpmUtils.miscutils called
:return:
"""
mock_log = MagicMock()
mock_rpmUtils = MagicMock()
mock_rpmUtils.miscutils = MagicMock()
|
Wheelspawn/Python-Code-Fragments | IsGraphic.py | Python | gpl-3.0 | 1,082 | 0.019409 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Feb 5 12:49:53 2018
A program that will tell if a given degree sequence is graphic.
Uses th | e Havel-Hakimi algorithm. Returns a boolean.
Steps are printed to console.
@author: nsage
"""
def is_graphic(l): # checks to se | e if a degree list is graphic
    # not graphic if the largest degree exceeds the number of remaining
    # vertices, or if the degree sum is odd (the sum must equal 2|E|)
    if l[0] > len(l)-1 or sum(l)%2==1:
return False
else:
if sum(l) == 0: # if vertex elimination is complete
return True
else: # delete the highest degree vertex
v = l[0]
l = l[1:]
for i in range(v): # decrement the next v indices
l[i] -= 1
for j in range(len(l)-1,0,-1): # remove the zeros at the end
if l[j] == 0:
l.remove(l[j])
else:
break
l.sort(reverse=True)
print(l)
return is_graphic(l) # recursive call
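# Worked examples (the input must be sorted in non-increasing order, as the
# recursion above assumes):
#   is_graphic([3, 3, 2, 2, 1, 1]) -> True (intermediate lists are printed)
#   is_graphic([4, 2, 1, 1])       -> False (degree 4 exceeds the 3 other vertices)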
|
jean/sentry | src/sentry/api/endpoints/auth_index.py | Python | bsd-3-clause | 2,868 | 0.000697 | from __future__ import absolute_import
from django.contrib.auth import logout
from django.contrib.auth.models import AnonymousUser
from rest_framework.response import Response
from sentry.api.authentication import QuietBasicAuthentication
from sentry.models import Authenticator
from sentry.api.base import Endpoint
from sentry.api.serializers import serialize
from sentry.utils import auth
class AuthIndexEndpoint(Endpoint):
"""
Manage session authentication
Intended to be used by the internal Sentry application to handle
authentication methods from JS endpoints by relying on internal sessions
and simple HTTP authentication.
"""
authentication_classes = [QuietBasicAuthentication]
permission_classes = ()
# XXX: it's not quite clear if this should be documented or not at
# this time.
# doc_section = DocSection.ACCOUNTS
def get(self, request):
if not request.user.is_authenticated():
return Response(status=400)
data = serialize(request.user, request.user)
data['isSuperuser'] = request.is_superuser()
return Response(data)
def post(self, request):
"""
Authenticate a User
```````````````````
This endpoint authenticates a user using the provided credentials
through a regular HTTP basic auth system. The response contains
cookies that need to be sent with further requests that require
authentication.
This is primarily used inte | rnally in Sentry.
Common example::
curl -X ###METHOD### -u username:password ###URL###
"""
if not request.user.is_authenticated():
return Response(status=400)
# If 2fa login is enabled then we cannot sign in with username and
# password through this api endpoint.
if Authenticator.objects.user_has_2fa(request.user):
return R | esponse(
{
'2fa_required': True,
'message': 'Cannot sign-in with basic auth when 2fa is enabled.'
},
status=403
)
try:
# Must use the real request object that Django knows about
auth.login(request._request, request.user)
except auth.AuthUserPasswordExpired:
return Response(
{
'message': 'Cannot sign-in with basic auth because password has expired.',
},
status=403
)
return self.get(request)
def delete(self, request, *args, **kwargs):
"""
Logout the Authenticated User
`````````````````````````````
Deauthenticate the currently active session.
"""
logout(request._request)
request.user = AnonymousUser()
return Response(status=204)
|
aginzberg/crowdsource-platform | crowdsourcing/migrations/0000_get_requester_ratings_fn.py | Python | mit | 3,733 | 0.004286 | # -*- coding: utf-8 -*-
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('crowdsourcing', '0007_auto_20151208_1957'),
]
operations = [
migrations.RunSQL('''
CREATE OR REPLACE FUNCTION get_requester_ratings(IN worker_profile_id INTEGER)
RETURNS TABLE(requester_id INTEGER, requester_rating DOUBLE PRECISION,
requester_avg_rating DOUBLE PRECISION)
AS $$
SELECT
r.id,
| wr_rating.weight,
avg_wr_rating
FROM crowdsourcing_requester r
| LEFT OUTER JOIN (
SELECT
wrr.target_id,
wrr.weight AS weight
FROM crowdsourcing_workerrequesterrating wrr
INNER JOIN (
SELECT
target_id,
MAX(last_updated) AS max_date
FROM crowdsourcing_workerrequesterrating
WHERE origin_type = 'worker' AND origin_id = $1
GROUP BY target_id
) most_recent
ON wrr.target_id = most_recent.target_id AND wrr.last_updated = most_recent.max_date AND
wrr.origin_type = 'worker'
AND wrr.origin_id = $1
) wr_rating ON wr_rating.target_id = r.profile_id
LEFT OUTER JOIN (
SELECT
target_id,
AVG(weight) AS avg_wr_rating
FROM (
SELECT
wrr.target_id,
wrr.weight
FROM crowdsourcing_workerrequesterrating wrr
INNER JOIN (
SELECT
origin_id,
target_id,
MAX(last_updated) AS max_date
FROM crowdsourcing_workerrequesterrating
WHERE origin_id<>$1 AND origin_type='worker'
GROUP BY origin_id, target_id
) most_recent
ON most_recent.origin_id = wrr.origin_id AND most_recent.target_id = wrr.target_id AND
wrr.last_updated = most_recent.max_date
AND wrr.origin_id <> $1 AND wrr.origin_type = 'worker'
) recent_wr_rating
GROUP BY target_id
) avg_wr_rating
ON avg_wr_rating.target_id = r.profile_id;
$$
LANGUAGE SQL
STABLE
RETURNS NULL ON NULL INPUT;
''')
]
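    # Illustrative call of the SQL function installed above (the worker
    # profile id is made up):
    #
    #   from django.db import connection
    #   with connection.cursor() as cursor:
    #       cursor.execute("SELECT * FROM get_requester_ratings(%s)", [42])
    #       rows = cursor.fetchall()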
|
CognitionGuidedSurgery/msml | src/msml/exporter/msml_namespace.py | Python | gpl-3.0 | 231 | 0.038961 | __author__ = 'suwelack'
|
from rdflib.namespace import ClosedNamespace
#MSMLRep= ClosedNamespace(
# uri=URIRef("http://www.msml.org/ontology/msmlRepresentation#"),
# terms=[ |
# 'isRepresentationOf', 'isDataNodeFor']
#) |
ankostis/ViTables | examples/scripts/MDobjects.py | Python | gpl-3.0 | 2,868 | 0.004881 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (C) 2005-2007 Carabos Coop. V. All rights reserved
# Copyright (C) 2008-2017 Vicent Mas. All rights reserved
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Author: Vicent Mas - vmas@vitables.org
"""
This is meant to | exercise ViTables capability to zoom into
multidimensional cells.
It also works to check views of multidimensional attributes.
"""
import tables
import numpy
class Particle(tables.IsDescription):
"""Description of a table record."""
name = tables.StringCol(16, pos=1)
| lati = tables.IntCol(pos=2)
vector = tables.Int32Col(shape=(200,), pos=3)
matrix1 = tables.Int32Col(shape=(2, 200), pos=4)
matrix2 = tables.FloatCol(shape=(100, 2), pos=5)
matrix3 = tables.FloatCol(shape=(10, 100, 2), pos=5)
matrix4 = tables.FloatCol(shape=(2, 10, 100, 2), pos=5)
# Open a file in "w"rite mode
fileh = tables.open_file("MDobjects.h5", mode = "w")
# Create the table with compression 'on' in order to reduce size as
# much as possible
table = fileh.create_table(fileh.root, 'table', Particle, "A table",
filters=tables.Filters(complevel=1))
# Append several rows with default values
for i in range(10):
table.row.append()
table.flush()
# create new arrays
atom1 = tables.IntAtom()
shape1 = (2, 10, 10, 1)
filters1 = tables.Filters(complevel=1)
#(2, 10, 10, 3)
array1 = fileh.create_carray(fileh.root, 'array1', atom1, shape1,
filters=filters1)
atom2 = tables.FloatAtom()
shape2 = (2, 10, 10, 3, 1)
filters2 = tables.Filters(complevel=1)
#(2, 10, 10, 3, 200)
array2 = fileh.create_carray(fileh.root, 'array2', atom2, shape2,
filters=filters2)
# Add multimensional attributes to the objects
# Integers will go in /table
table.attrs.MD1 = numpy.arange(5, dtype="int8")
table.attrs.MD2 = numpy.arange(10, dtype="int64").reshape(2, 5)
# Complex will go in /array1
array1.attrs.MD1 = numpy.arange(5, dtype="complex128")
array1.attrs.MD2 = numpy.arange(10, dtype="complex128").reshape(2, 5)
# Strings will go in /array2
array2.attrs.MD1 = numpy.array(['Hi', 'world!'], dtype='|S6')
array2.attrs.MD2 = numpy.array([['Hi', 'world!'],
['Hola', 'mon!']], dtype='|S4')
fileh.close()
|
tuxite/pharmaship | pharmaship/inventory/migrations/0002_item_packing.py | Python | agpl-3.0 | 1,592 | 0.005653 | # Generated by Django 3.0.7 on 2020-12-07 08:11
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('inventory', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='article',
name='packing_content',
field=models.PositiveIntegerField(default=1, verbose_name='Quantity per pack'),
),
migrations.AddField(
model_name='article',
name='packing_name',
field=models.PositiveIntegerField(choices=[(0, 'default'), (10, 'box'), (11, 'set'), (20, 'pair'), (120, 'dozen'),], default=0),
),
migrations.AddField(
model_name='firstaidkititem',
na | me='packing_content',
field=models.PositiveIntegerField(default=1, verbose_name='Quantity per pack'),
),
migrations.AddField(
model_name='firstaidkititem',
name='packing_name',
field=models.PositiveIntegerField(choices=[(0, 'default'), (10, 'box'), | (11, 'set'), (20, 'pair'), (120, 'dozen'),], default=0),
),
migrations.AddField(
model_name='medicine',
name='packing_content',
field=models.PositiveIntegerField(default=1, verbose_name='Quantity per pack'),
),
migrations.AddField(
model_name='medicine',
name='packing_name',
field=models.PositiveIntegerField(choices=[(0, 'default'), (10, 'box'), (11, 'set'), (20, 'pair'), (120, 'dozen'),], default=0),
),
]
|
kezilu/pextant | pextant/sextant.py | Python | mit | 6,172 | 0.005347 | from flask_settings import GEOTIFF_FULL_PATH
import sys
import traceback
sys.path.append('../')
import numpy as np
import json
from datetime import timedelta
from functools import update_wrapper
from pextant.EnvironmentalModel import GDALMesh
from pextant.explorers import Astronaut
from pextant.analysis.loadWaypoints import JSONloader
from pextant.lib.geoshapely import GeoPolygon, LAT_LONG
from pextant.solvers.astarMesh import astarSolver
from flask import Flask
from flask import make_response, request, current_app
app = Flask(__name__)
def crossdomain(origin=None, methods=None, headers=None,
max_age=21600, attach_to_all=True,
automatic_options=True):
if methods is not None:
methods = ', '.join(sorted(x.upper() for x in methods))
if headers is not None and not isinstance(headers, basestring):
headers = ', '.join(x.upper() for x in headers)
if not isinstance(origin, basestring):
origin = ', '.join(origin)
if isinstance(max_age, timedelta):
max_age = max_age.total_seconds()
def get_methods():
if methods is not None:
return methods
options_resp = current_app.make_default_options_response()
return options_resp.headers['allow']
def decorator(f):
def wrapped_function(*args, **kwargs):
if automatic_options and request.method == 'OPTIONS':
resp = current_app.make_default_options_response()
else:
resp = make_response(f(*args, **kwargs))
if not attach_to_all and request.method != 'OPTIONS':
return resp
h = resp.headers
h['Access-Control-Allow-Origin'] = origin
h['Access-Control-Allow-Methods'] = get_methods()
h['Access-Control-Max-Age'] = str(max_age)
if headers is not None:
h['Access-Control-Allow-Headers'] = headers
return resp
f.provide_automatic_options = False
return update_wrapper(wrapped_function, f)
return decorator
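# e.g. stacking @app.route(...) above @crossdomain(origin='*') adds the
# Access-Control-* headers to every response from that endpoint and lets the
# decorator answer OPTIONS preflight requests automatically.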
def main(argv):
print 'STARTING SEXTANT'
geotiff_full_path = ""
try:
geotiff_full_path = argv[0]
except IndexError:
# print 'Syntax is "sextant <inputfile>"'
pass
if not geotiff_full_path or geotiff_full_path == 'sextant:app':
geotiff_full_path = GEOTIFF_FULL_PATH
print geotiff_full_path
gdal_mesh = GDALMesh(geotiff_full_path)
explorer = Astronaut(80)
solver, waypoints, environmental_model = None, None, None
@app.route('/test', methods=['GET', 'POST'])
@crossdomain(origin='*')
def test():
print str(request)
return json.dumps({'test':'test'})
@app.route('/setwaypoints', methods=['GET', 'POST'])
@crossdomain(origin='*')
def set_waypoints():
try:
global solver, waypoints, environmental_model
print('in set waypoints')
request_data = request.get_json(force=True)
xp_json = request_data['xp_json']
json_loader = JSONloader(xp_json['sequence'])
print 'loaded xp json'
waypoints = json_loader.get_waypoints()
print 'gdal mesh is built from %s' % str(geotiff_full_path)
environmental_model = gdal_mesh.loadSubSection(waypoints.geoEnvelope(), cached=True)
solver = astarSolver(environmental_model, explorer, optimize_on='Energy')
print('loaded fine')
return json.dumps({'loaded': True})
except Exception, e:
traceback.print_exc()
            response = {'error': str(e),
                        'status_code': 400}
            return json.dumps(response), 400
@app.route('/solve', methods=['GET', 'POST'])
@crossdomain(origin='*')
def solve():
global solver, waypoints, environmental_model
print 'in solve'
request_data = request.get_json(force=True)
return_type = request_data['return']
if 'xp_json' in request_data:
xp_json = request_data['xp_json']
json_loader = JSONloader(xp_json['sequence'])
waypoints = json_loader.get_waypoints()
print(waypoints.to(LAT_LONG))
environmental_model = gdal_mesh.loadSubSection(waypoints.geoEnvelope(), cached=True)
solver = astarSolver(environmental_model, explorer, optimize_on='Energy')
search_results, rawpoints, _ = solver.solvemultipoint(waypoints)
return_json = {
'latlong':[]
}
if return_type == 'segmented':
for search_result in search_results.list:
lat, lon = GeoPolygon(environmental_model.ROW_COL, *np.array(search_result.raw).transpose()).to(LAT_LONG)
return_json['latlong'].append({'latitudes': list(lat), 'longitudes': list(lon)})
else:
lat, lon = GeoPolygon(environmental_mo | del.ROW_COL, *np.array(rawpoints).transpose()).to(LAT_LONG)
return_json['latlong'].append({'latitudes': list(lat), 'longitudes': list(lon)})
return json.dumps(return_json)
# OLD Stuff: delete
@app.route('/', methods=['GET', 'POST'])
@crossdomain(origin='*')
def get_waypoints():
print('got request')
data = request.get_json(force=True)
data_np = np.array(data['waypoints']).transpose( | )
#json_waypoints = JSONloader(xpjson)
waypoints = GeoPolygon(LAT_LONG, *data_np)
print waypoints.to(LAT_LONG)
environmental_model = gdal_mesh.loadSubSection(waypoints.geoEnvelope(), cached=True)
explorer = Astronaut(80)
solver = astarSolver(environmental_model, explorer, optimize_on='Energy', cached=True)
_, rawpoints, _ = solver.solvemultipoint(waypoints)
lat, lon = GeoPolygon(environmental_model.ROW_COL, *np.array(rawpoints).transpose()).to(LAT_LONG)
print((lat, lon))
return json.dumps({'latitudes': list(lat), 'longitudes': list(lon)})
if argv[0] != 'sextant:app':
app.run(host='localhost', port=5000)
# if __name__ == "__main__":
main(sys.argv[1:])
#main(['../data/maps/dem/HI_air_imagery.tif']) |
NarrativeScience/lsi | setup.py | Python | bsd-2-clause | 583 | 0 | import setuptools
import os
# This will add the __version__ to the globals
with open("src/lsi/__init__.py") as f:
exec(f.read())
setuptools.setup(
name='lsi',
version=__version__,
author="Narrative Science",
author_email="anelson@narrativescience.com",
url="https://github.com/NarrativeScience/lsi",
package_dir={'': 'src'},
packages=setuptools.find_packages('src'),
provides=setuptools.find_packages('src'),
install_requires=open('requirements.txt').readl | ines(),
entry_points={
'console_scripts': ['lsi = lsi.lsi:main']
| }
)
|
annarev/tensorflow | tensorflow/python/feature_column/feature_column_lib.py | Python | apache-2.0 | 1,597 | 0.000626 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ====================================================== | ========================
"""FeatureColumns: tools for ingesting and representing features."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import,line-too-long,wildcard-import,g-bad-import-order
from tensorflow.python.feature_column.feature_column import *
from tensorflow.python.feature_colu | mn.feature_column_v2 import *
from tensorflow.python.feature_column.sequence_feature_column import *
from tensorflow.python.feature_column.serialization import *
# We import dense_features_v2 first so that the V1 DenseFeatures is the default
# if users directly import feature_column_lib.
from tensorflow.python.keras.feature_column.dense_features_v2 import *
from tensorflow.python.keras.feature_column.dense_features import *
from tensorflow.python.keras.feature_column.sequence_feature_column import *
# pylint: enable=unused-import,line-too-long
|
BearlyKoalafied/GGGGobbler | db/db.py | Python | mit | 9,293 | 0.003228 | import os.path
import sqlite3
from util import filepather
from db.data_structs import StaffPost
DB_FILE_NAME = "GGGGobbler.sqlite"
class DAO:
def __init__(self):
if os.path.isfile(filepather.relative_file_path(__file__, DB_FILE_NAME)):
self.open()
else:
self.open()
self.create_tables()
def open(self):
self.db = sqlite3.connect(filepather.relative_file_path(__file__, DB_FILE_NAME))
def commit(self):
self.db.commit()
def close(self):
self.db.commit()
self.db.close()
def rollback(self):
self.db.rollback()
def create_tables(self):
"""
db setup function
"""
cur = self.db.cursor()
try:
cur.execute("""CREATE TABLE poethread (
poethread_id TEXT PRIMARY KEY,
poethread_page_count INTEGER
)""")
cur.execute("""CREATE TABLE redthread (
redthread_id TEXT PRIMARY KEY,
poethread_id TEXT,
FOREIGN KEY(poethread_id) REFERENCES poethread(poethread_id)
)""")
cur.execute("""CREATE TABLE comment (
comment_id TEXT PRIMARY KEY,
comment_order_index INTEGER,
redthread_id TEXT,
FOREIGN KEY(redthread_id) REFERENCES redthread(redthread_id)
)""")
cur.execute("""CREATE TABLE staffpost (
staffpost_id TEXT PRIMARY KEY,
poethread_id TEXT,
staffpost_text TEXT,
staffpost_author TEXT,
staffpost_date TEXT,
FOREIGN KEY(poethread_id) REFERENCES poethread(poethread_id)
)""")
finally:
cur.close()
def get_old_staff_posts_by_thread_id(self, poe_thread_id):
"""
returns a list of StaffPosts of posts stored in the db for the specified thread
"""
cur = self.db.cursor()
try:
cur.execute("SELECT staffpost_id, staffpost_author, staffpost_text, | staffpost_date "
"FROM staffpost WHERE poethread_id = ?", (poe_thread_id,))
results = cur.fetchall()
if results is None:
return []
return [StaffPost(result[0], poe_thread_id, result[1], result[2], result[3])
for result in results]
finally:
cur.close()
def poe_thread_page_count(self, poe_thread_id):
| """
gets the recorded page counter of given thread
"""
cur = self.db.cursor()
try:
cur.execute("SELECT poethread_page_count FROM poethread WHERE poethread_id = ?",
(poe_thread_id,))
row = cur.fetchone()
return None if row is None else row[0]
finally:
cur.close()
def poe_thread_exists(self, poe_thread_id):
"""
checks if the given thread is in the db
"""
cur = self.db.cursor()
try:
cur.execute("SELECT 1 FROM poethread WHERE poethread_id = ?", (poe_thread_id,))
return False if cur.fetchone() is None else True
finally:
cur.close()
def add_poe_thread(self, poe_thread_id, page_count):
"""
        adds a new pathofexile.com thread record
"""
cur = self.db.cursor()
try:
cur.execute("INSERT INTO poethread (poethread_id, poethread_page_count) "
"VALUES (?, ?)", (poe_thread_id, page_count))
finally:
cur.close()
def get_staff_posts_by_id(self, ids):
"""
gets a list of staffposts that correspond to the given list of ids
"""
cur = self.db.cursor()
try:
cur.execute("SELECT * FROM staffpost WHERE staffpost_id IN (%s)" %
", ".join("?" * len(ids)), tuple(ids))
results = cur.fetchall()
return [StaffPost(result[0], result[1], result[3], result[2], result[4])
for result in results]
finally:
cur.close()
def add_staff_posts(self, posts):
"""
adds a given list of StaffPosts to the db
"""
cur = self.db.cursor()
try:
params = []
for post in posts:
params.append((post.post_id, post.thread_id, post.md_text, post.author, post.date))
if params != []:
cur.executemany("INSERT INTO staffpost (staffpost_id, poethread_id, staffpost_text, "
"staffpost_author, staffpost_date)"
" VALUES (?, ?, ?, ?, ?)", params)
finally:
cur.close()
def update_staff_post(self, post):
"""
updates post text in the db that matches the given post's id
"""
cur = self.db.cursor()
try:
cur.execute("UPDATE staffpost SET staffpost_text = ? WHERE staffpost_id = ?",
(post.md_text, post.post_id))
finally:
cur.close()
def update_poe_thread(self, poe_thread_id, page_count):
"""
        updates a thread's stored page count
"""
cur = self.db.cursor()
try:
cur.execute("UPDATE poethread SET poethread_page_count = ? "
"WHERE poethread_id = ?", (page_count, poe_thread_id))
finally:
cur.close()
def get_reddit_threads_linking_here(self, poe_thread_id):
"""
returns a list of reddit thread ids that link to the given poe thread
"""
cur = self.db.cursor()
try:
cur.execute("SELECT redthread_id FROM redthread "
"WHERE poethread_id = ?", (poe_thread_id,))
results = cur.fetchall()
return [result[0] for result in results]
finally:
cur.close()
def get_comment_ids_by_thread(self, reddit_thread_id):
"""
returns an ordered list of ids for the bot comments in the specified thread
"""
cur = self.db.cursor()
try:
cur.execute("SELECT comment_id, comment_order_index FROM comment "
"WHERE redthread_id = ?", (reddit_thread_id,))
results = cur.fetchall()
count = len(results)
out = []
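            # repeatedly pop the pair with the smallest order index so the
            # comment ids come out in posting order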
for i in range(count):
                nxt = min(results, key=lambda k: k[1])
                out.append(nxt[0])
                results.remove(nxt)
return out
finally:
cur.close()
def reddit_thread_exists(self, reddit_thread_id):
"""
checks whether we have a specified reddit post recorded
"""
cur = self.db.cursor()
try:
cur.execute("SELECT 1 FROM redthread WHERE redthread_id = ?", (reddit_thread_id,))
return False if cur.fetchone() is None else True
finally:
cur.close()
def add_reddit_thread(self, reddit_thread_id, poe_thread_id):
"""
adds a new reddit post record
"""
cur = self.db.cursor()
try:
cur.execute("INSERT INTO redthread (redthread_id, poethread_id) VALUES (?, ?)",
(reddit_thread_id, poe_thread_id))
finally:
cur.close()
def add_reddit_comment(self, comment_id, reddit_thread_id, ordinal):
"""
adds a new reddit comment record
"""
cur = self.db.cursor()
try:
cur.execute("INSERT INTO comment (comment_id, comment_order_index, redthread_id)"
" VALUES (?, ?, ?)", (comment_id, ordinal, reddit_thread_id |
kekeller/german_endings | analyze_data/user_input.py | Python | mit | 1,854 | 0.055556 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import string
from time import strftime
def append_new_end(word,user_end):
space_count = word.count('~') # count number of placeholders
if space_count > 0:
total_word = word[:-space_count] + user_end # supplied from raw input
else:
total_word = word
return total_word
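# e.g. append_new_end(u"klein~~", u"en") -> u"kleinen": the two '~'
# placeholders are stripped and the ending the user typed is appended
# (the word is an invented example).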
def create_updated_array(text_complete,text_new,number_sentences):
sentence = 0
while sentence < number_sentences:
word = 0
print
print text_new[sentence]
prin | t
while word < len(text_new[sentence]):
user_end = raw_input(text_new[sentence][word].encode('utf-8') + ' ')
total_word = append_new_end(text_new[sentence][word],user_end)
total_word.encode('utf-8')
text_complete[sentence].append(total_word)
word += 1
sentence += 1
return text_complete
def print_output(text_complete,text_orig,number_sent | ences):
sentence = 0
while sentence < number_sentences:
contained = [x for x in text_complete[sentence] if x not in text_orig[sentence]]
print
print "Original Sentence: " ,
write_output(strftime("%Y-%m-%d %H:%M:%S"))
write_output('\n')
write_output("Original Sentence: ")
write_output('\n')
for each in text_orig[sentence]:
print each,
write_output(each.encode('utf-8') + ' '),
print
write_output('\n')
print
write_output("User Completed Sentence: ")
write_output('\n')
print "User completed text: " ,
for each in text_complete[sentence]:
print each,
write_output(each.encode('utf-8') + ' '),
print
print
write_output('\n')
write_output("User Mistakes: ")
write_output('\n')
print "User Mistakes: "
for each in contained:
print each
write_output(each.encode('utf-8') + ' '),
print
print
sentence += 1
def write_output(input_text):
with open('output.txt', 'a') as f:
f.write(input_text)
|
jupyter/jupyterlab | scripts/generate_changelog.py | Python | bsd-3-clause | 1,155 | 0.000866 | ''' Generate a changelog for JupyterLab from the GitHub releases '''
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
import re
import requests
import dateutil.parser
# Get the list of releases.
r = requests.get('https://api.github.com/repos/jupyterlab/jupyterlab/releases')
# Raise immediately if the GitHub API request failed.
r.raise_for_status()
releases = r.json()
with open('CHANGELOG.md', 'w') as f:
f.write('# JupyterLab Changelog\n\n')
for release in releases:
name = release['name']
tag_name = release['tag_name']
tag_url = release['html_url']
tag_date = dateutil.parser.parse(release['published_at'])
notes = release['body'].replace('\r\n', '\n' | )
notes = re.sub(r'#([0-9]+)',
r'[#\1](https://github.com/jupyterlab/jupyterlab/issues/\1)',
notes)
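        # e.g. "Fixes #123" becomes
        # "Fixes [#123](https://github.com/jupyterlab/jupyterlab/issues/123)"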
title = f'{name} ({tag_name})' if name != tag_name else name
f.write(f'## [{title}]({tag_url})\n')
f.write(f'#### {tag_date.strftime("%b %d, %Y")}\n')
| f.write(notes)
f.write('\n\n')
|
peterbrittain/asciimatics | asciimatics/__init__.py | Python | apache-2.0 | 414 | 0.004831 | """
Asciimatics is a package to help people create full-screen text UIs (from interactive forms to
ASCII animations) on any platform. It is licensed under the Apache Software Foundation License 2.0.
"""
__author__ = 'Peter Brittain'
try:
from .version import version
except ImportError:
# Someone is running straight fro | m the GIT repo - dummy out the version
| version = "0.0.0"
__version__ = version
|
opesci/devito | devito/ir/equations/equation.py | Python | mit | 9,325 | 0.001287 | from cached_property import cached_property
import sympy
from devito.ir.equations.algorithms import dimension_sort, lower_exprs
from devito.finite_differences.differentiable import diff2sympy
from devito.ir.support import (IterationSpace, DataSpace, Interval, IntervalGroup,
Stencil, detect_accesses, detect_oobs, detect_io,
build_intervals, build_iterators)
from devito.symbolics import CondEq, IntDiv, uxreplace
from devito.tools import Pickable, frozendict
from devito.types import Eq
__all__ = ['LoweredEq', 'ClusterizedEq', 'DummyEq']
class IREq(sympy.Eq):
_state = ('is_Increment', 'ispace', 'dspace', 'conditionals', 'implicit_dims')
@property
def is_Scalar(self):
return self.lhs.is_Symbol
is_scalar = is_Scalar
@property
def is_Tensor(self):
return self.lhs.is_Indexed
@property
def is_Increment(self):
return self._is_Increment
@property
def ispace(self):
return self._ispace
@property
def dspace(self):
return self._dspace
@cached_property
def dimensions(self):
# Note: some dimensions may be in the iteration space but not in the
# data space (e.g., a DerivedDimension); likewise, some dimensions may
# be in the data space but not in the iteration space (e.g., when a
# function is indexed with integers only)
return set(self.dspace.dimensions) | set(self.ispace.dimensions)
@property
def implicit_dims(self):
return self._implicit_dims
@cached_property
def conditionals(self):
return self._conditionals or frozendict()
@property
def directions(self):
return self.ispace.directions
@property
def dtype(self):
return self.lhs.dtype
@cached_property
def grid(self):
grids = set()
for f in self.dspace.parts:
if f.is_DiscreteFunction:
grids.add(f.grid)
if len(grids) == 1:
return grids.pop()
else:
return None
@property
def state(self):
return {i: getattr(self, i) for i in self._state}
def apply(self, func):
"""
Apply a callable to `self` and each expr-like attribute carried by `self`,
thus triggering a reconstruction.
"""
args = [func(self.lhs), func(self.rhs)]
kwargs = dict(self.state)
kwargs['conditionals'] = {k: func(v) for k, v in self.conditionals.items()}
return self.func(*args, **kwargs)
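    # e.g. eq.apply(lambda e: uxreplace(e, {old: new})) rebuilds the equation
    # with the substitution applied to both sides and to the conditionals
    # (`old` and `new` being whatever symbols the caller wants swapped).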
class LoweredEq(IREq):
"""
LoweredEq(devito.Eq)
LoweredEq(devito.LoweredEq, **kwargs)
LoweredEq(lhs, rhs, **kwargs)
A SymPy equation with associated IterationSpace and DataSpace.
When created as ``LoweredEq(devito.Eq)``, the iteration and data spaces are
automatically derived from analysis of ``expr``.
When created as ``LoweredEq(devito.LoweredEq, **kwargs)``, the keyword
arguments can be anything that appears in ``LoweredEq._state`` (i.e.,
ispace, dspace, ...).
When created as ``LoweredEq(lhs, rhs, **kwargs)``, *all* keywords in
``LoweredEq._state`` must appear in ``kwargs``.
"""
_state = IREq._state + ('reads', 'writes')
def __new__(cls, *args, **kwargs):
if len(args) == 1 and isinstance(args[0], LoweredEq):
# origin: LoweredEq(devito.LoweredEq, **kwargs)
input_expr = args[0]
expr = sympy.Eq.__new__(cls, *input_expr.args, evaluate=False)
for i in cls._state:
setattr(expr, '_%s' % i, kwargs.get(i) or getattr(input_expr, i))
return expr
elif len(args) == 1 and isinstance(args[0], Eq):
# origin: LoweredEq(devito.Eq)
input_expr = expr = args[0]
elif len(args) == 2:
expr = sympy.Eq.__new__(cls, *args, evaluate=False)
for i in cls._state:
setattr(expr, '_%s' % i, kwargs.pop(i))
return expr
else:
raise ValueError("Cannot construct LoweredEq from args=%s "
"and kwargs=%s" % (str(args), str(kwargs)))
# Well-defined dimension ordering
ordering = dimension_sort(expr)
# Analyze the expression
mapper = detect_accesses(expr)
oobs = detect_oobs(mapper)
conditional_dimensions = [i for i in ordering if i.is_Conditional]
# Construct Intervals for IterationSpace and DataSpace
intervals = build_intervals(Stencil.union(*mapper.values()))
iintervals = [] # iteration Intervals
dintervals = [] # data Intervals
for i in intervals:
d = i.dim
if d in oobs:
iintervals.append(i.zero())
dintervals.append(i)
else:
iintervals.append(i.zero())
dintervals.append(i.zero())
# Construct the IterationSpace
iintervals = IntervalGroup(iintervals, relations=ordering.relations)
iterators = build_iterators(mapper)
ispace = IterationSpace(iintervals, iterators)
# Construct the DataSpace
dintervals.extend([Interval(i, 0, 0) for i in ordering
if i not in ispace.dimensions + conditional_dimensions])
parts = {k: IntervalGroup(build_intervals(v)).add(iintervals)
for k, v in mapper.items() if k}
dspace = DataSpace(dintervals, parts)
# Construct the conditionals and replace the ConditionalDimensions in `expr`
conditionals = {}
for d in conditional_dimensions:
if d.condition is None:
conditionals[d] = CondEq(d.parent % d.factor, 0)
else:
conditionals[d] = diff2sympy(lower_exprs(d.condition))
if d.factor is not None:
expr = uxreplace(expr, {d: IntDiv(d.index, d.factor)})
conditionals = frozendict( | conditionals)
# Lower all Differentiable operations into SymPy operations
rhs = diff2sympy(expr.rhs)
# F | inally create the LoweredEq with all metadata attached
expr = super(LoweredEq, cls).__new__(cls, expr.lhs, rhs, evaluate=False)
expr._dspace = dspace
expr._ispace = ispace
expr._conditionals = conditionals
expr._reads, expr._writes = detect_io(expr)
expr._is_Increment = input_expr.is_Increment
expr._implicit_dims = input_expr.implicit_dims
return expr
@property
def reads(self):
return self._reads
@property
def writes(self):
return self._writes
def xreplace(self, rules):
return LoweredEq(self.lhs.xreplace(rules), self.rhs.xreplace(rules), **self.state)
def func(self, *args):
return super(LoweredEq, self).func(*args, **self.state, evaluate=False)
class ClusterizedEq(IREq, Pickable):
"""
ClusterizedEq(devito.IREq, **kwargs)
ClusterizedEq(lhs, rhs, **kwargs)
A SymPy equation with associated IterationSpace and DataSpace.
There are two main differences between a LoweredEq and a
ClusterizedEq:
* In a ClusterizedEq, the iteration and data spaces must *always*
be provided by the caller.
* A ClusterizedEq is "frozen", meaning that any call to ``xreplace``
will not trigger re-evaluation (e.g., mathematical simplification)
of the expression.
These two properties make a ClusterizedEq suitable for use in a Cluster.
"""
def __new__(cls, *args, **kwargs):
if len(args) == 1:
# origin: ClusterizedEq(expr, **kwargs)
input_expr = args[0]
expr = sympy.Eq.__new__(cls, *input_expr.args, evaluate=False)
for i in cls._state:
v = kwargs[i] if i in kwargs else getattr(input_expr, i, None)
setattr(expr, '_%s' % i, v)
elif len(args) == 2:
# origin: ClusterizedEq(lhs, rhs, **kwargs)
expr = sympy.Eq.__new__(cls, *args, evaluate=False)
for i in cls._state:
setattr(expr, '_%s' % i, kwargs.pop(i))
else:
            raise ValueError("Cannot construct ClusterizedEq from args=%s "
                             "and kwargs=%s" % (str(args), str(kwargs))) |
alexryndin/ambari | ambari-server/src/main/resources/stacks/ADH/1.0/services/YARN/package/scripts/service_check.py | Python | apache-2.0 | 5,633 | 0.008699 | """
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ambari Agent
"""
from resource_management.libraries.functions.version import compare_versions
from resource_management import *
import sys
import ambari_simplejson as json # simplejson is much faster comparing to Python 2.6 json module and has the same functions set.
import re
import subprocess
from ambari_commons import os_utils
from ambari_commons import OSConst
from ambari_commons.os_family_impl import OsFamilyImpl
from resource_management.libraries.functions.get_user_call_output import get_user_call_output
CURL_CONNECTION_TIMEOUT = '5'
class ServiceCheck(Script):
def service_check(self, env):
pass
@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
class ServiceCheckWindows(ServiceCheck):
def service_check(self, env):
import params
env.set_params(params)
yarn_exe = os_utils.quote_path(os.path.join(params.yarn_home, "bin", "yarn.cmd"))
run_yarn_check_cmd = "cmd /C %s node -list" % yarn_exe
component_type = 'rm'
if params.hadoop_ssl_enabled:
component_address = params.rm_webui_https_address
else:
component_address = params.rm_webui_address
#temp_dir = os.path.abspath(os.path.join(params.hadoop_home, os.pardir)), "/tmp"
temp_dir = os.path.join(os.path.dirname(params.hadoop_home), "temp")
validateStatusFileName = "validateYarnComponentStatusWindows.py"
validateStatusFilePath = os.path.join(temp_dir, validateStatusFileName)
python_executable = sys.executable
validateStatusCmd = "%s %s %s -p %s -s %s" % (python_executable, validateStatusFilePath, component_type, component_address, params.hadoop_ssl_enabled)
if params.security_enabled:
kinit_cmd = "%s -kt %s %s;" % (params.kinit_path_local, params.smoke_user_keytab, params.smokeuser)
smoke_cmd = kinit_cmd + ' ' + validateStatusCmd
else:
smoke_cmd = validateStatusCmd
File(validateStatusFilePath,
content=StaticFile(validateStatusFileName)
)
Execute(smoke_cmd,
tries=3,
try_sleep=5,
logoutput=True
)
Execute(run_yarn_check_cmd, logoutput=True)
@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
class ServiceCheckDefault(ServiceCheck):
def get_app_name(self, yarn_out):
        m = re.search(r"appTrackingUrl=(.*),\s", yarn_out)
        if m is None:
cmd = 'yarn application -list -appStates FINISHED |sort |tail -3 |head -1'
out = subprocess.check_output(cmd, shell=True)
s = out.split()
application_name = s[0]
else:
app_url = m.group(1)
splitted_app_url = str(app_url).split('/')
for item in splitted_app_url:
if "application" in item:
application_name = item
break
return application_name
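    # e.g. yarn_out containing "appTrackingUrl=http://rm:8088/proxy/application_1234_0001/, ..."
    # yields "application_1234_0001"; without a match we fall back to parsing
    # `yarn application -list` output (the values above are illustrative).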
def service_check(self, env):
import params
env.set_params(params)
path_to_distributed_shell_jar = "/usr/lib/hadoop-yarn/hadoop-yarn-applications-distributedshell*.jar"
yarn_distrubuted_shell_check_cmd = format("yarn org.apache.hadoop.yarn.applications.distributedshell.Client "
"-shell_command ls -num_containers {number_of_nm} -jar {path_to_distributed_shell_jar}")
if params.security_enabled:
kinit_cmd = format("{kinit_path_local} -kt {smoke_user_keytab} {smokeuser_principal};")
smoke_cmd = format("{kinit_cmd | } {yarn_distrubuted_shell_check_cmd}")
else:
smoke_cmd = yarn_distrubuted_shell_check_cmd
return_code, out = shell.checked_call(smoke_cmd,
path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
user=params.smokeuser,
| )
application_name = self.get_app_name(out)
json_response_received = False
for rm_webapp_address in params.rm_webapp_addresses_list:
info_app_url = params.scheme + "://" + rm_webapp_address + "/ws/v1/cluster/apps/" + application_name
get_app_info_cmd = "curl --negotiate -u : -ksL --connect-timeout " + CURL_CONNECTION_TIMEOUT + " " + info_app_url
return_code, stdout, _ = get_user_call_output(get_app_info_cmd,
user=params.smokeuser,
path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
)
try:
json_response = json.loads(stdout)
json_response_received = True
if json_response['app']['state'] != "FINISHED" or json_response['app']['finalStatus'] != "SUCCEEDED":
raise Exception("Application " + app_url + " state/status is not valid. Should be FINISHED/SUCCEEDED.")
except Exception as e:
pass
if not json_response_received:
raise Exception("Could not get json response from YARN API")
if __name__ == "__main__":
ServiceCheck().execute()
|
Storm7874/Utils | Messenger.py | Python | mit | 3,653 | 0.00438 | ## Messenger application. Based on PyMessenger by Tijndagamer
## Released under the MIT licence.
# You rock, man.
## Import stage.
try:
from Notify import Main
Notify = Main()
Notify.SetMode("C")
Notify.Success("Successfully imported Notify.")
except(ImportError):
print("[!] Failed to import Notify.py")
try:
from utilsv2 import Main as UtilsMain
utils = UtilsMain()
utils.SetDeviceEnvironment(0)
except:
print("[!] Failed to import utils")
exit()
import socket
import time
import sys
class MessengerClient():
def __init__(self):
self.HostIP = ''
self.Port = 5005
self.BufferSize = 1024
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.message = ""
self.RecMessage = ""
def Connect(self):
Notify.Info("Connecting to '{}' ...".format(self.HostIP))
self.sock.connect((self.HostIP, self.Port))
def GetHostIP(self):
self.HostIP = socket.gethostbyname(socket.gethostname())
Notify.Info("Host IP: {}".format(self.HostIP))
def SendMessage(self):
try:
            # sockets carry bytes, so encode the str before sending
            self.sock.send(self.message.encode())
except:
Notify.Error("Unable to send message. ")
    def CheckForRecMessage(self):
        try:
            # block until the peer sends something, then decode it
            self.RecMessage = self.sock.recv(self.BufferSize).decode()
            if self.RecMessage == "--LEFT--":
                Notify.Warning("Server disconnected. Terminating Connection.")
                self.sock.close()
                return
        except socket.error:
            Notify.Error("Connection lost.")
            return
        Notify.Cyan()
        print("({})> {}".format(self.HostIP, str(self.RecMessage)))
    def MPL(self):
        while True:
            # current wall-clock time for the banner
            hour, minute = time.strftime("%H"), time.strftime("%M")
            print("""
    |------------------------------------------------------|
    | {}:{}                                                {}|
    |------------------------------------------------------|
            """.format(hour, minute, self.HostIP))
self.message = input("[>] ")
if self.message.upper() == "EXIT":
self.sock.close()
Notify.Error("Connection Closed.")
else:
self.SendMessage()
self.CheckForRecMessage()
class MessengerServer():
def __init__(self):
self.__HostIP = ""
self.__Port = 5005
self.__BufferSize = 1024
self.__sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.__RecMessage = ''
self.__conn = 0
self.__addr = ''
def InitiateServer(self):
Notify.Info("Starting Server.")
self.__sock.bind((self.__HostIP, self.__Port))
self.__sock.listen(1)
def ConnectToServer(self):
self.__conn, self.__addr = self.__sock.accept()
print("Connection Address: {}".format(self.__addr))
def MPL(self):
##Rec
while True:
            self.__RecMessage = self.__conn.recv(self.__BufferSize).decode()
if not self.__RecMessage:
| break
            if self.__RecMessage == "--LEFT--":
                Notify.Error("Client has left. Terminating Connection")
                self.__conn.close()
                break
            # show what the client sent
            print("({})> {}".format(self.__addr[0], self.__RecMessage))
def Main():
Local = MessengerClient()
Server = MessengerServer()
print("""
|---------------|
| [1] Server |
| [2] Client |
| [3] Exit |
|---------------|
| {}:{}
|------- | --------|
""")
while True:
menuchoice = int(input("[?]> "))
if menuchoice not in [1,2,3]:
Notify.Error("Please enter a valid selection.")
else:
break
if menuchoice == 1:
Notify.Info("Starting Server...")
|
tecdct2941/nxos_dashboard | repeated_timer.py | Python | apache-2.0 | 721 | 0.008322 | from threading import Timer
class RepeatedTimer(object):
def __init__(self, interval, function, *args, **kwargs):
self._timer = None
self.interval = inte | rval
self.function = function
self.args = args
self.kwargs = kwargs
self.is_running = False
self.start()
def _ru | n(self):
self.is_running = False
self.start()
self.function(*self.args, **self.kwargs)
def start(self):
if not self.is_running:
self._timer = Timer(self.interval, self._run)
self._timer.start()
self.is_running = True
def stop(self):
self._timer.cancel()
self.is_running = False
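# Example usage (interval and callable are illustrative):
#
#   from time import sleep
#   rt = RepeatedTimer(1, print, "tick")  # starts itself on construction
#   sleep(5)
#   rt.stop()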
|
OakNinja/svtplay-dl | lib/svtplay_dl/service/disney.py | Python | mit | 4,847 | 0.001444 | # ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
from __future__ import absolute_import
import json
import re
import copy
import os
from svtplay_dl.service import Service, OpenGraphThumbMixin
from svtplay_dl.utils import get_http_data, check_redirect, filenamify
from svtplay_dl.utils.urllib import urlparse
from svtplay_dl.fetcher.hls import HLS, hlsparse
from svtplay_dl.fetcher.http import HTTP
from svtplay_dl.log import log
class Disney(Service, OpenGraphThumbMixin):
supported_domains = ['disney.se', 'video.disney.se']
def get(self, options):
parse = urlparse(self.url)
if parse.hostname == "video.disney.se":
error, data = self.get_urldata()
if error:
log.error("Can't get the page")
return
if self.exclude(options):
return
match = re.search(r"Grill.burger=({.*}):", data)
if not match:
log.error("Can't find video info")
return
jsondata = json.loads(match.group(1))
for n in jsondata["stack"]:
if len(n["data"]) > 0:
for x in n["data"]:
if "flavors" in x:
for i in x["flavors"]:
if i["format"] == "mp4":
yield HTTP(copy.copy(options), i["url"], i["bitrate"])
else:
error, data = self.get_urldata()
if error:
log.error("Cant get the page")
return
match = re.search(r"uniqueId : '([^']+)'", data)
if not match:
log.error("Can't find video info")
return
uniq = match.group(1)
match = re.search("entryId : '([^']+)'", self.get_urldata()[1])
entryid = match.group(1)
match = re.search("partnerId : '([^']+)'", self.get_urldata()[1])
partnerid = match.group(1)
match = re.search("uiConfId : '([^']+)'", self.get_urldata()[1])
uiconfid = match.group(1)
match = re.search("json : ({.*}}),", self.get_urldata()[1])
jsondata = json.loads(match.group(1))
parse = urlparse(self.url)
if len(parse.fragment) > 0:
entry = parse.fragment[parse.fragment.rindex("/")+1:]
if entry in jsondata["idlist"]:
entryid = jsondata["idlist"][entry]
else:
                log.error("Can't find video info")
return
if options.output_auto:
            title = None
            for i in jsondata["playlists"][0]["playlist"]:
                if entryid in i["id"]:
                    title = i["longId"]
                    break
            if title is None:
                title = entryid  # fallback so `title` is never unbound below
directory = os.path.dirname(options.output)
options.service = "disney"
title = "%s-%s" % (title, options.service)
title = filenamify(title)
if len(directory):
options.output = "%s/%s" % (directory, title)
else:
options.output = title
if self.exclude(options):
return
| url = "http | ://cdnapi.kaltura.com/html5/html5lib/v1.9.7.6/mwEmbedFrame.php?&wid=%s&uiconf_id=%s&entry_id=%s&playerId=%s&forceMobileHTML5=true&urid=1.9.7.6&callback=mwi" % \
(partnerid, uiconfid, entryid, uniq)
error, data = get_http_data(url)
if error:
            log.error("Can't get video info")
return
match = re.search(r"mwi\(({.*})\);", data)
jsondata = json.loads(match.group(1))
data = jsondata["content"]
match = re.search(r"window.kalturaIframePackageData = ({.*});", data)
jsondata = json.loads(match.group(1))
ks = jsondata["enviornmentConfig"]["ks"]
if options.output_auto:
name = jsondata["entryResult"]["meta"]["name"]
directory = os.path.dirname(options.output)
options.service = "disney"
title = "%s-%s" % (name, options.service)
title = filenamify(title)
if len(directory):
options.output = "%s/%s" % (directory, title)
else:
options.output = title
url = "http://cdnapi.kaltura.com/p/%s/sp/%s00/playManifest/entryId/%s/format/applehttp/protocol/http/a.m3u8?ks=%s&referrer=aHR0cDovL3d3dy5kaXNuZXkuc2U=&" % (partnerid[1:], partnerid[1:], entryid, ks)
redirect = check_redirect(url)
streams = hlsparse(redirect)
for n in list(streams.keys()):
yield HLS(copy.copy(options), streams[n], n) |
ellonweb/merlin | Core/config.py | Python | gpl-2.0 | 1,487 | 0.004707 | # This file is part of Merlin.
# Merlin is the Copyright (C)2008,2009,2010 of Robin K. Hansen, Elliot Rosemarine, Andreas Jacobsen.
# Individual portions may be copyright by individual contributors, and
# are included in this collective work with permission of the copyright
# owners.
# This program is free software; you can red | istribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; | without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
from ConfigParser import ConfigParser as configparser
def load_config(path):
# Load and parse required config file
try:
config = configparser()
config.optionxform = str
if len(config.read(path)) != 1:
raise IOError
except StandardError:
# Either couldn't read/find the file, or couldn't parse it.
print "Warning! Could not load %s" % (path,)
raise ImportError
else:
return config
Config = load_config("merlin.cfg") |
karllessard/tensorflow | tensorflow/python/keras/activations_test.py | Python | apache-2.0 | 8,461 | 0.004964 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Keras activation functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.python.keras import activations
from tensorflow.python.keras import backend
from tensorflow.python.keras import combinations
from tensorflow.python.keras.layers import advanced_activations
from tensorflow.python.keras.layers import core
from tensorflow.python.keras.layers import serialization
from tensorflow.python.ops import nn_ops as nn
from tensorflow.python.platform import test
def _ref_softmax(values):
m = np.max(values)
e = np.exp(values - m)
return e / np.sum(e)
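# A tiny illustration (not part of the original tests) of why _ref_softmax
# subtracts the max before exponentiating: np.exp would overflow for large
# logits, while shifting by a constant leaves the softmax value unchanged.
def _softmax_shift_invariance_demo():
  v = np.array([1000.0, 1001.0])
  return _ref_softmax(v)  # finite, sums to 1; equals the softmax of [0.0, 1.0]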
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
class KerasActivationsTest(test.TestCase, parameterized.TestCase):
def test_serialization(self):
all_activations = [
'softmax', 'relu', 'elu', 'tanh', 'sigmoid', 'hard_sigmoid', 'linear',
'softplus', 'softsign', 'selu', 'gelu'
]
for name in all_activations:
fn = activations.get(name)
ref_fn = getattr(activations, name)
assert fn == ref_fn
config = activations.serialize(fn)
fn = activations.deserialize(config)
assert fn == ref_fn
def test_serialization_v2(self):
activation_map = {nn.softmax_v2: 'softmax'}
for fn_v2_key in activation_map:
fn_v2 = activations.get(fn_v2_key)
config = activations.serialize(fn_v2)
fn = activations.deserialize(config)
assert fn.__name__ == activation_map[fn_v2_key]
def test_serialization_with_layers(self):
activation = advanced_activations.LeakyReLU(alpha=0.1)
layer = core.Dense(3, activation=activation)
config = serialization.serialize(layer)
deserialized_layer = serialization.deserialize(
config, custom_objects={'LeakyReLU': activation})
self.assertEqual(deserialized_layer.__class__.__name__,
layer.__class__.__name__)
self.assertEqual(deserialized_layer.activation.__class__.__name__,
activation.__class__.__name__)
def test_softmax(self):
x = backend.placeholder(ndim=2)
f = backend.func | tion([x], [activations.softmax(x)])
test_values = np.random.random((2, 5))
result = f([test_values])[0]
expected = _ref_softmax(test_values[0])
self.assertAllClose(result[0], expected, rtol=1e-05)
x = backend.placeholder(ndim=1)
with self.assertRaises(ValueError):
activations.softmax | (x)
def test_temporal_softmax(self):
x = backend.placeholder(shape=(2, 2, 3))
f = backend.function([x], [activations.softmax(x)])
test_values = np.random.random((2, 2, 3)) * 10
result = f([test_values])[0]
expected = _ref_softmax(test_values[0, 0])
self.assertAllClose(result[0, 0], expected, rtol=1e-05)
def test_selu(self):
x = backend.placeholder(ndim=2)
f = backend.function([x], [activations.selu(x)])
alpha = 1.6732632423543772848170429916717
scale = 1.0507009873554804934193349852946
positive_values = np.array([[1, 2]], dtype=backend.floatx())
result = f([positive_values])[0]
self.assertAllClose(result, positive_values * scale, rtol=1e-05)
negative_values = np.array([[-1, -2]], dtype=backend.floatx())
result = f([negative_values])[0]
true_result = (np.exp(negative_values) - 1) * scale * alpha
self.assertAllClose(result, true_result)
def test_softplus(self):
def softplus(x):
return np.log(np.ones_like(x) + np.exp(x))
x = backend.placeholder(ndim=2)
f = backend.function([x], [activations.softplus(x)])
test_values = np.random.random((2, 5))
result = f([test_values])[0]
expected = softplus(test_values)
self.assertAllClose(result, expected, rtol=1e-05)
def test_softsign(self):
def softsign(x):
return np.divide(x, np.ones_like(x) + np.absolute(x))
x = backend.placeholder(ndim=2)
f = backend.function([x], [activations.softsign(x)])
test_values = np.random.random((2, 5))
result = f([test_values])[0]
expected = softsign(test_values)
self.assertAllClose(result, expected, rtol=1e-05)
def test_sigmoid(self):
def ref_sigmoid(x):
if x >= 0:
return 1 / (1 + np.exp(-x))
else:
z = np.exp(x)
return z / (1 + z)
sigmoid = np.vectorize(ref_sigmoid)
x = backend.placeholder(ndim=2)
f = backend.function([x], [activations.sigmoid(x)])
test_values = np.random.random((2, 5))
result = f([test_values])[0]
expected = sigmoid(test_values)
self.assertAllClose(result, expected, rtol=1e-05)
def test_hard_sigmoid(self):
def ref_hard_sigmoid(x):
x = (x * 0.2) + 0.5
z = 0.0 if x <= 0 else (1.0 if x >= 1 else x)
return z
hard_sigmoid = np.vectorize(ref_hard_sigmoid)
x = backend.placeholder(ndim=2)
f = backend.function([x], [activations.hard_sigmoid(x)])
test_values = np.random.random((2, 5))
result = f([test_values])[0]
expected = hard_sigmoid(test_values)
self.assertAllClose(result, expected, rtol=1e-05)
def test_relu(self):
x = backend.placeholder(ndim=2)
f = backend.function([x], [activations.relu(x)])
positive_values = np.random.random((2, 5))
result = f([positive_values])[0]
self.assertAllClose(result, positive_values, rtol=1e-05)
negative_values = np.random.uniform(-1, 0, (2, 5))
result = f([negative_values])[0]
expected = np.zeros((2, 5))
self.assertAllClose(result, expected, rtol=1e-05)
def test_gelu(self):
def gelu(x, approximate=False):
if approximate:
return 0.5 * x * (1.0 + np.tanh(
np.sqrt(2.0 / np.pi) * (x + 0.044715 * np.power(x, 3))))
else:
from scipy.stats import norm # pylint: disable=g-import-not-at-top
return x * norm.cdf(x)
x = backend.placeholder(ndim=2)
f = backend.function([x], [activations.gelu(x)])
test_values = np.random.random((2, 5))
result = f([test_values])[0]
expected = gelu(test_values)
self.assertAllClose(result, expected, rtol=1e-05)
f = backend.function([x], [activations.gelu(x, True)])
test_values = np.random.random((2, 5))
result = f([test_values])[0]
expected = gelu(test_values, True)
self.assertAllClose(result, expected, rtol=1e-05)
def test_elu(self):
x = backend.placeholder(ndim=2)
f = backend.function([x], [activations.elu(x, 0.5)])
test_values = np.random.random((2, 5))
result = f([test_values])[0]
self.assertAllClose(result, test_values, rtol=1e-05)
negative_values = np.array([[-1, -2]], dtype=backend.floatx())
result = f([negative_values])[0]
true_result = (np.exp(negative_values) - 1) / 2
self.assertAllClose(result, true_result)
def test_tanh(self):
test_values = np.random.random((2, 5))
x = backend.placeholder(ndim=2)
exp = activations.tanh(x)
f = backend.function([x], [exp])
result = f([test_values])[0]
expected = np.tanh(test_values)
self.assertAllClose(result, expected, rtol=1e-05)
def test_exponential(self):
test_values = np.random.random((2, 5))
x = backend.placeholder(ndim=2)
exp = activations.exponential(x)
f = backend.function([x], [exp])
result = f([test_values])[0]
expected = np.exp(test_values)
self.assertAllClose(result, expected, rtol=1e-05)
def test_linear(self):
x = np.random.random((10, 5))
    self.assertAllClose(x, activations.linear(x))
bytedance/fedlearner | web_console_v2/api/test/fedlearner_webconsole/mmgr/model_test.py | Python | apache-2.0 | 6,763 | 0 | # Copyright 2021 The FedLearner Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
import unittest
from unittest.mock import MagicMock, patch
from testing.common import BaseTestCase
from fedlearner_webconsole.db import db, get_session
from fedlearner_webconsole.mmgr.models import Model
from fedlearner_webconsole.mmgr.models import ModelState
from fedlearner_webconsole.mmgr.service import ModelService
from fedlearner_webconsole.job.models import Job, JobType, JobState
from fedlearner_webconsole.utils.k8s_cache import Event, EventType, ObjectType
class ModelTest(BaseTestCase):
@patch(
'fedlearner_webconsole.mmgr.service.ModelService.get_checkpoint_path')
def setUp(self, mock_get_checkpoint_path):
super().setUp()
self.model_service = ModelService(db.session)
self.train_job = Job(name='train-job',
job_type=JobType.NN_MODEL_TRANINING,
workflow_id=1,
project_id=1)
self.eval_job = Job(name='eval-job',
job_type=JobType.NN_MODEL_EVALUATION,
workflow_id=1,
project_id=1)
mock_get_checkpoint_path.return_value = 'output'
self.model_service.create(job=self.train_job, parent_job_name=None)
model = db.session.query(Model).filter_by(
job_name=self.train_job.name).one()
self.model_service.create(job=self.eval_job,
parent_job_name=model.job_name)
db.session.add(self.train_job)
db.session.add(self.eval_job)
db.session.commit()
@patch('fedlearner_webconsole.mmgr.service.ModelService.plot_metrics')
def test_on_job_update(self, mock_plot_metrics: MagicMock):
mock_plot_metrics.return_value = 'plot metrics return'
# TODO: change get_session to db.session_scope
with get_session(db.engine) as session:
model = session.query(Model).filter_by(
job_name=self.train_job.name).one()
self.assertEqual(model.state, ModelState.COMMITTED.value)
train_job = session.query(Job).filter_by(name='train-job').one()
train_job.state = JobState.STARTED
session.commit()
# TODO: change get_session to db.session_scope
with get_session(db.engine) as session:
train_job = session.query(Job).filter_by(name='train-job').one()
train_job.state = JobState.STARTED
model = session.query(Model).filter_by(
job_name=self.train_job.name).one()
model_service = ModelService(session)
model_service.on_job_update(train_job)
self.assertEqual(model.state, ModelState.RUNNING.value)
session.commit()
# TODO: change get_session to db.session_scope
with get_session(db.engine) as session:
train_job = session.query(Job).filter_by(name='train-job').one()
train_job.state = JobState.COMPLETED
model = session.query(Model).filter_by(
job_name=self.train_job.name).one()
model_service = ModelService(session)
model_service.on_job_update(train_job)
self.assertEqual(model.state, ModelState.SUCCEEDED.value)
session.commit()
# TODO: change get_session to db.session_scope
with get_session(db.engine) as session:
train_job = session.query(Job).filter_by(name='train-job').one()
train_job.state = JobState.FAILED
model = session.query(Model).filter_by(
job_name=self.train_job.name).one()
model_service = ModelService(session)
model_service.on_job_update(train_job)
self.assertEqual(model.state, ModelState.FAILED.value)
session.commit()
def test_hook(self):
train_job = Job(id=0,
state=JobState.STARTED,
name='nn-train',
job_type=JobType.NN_MODEL_TRANINING,
workflow_id=0,
project_id=0)
db.session.add(train_job)
db.session.commit()
event = Event(flapp_name='nn-train',
event_type=EventType.ADDED,
obj_type=ObjectType.FLAPP,
obj_dict={})
self.model_service.workflow_hook(train_job)
model = Model.query.filter_by(job_name='nn-train').one()
self.assertEqual(model.state, ModelState.COMMITTED.value)
event.event_type = EventType.MODIFIED
train_job.state = JobState.STARTED
self.model_service.k8s_watcher_hook(event)
self.assertEqual(model.state, ModelState.RUNNING.value)
train_job.state = JobState.COMPLETED
self.model_service.k8s_watcher_hook(event)
self.assertEqual(model.state, ModelState.SUCCEEDED.value)
train_job.state = JobState.STARTED
self.model_service.k8s_watcher_hook(event)
self.assertEqual(model.state, ModelState.RUNNING.value)
self.assertEqual(model.version, 2)
train_job.state = JobState.STOPPED
self.model_service.k8s_watcher_hook(event)
self.assertEqual(model | .state, ModelState.PAUSED.value)
db.session.rollback()
def test_api(self):
resp = self.get_helper('/api/v2/models/1')
data = self.get_response_data(resp)
self.assertEqual(data.get('id'), 1)
| resp = self.get_helper('/api/v2/models')
model_list = self.get_response_data(resp)
self.assertEqual(len(model_list), 1)
model = Model.query.first()
model.state = ModelState.FAILED.value
db.session.add(model)
db.session.commit()
self.delete_helper('/api/v2/models/1')
resp = self.get_helper('/api/v2/models/1')
data = self.get_response_data(resp)
self.assertEqual(data.get('state'), ModelState.DROPPED.value)
def test_get_eval(self):
model = Model.query.filter_by(job_name=self.train_job.name).one()
self.assertEqual(len(model.get_eval_model()), 1)
if __name__ == '__main__':
unittest.main()
|
abulovic/pgnd-meta | meta/data/tax.py | Python | mit | 12,071 | 0.009693 | import os, sys
from collections import defaultdict
import meta
NO_ENTRY_NAME = 'no_db_entry_name'
NO_ENTRY_RANK = 'no_db_entry_rank'
ranks = { 'superkingdom' : 0,
'kingdom' : 1,
'subkingdom' : 2,
'superphylum' : 3,
'phylum' : 4,
'subphylum' : 5,
'superclass' : 6,
'class' : 7,
'subclass' : 8,
'infraclass' : 9,
'superorder' : 10,
'order' : 11,
'suborder' : 12,
'infraorder' : 13,
'parvorder' : 14,
'superfamily' : 15,
'family' : 16,
'subfamily' : 17,
'tribe' : 18,
'subtribe' : 19,
'genus' : 20,
'subgenus' : 21,
'species group' : 22,
'species subgroup' : 23,
'species' : 24,
'subspecies' : 25,
'varietas' : 26,
'forma' : 27,
'no rank' : 28 }
human = 9606
mouse = 10090
rats = 10114
rodents = 9989
primates = 9443
animalia = 33208
green_plants = 33090
eukaryota = 2759
archea = 2157
bacteria = 2
viruses = 10239
fungi = 4751
euglenozoa = 33682
alveolata = 33630
amoebozoa = 554915
fornicata = 207245
parabasalia = 5719
heterolobosea = 5752
viroids = 12884
stramenopiles = 33634
blastocladiomycota = 451459 #(ne)
chytridiomycota = 4761 #(ne)
cryptomycota = 1031332 #(da)
dikarya = 451864 #(ne)
entomophthoromycota = 1264859 #(da)
glomeromycota = 214504 #(ne)
microsporidia = 6029 #(da)
neocallimastigomycota = 451455 #(da)
other = 28384
unclassified = 12908
artificial = 81077
class TaxTree ():
''' Loads the NCBI taxonomy tree, creates both
parent-child and child-parent relations,
enables parent-child relationship testing and
finding the least common ancestor.
'''
def __init__ (self, parent2child_fname=None, tax_nodes_fname=None):
''' Locates the ncbi taxonomy file and sets the important
        taxonomy assignments (such as animalia, bacteria etc.)
:param parent2child_fname location of the ncbi taxonomy tree file
:param tax_nodes_fname location of the file containing taxid,
organism name and organism rank for each taxid in the tree.
'''
if not parent2child_fname:
parent2child_fname = os.path.join(meta.__path__[0], 'data', 'ncbi_tax_tree')
self.load(parent2child_fname)
if not tax_nodes_fname:
tax_nodes_fname = os.path.join(meta.__path__[0], 'data', 'taxid2namerank')
self.load_taxonomy_data(tax_nodes_fname)
#--------- RELEVANT TAXONOMY ASSIGNMENTS ----------#
self._h_set_relevant_taxonomy_assignments()
self._h_map_taxids_to_relevant_tax_nodes()
def load (self, parent2child_fname):
self.parent_nodes = self._h_get_tax_nodes(parent2child_fname)
self.child_nodes = self._h_populate_child_nodes()
def load_taxonomy_data(self, tax_nodes_fname):
'''
Uses data access object to find organism name and
rank of each of the tax IDs.
        Creates a node of type TaxNode for each tax ID.
        After invoking this method, the instance has a `nodes` attribute
        of type dict(key=tax_id:int, value=node:TaxNode).
'''
self.nodes = {}
total = len(self.parent_nodes)
current = 0
tax_nodes_file = open(tax_nodes_fname, 'r')
readline = tax_nodes_file.readline
while (True):
line = readline()
if not line: break
(taxid, org_name, rank) = line.strip().split('|')
node = TaxNode(org_name, rank)
self.nodes[int(taxid)] = node
tax_nodes_file.close()
def get_org_name(self, taxid):
if taxid not in self.nodes:
return NO_ENTRY_NAME
else:
return self.nodes[taxid].organism_name
def get_org_rank(self, taxid):
if taxid not in self.nodes:
return NO_ENTRY_RANK
else:
return self.nodes[taxid].rank
def is_child (self, child_taxid, parent_taxid):
''' Test if child_taxid is child node of parent_taxid
Node is not the child of itself
'''
# check boundary conditions
if child_taxid == parent_taxid:
return False
if parent_taxid == self.root:
return True
tmp_parent_taxid = child_taxid
while True:
if not self.parent_nodes.has_key(tmp_parent_taxid):
return False
tmp_parent_taxid = self.parent_nodes[tmp_parent_taxid]
if tmp_parent_taxid == self.root:
return False
if tmp_parent_taxid == parent_taxid:
return True
def find_lca (self, taxid_list):
''' Finds the lowest common ancestor of a list of nodes
Args:
taxid_list ([int]): List of tax_ids
Returns:
(int): tax_id of LCA
'''
# Check if all nodes exist (and sum up blast scores)
for taxid in taxid_list:
if taxid != self.root and not self.parent_nodes.has_key(taxid):
try:
raise Exception ("Key error, no element with id " + str(taxid))
except Exception, e:
                sys.stderr.write("{0}\n".format(e))
# Filter out invalid tax_ids - those without parents
taxid_list = filter(lambda tax_id: tax_id == self.root or self.parent_nodes.has_key(tax_id) , taxid_list)
# Check if list is empty
if len(taxid_list) == 0:
try:
raise Exception ("taxid_list is empty, cannot find LCA!")
            except Exception, e:
sys.stderr.write("{0}\n".format(e))
return 1 # Assign to root
# each of the visited nodes remembers how many
# child nodes traversed it
self.num_visited = defaultdict(int)
current_taxids = taxid_list
num_of_nodes = len(current_taxids)
# now find the lowest common ancestor
while (True):
parent_nodes = []
| for taxid in current_taxids:
# root node must not add itself to parent list
if taxid != self.root: parent_taxid = self.parent_nodes[taxid]
else: parent_taxid = None
# if parent exists, append him to parent list
# duplicates ensure that every traversal will count
if parent_taxid: | parent_nodes.append(parent_taxid)
# Check for LCA
self.num_visited[taxid] += 1
if self.num_visited[taxid] == num_of_nodes:
self.lca_root = taxid
return taxid
# refresh current nodes
current_taxids = parent_nodes
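    # Usage sketch (assumes the bundled NCBI dump files; the IDs are the
    # module-level constants defined above):
    #
    #   tree = TaxTree()
    #   lca = tree.find_lca([human, mouse])
    #   print tree.get_org_name(lca), tree.get_org_rank(lca)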
def get_relevant_taxid (self, tax_id):
return self.tax2relevantTax.get(tax_id, -1)
def get_lineage(self,tax_id):
lineage = []
while (True):
if tax_id == self.root:
break
lineage.append(tax_id)
tax_id = self.parent_nodes[tax_id]
return reversed(lineage)
def get_parent_with_rank(self, tax_id, rank):
if tax_id not in self.nodes:
return -1
parent = 0
while (True):
if tax_id == self.root:
return 0
if self.nodes[tax_id].rank == rank:
return tax_id
tax_id = self.parent_nodes[tax_id]
def _h_get_tax_nodes (self, parent2child_fname):
'''Loads the taxonomy nodes in a dictionary
mapping the child to parent node.
'''
# |
mahak/neutron | neutron/ipam/subnet_alloc.py | Python | apache-2.0 | 18,935 | 0 | # Copyright (c) 2015 Hewlett-Packard Co.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import math
import operator
import netaddr
from neutron_lib import constants
from neutron_lib.db import api as db_api
from neutron_lib import exceptions
from oslo_db import exception as db_exc
from oslo_utils import uuidutils
from neutron._i18n import _
from neutron.db import models_v2
from neutron.ipam import driver
from neutron.ipam import exceptions as ipam_exc
from neutron.ipam import requests as ipam_req
from neutron.ipam import utils as ipam_utils
class SubnetAllocator(driver.Pool):
"""Class for handling allocation of subnet prefixes from a subnet pool.
This class leverages the pluggable IPAM interface where possible to
make merging into IPAM framework easier in future cycles.
"""
def __init__(self, subnetpool, context):
super(SubnetAllocator, self).__init__(subnetpool, context)
self._sp_helper = SubnetPoolHelper()
def _lock_subnetpool(self):
"""Lock subnetpool associated row.
This method disallows to allocate concurrently 2 subnets in the same
subnetpool, it's required to ensure non-overlapping cidrs in the same
subnetpool.
"""
with db_api.CONTEXT_READER.using(self._context):
current_hash = (
self._context.session.query(models_v2.SubnetPool.hash)
.filter_by(id=self._subnetpool['id']).scalar())
if current_hash is None:
# NOTE(cbrandily): subnetpool has been deleted
raise exceptions.SubnetPoolNotFound(
subnetpool_id=self._subnetpool['id'])
new_hash = uuidutils.generate_uuid()
# NOTE(cbrandily): the update disallows 2 concurrent subnet allocation
# to succeed: at most 1 transaction will succeed, ot | hers will be
# rolled back and be caught in neutron.db.v2.base
with db_api.CONTEXT_WRITER.using(self._context):
query = (
self._context.session.query(models_v2.SubnetPool).filter_by(
id=self._subnetpool['id'], hash=current_hash))
count = query.update({'hash': new_hash})
if not count:
raise db_exc.RetryRequest(exceptions.SubnetPoolInUse(
subnet_pool_id=self._subnetpool['id | ']))
def _get_allocated_cidrs(self):
with db_api.CONTEXT_READER.using(self._context):
query = self._context.session.query(models_v2.Subnet.cidr)
subnets = query.filter_by(subnetpool_id=self._subnetpool['id'])
return (x.cidr for x in subnets)
def _get_available_prefix_list(self):
prefixes = (x.cidr for x in self._subnetpool.prefixes)
allocations = self._get_allocated_cidrs()
prefix_set = netaddr.IPSet(iterable=prefixes)
allocation_set = netaddr.IPSet(iterable=allocations)
available_set = prefix_set.difference(allocation_set)
available_set.compact()
return sorted(available_set.iter_cidrs(),
key=operator.attrgetter('prefixlen'),
reverse=True)
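    # Illustration (not from the original code): with a single pool prefix of
    # 10.0.0.0/16 and one allocated subnet 10.0.0.0/24, the set difference
    # leaves 10.0.1.0/24, 10.0.2.0/23, ..., 10.0.128.0/17, and the sort above
    # returns the smallest blocks (largest prefixlen) first.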
def _num_quota_units_in_prefixlen(self, prefixlen, quota_unit):
return math.pow(2, quota_unit - prefixlen)
def _allocations_used_by_tenant(self, quota_unit):
subnetpool_id = self._subnetpool['id']
tenant_id = self._subnetpool['tenant_id']
with db_api.CONTEXT_READER.using(self._context):
qry = self._context.session.query(models_v2.Subnet.cidr)
allocations = qry.filter_by(subnetpool_id=subnetpool_id,
tenant_id=tenant_id)
value = 0
for allocation in allocations:
prefixlen = netaddr.IPNetwork(allocation.cidr).prefixlen
value += self._num_quota_units_in_prefixlen(prefixlen,
quota_unit)
return value
def _check_subnetpool_tenant_quota(self, tenant_id, prefixlen):
quota_unit = self._sp_helper.ip_version_subnetpool_quota_unit(
self._subnetpool['ip_version'])
quota = self._subnetpool.get('default_quota')
if quota:
used = self._allocations_used_by_tenant(quota_unit)
requested_units = self._num_quota_units_in_prefixlen(prefixlen,
quota_unit)
if used + requested_units > quota:
raise exceptions.SubnetPoolQuotaExceeded()
def _allocate_any_subnet(self, request):
with db_api.CONTEXT_WRITER.using(self._context):
self._lock_subnetpool()
self._check_subnetpool_tenant_quota(request.tenant_id,
request.prefixlen)
prefix_pool = self._get_available_prefix_list()
for prefix in prefix_pool:
if request.prefixlen >= prefix.prefixlen:
subnet = next(prefix.subnet(request.prefixlen))
gateway_ip = request.gateway_ip
if not gateway_ip:
gateway_ip = subnet.network + 1
pools = ipam_utils.generate_pools(subnet.cidr,
gateway_ip)
return IpamSubnet(request.tenant_id,
request.subnet_id,
subnet.cidr,
gateway_ip=gateway_ip,
allocation_pools=pools)
msg = _("Insufficient prefix space to allocate subnet size /%s")
raise exceptions.SubnetAllocationError(
reason=msg % str(request.prefixlen))
def _allocate_specific_subnet(self, request):
with db_api.CONTEXT_WRITER.using(self._context):
self._lock_subnetpool()
self._check_subnetpool_tenant_quota(request.tenant_id,
request.prefixlen)
cidr = request.subnet_cidr
available = self._get_available_prefix_list()
matched = netaddr.all_matching_cidrs(cidr, available)
if len(matched) == 1 and matched[0].prefixlen <= cidr.prefixlen:
return IpamSubnet(request.tenant_id,
request.subnet_id,
cidr,
gateway_ip=request.gateway_ip,
allocation_pools=request.allocation_pools)
msg = _("Cannot allocate requested subnet from the available "
"set of prefixes")
raise exceptions.SubnetAllocationError(reason=msg)
def allocate_subnet(self, request):
max_prefixlen = int(self._subnetpool['max_prefixlen'])
min_prefixlen = int(self._subnetpool['min_prefixlen'])
if request.prefixlen > max_prefixlen:
raise exceptions.MaxPrefixSubnetAllocationError(
prefixlen=request.prefixlen,
max_prefixlen=max_prefixlen)
if request.prefixlen < min_prefixlen:
raise exceptions.MinPrefixSubnetAllocationError(
prefixlen=request.prefixlen,
min_prefixlen=min_prefixlen)
if isinstance(request, ipam_req.AnySubnetRequest):
return self._allocate_any_subnet(request)
        elif isinstance(request, ipam_req.SpecificSubnetRequest):
            return self._allocate_specific_subnet(request)
|
ba1dr/tplgenerator | templates/django/__APPNAME__/apps/user_auth/admin.py | Python | mit | 110 | 0 | # -*- coding: utf-8 -*-
from django.contrib import | admin
from .models imp | ort User
admin.site.register(User)
|
peterhogan/python | oop_test.py | Python | mit | 2,018 | 0.026264 | import random
from urllib import urlopen
import sys
WORD_URL = "http://learncodethehardway.org/words.txt"
WORDS = []
PHRASES = {
    "class %%%(%%%):":
      "Make a class named %%% that is-a %%%.",
    "class %%%(object):\n\tdef __init__(self, ***)":
      "class %%% has-a __init__ that takes self and *** parameters.",
    "class %%%(object):\n\tdef ***(self, @@@)":
      "class %%% has-a function named *** that takes self and @@@ parameters.",
    "*** = %%%()":
      "Set *** to an instance of class %%%.",
    "***.***(@@@)":
      "From *** get the *** function, and call it with parameters self, @@@.",
    "***.*** = '***'":
      "From *** get the *** attribute and set it to '***'."
}
# do they want to drill phrases first
if len(sys.argv) == 2 and sys.argv[1] == "english":
PHRASE_FIRST = True
else:
PHRASE_FIRST = False
# load up the words from the website
for word in urlopen(WORD_URL).readlines():
WORDS.append(word.strip())
def convert(snippet, phrase | ):
class_names = [w.capitalize() for w in
random.sample(WORDS, snippet.count("%%%"))]
other_names = random.sample(WORDS, snippet.count("***"))
results = []
param_names = []
for i in range(0, snippet.count("@@@")):
param_count = random.randint(1,3)
param_names.append(', '.join(random.sample(WORDS, param_count)))
for sentence in snippet, phrase:
result = sentence[:]
# fake class names
for word in class_names:
result = result.replace("%%%" | , word, 1)
# fake other names
for word in other_names:
result = result.replace("***", word, 1)
# fake parameter lists
for word in param_names:
result = result.replace("@@@", word, 1)
results.append(result)
return results
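# For example, the snippet "class %%%(%%%):" might come back as
# "class Corn(Bridge):", with the paired phrase receiving the same
# substitutions, so question and answer always use matching names.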
# keep going until they hit CTRL-D
try:
while True:
snippets = PHRASES.keys()
random.shuffle(snippets)
for snippet in snippets:
phrase = PHRASES[snippet]
question, answer = convert(snippet, phrase)
if PHRASE_FIRST:
question, answer = answer, question
print question
raw_input("> ")
print "ANSWER: %s\n\n" % answer
except EOFError:
print "\nBye"
|
vertisfinance/docker-images | base/context/runutils.py | Python | mit | 8,413 | 0.000357 | # coding: utf-8
import os
import sys
import subprocess
import signal
import pwd
import shutil
import click
class Stopper(object):
def __init__(self):
self.stopped = False
def getvar(name, default=None, required=True):
"""
Returns the value of an environment variable.
If the variable is not present, default will be used.
If required is True, only not None values will be returned,
will raise an exception instead of returning None.
"""
ret = os.environ.get(name, default)
if required and ret is None:
raise Exception('Environment variable %s is not set' % name)
return ret
def ensure_dir(dir, owner=None, group=None, permission_str=None):
"""
    Checks the existence of the given directory and creates it if not present.
If `owner` is not present, root will own the newly created dir.
If `group` is not present, the newly created dir's group will be root.
"""
if not os.path.isdir(dir):
os.makedirs(dir)
if owner:
subprocess.call(['chown', owner, dir])
if group:
subprocess.call(['chgrp', group, dir])
if permission_str:
subprocess.call(['chmod', permission_str, dir])
def ensure_user(username, uid, groupname=None, gid=None, unlock=False):
"""
If `username` does not exist, we create one with uid.
"""
if not groupname:
groupname = username
if not gid:
gid = uid
try:
subprocess.call(['groupadd', '-g', str(gid), groupname])
except:
pass
try:
params = ['useradd',
'-u', str(uid),
'-g', str(gid),
'-s', '/bin/bash',
'-m', username]
if unlock:
params += ['-p', '*']
subprocess.call(params)
except:
pass
def run_cmd(args, message=None, input=None, user=None, printoutput=False):
"""
| Executes a one-off command. The message will be printed on terminal.
If input is given, it will be passed to the subprocess.
If user is given (as id or name) the process will run as the given user.
"""
if message:
| click.echo(message + ' ... ')
_setuser = setuser(user) if user else None
if input is None:
try:
output = subprocess.check_output(
args, stderr=subprocess.STDOUT, preexec_fn=_setuser)
except subprocess.CalledProcessError as e:
if message:
click.secho('✘', fg='red')
if printoutput:
output = e.output.decode('utf-8')
click.secho(output, fg='red')
raise
else:
if message:
click.secho('✔', fg='green')
if printoutput:
output = output.decode('utf-8')
click.secho(output, fg='green')
else:
sp = subprocess.Popen(
args,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
preexec_fn=_setuser)
out, err = sp.communicate(input)
retcode = sp.wait()
if retcode:
if message:
click.secho('✘', fg='red')
raise Exception(err)
else:
if message:
click.secho('✔', fg='green')
def run_daemon(params, stdout=None, stderr=None,
signal_to_send=signal.SIGTERM,
waitfunc=None, user=None,
semaphore=None, initfunc=None):
"""
Runs the command as the given user (or root by default) in daemon mode
and exits with it's returncode.
Connects the given stdout, stderr, sends the specified signal to exit.
The initialization of the container process will be blocked until
`waitfunc` (if given) returns. If `waitfunc` is given it must accept
an object and should return as soon as possible if object.stopped
evaluates to True.
After `waitfunc` returns, `initfunc` will run. Any container initialization
can go here (create directories, db users, etc.) but should return as
soon as object.stopped (must accept this parameter) is True.
If semaphore is provided it must be a path to a file. This file will
be created after the main process is launched. Before exit the file
    will be deleted. Semaphores can be used by other containers in their
`waitfunc`. The presence of semaphore does not mean the service is ready
(ex. a database can accept connections), only that the process is started.
A well designed `waitfunc` should first wait for the semaphore, then test
the service (ex. try to connect the db until it succeeds).
"""
class SubprocessWrapper(object):
def __init__(self):
self.subprocess = None
subprocess_wrapper = SubprocessWrapper()
stopper = Stopper()
def cleanup(signum, frame):
"""This will run when SIGTERM received."""
if subprocess_wrapper.subprocess:
subprocess_wrapper.subprocess.send_signal(signal_to_send)
stopper.stopped = True
signal.signal(signal.SIGTERM, cleanup)
if waitfunc:
waitfunc(stopper)
if initfunc:
initfunc(stopper)
_setuser = setuser(user) if user else None
if not stopper.stopped:
sp = subprocess.Popen(
params, stdout=stdout, stderr=stderr, preexec_fn=_setuser)
subprocess_wrapper.subprocess = sp
if semaphore:
open(semaphore, 'w').close()
waitresult = sp.wait()
else:
waitresult = 0
    if semaphore:
        try:
            os.remove(semaphore)
        except OSError:
            pass
sys.exit(waitresult)
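# A minimal sketch of an entrypoint built on run_daemon (the command, user,
# paths and wait logic below are hypothetical, not part of this module):
def _example_entrypoint():
    import time
    def wait_for_db(stopper):
        # Block until the database container drops its semaphore file,
        # bailing out early if we are asked to stop.
        while not stopper.stopped and not os.path.exists('/semaphores/db'):
            time.sleep(1)
    run_daemon(['nginx', '-g', 'daemon off;'],
               user='www-data',
               semaphore='/semaphores/web',
               waitfunc=wait_for_db)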
def setuser(user):
"""
Returns a function that sets process uid, gid according to
the given username.
If the user does not exist, it raises an error.
"""
try:
pw = getpw(user)
except KeyError:
raise Exception('No such user: %s' % user)
groups = list(set(os.getgrouplist(pw.pw_name, pw.pw_gid)))
def chuser():
os.setgroups(groups)
os.setgid(pw.pw_gid)
os.setuid(pw.pw_uid)
os.environ['HOME'] = pw.pw_dir
return chuser
def getpw(user):
"""
Returns the pwd entry for a user given by uid or username.
"""
if isinstance(user, int):
return pwd.getpwuid(user)
return pwd.getpwnam(user)
def substitute(filename, mapping):
"""
Takes a file and substitutes all occurances of {{VARIABLE}}
with values from mapping.
"""
with open(filename, 'r') as f:
content = f.read()
for k, v in mapping.items():
content = content.replace('{{%s}}' % k, v)
with open(filename, 'w') as f:
f.write(content)
def runbash(user):
subprocess.call(['bash'], preexec_fn=setuser(user))
def merge_dir(src, dst, owner=None, group=None, permission_str=None):
"""
Copy files and dirs from src to dst recursively.
"""
assert all([os.path.isdir(src), os.path.isdir(dst)])
for path, dirnames, filenames in os.walk(src):
rel = os.path.relpath(path, start=src)
pair = os.path.normpath(os.path.join(dst, rel))
for d in dirnames:
dirtocheck = os.path.join(pair, d)
ensure_dir(dirtocheck, owner, group, permission_str)
if not permission_str:
shutil.copymode(os.path.join(path, d), dirtocheck)
for f in filenames:
srcfile = os.path.join(path, f)
pairfile = os.path.join(pair, f)
copyfile(srcfile, pairfile,
owner=owner, group=group, permission_str=permission_str)
def copyfile(src, dest, owner=None, group=None, permission_str=None):
shutil.copy(src, dest)
if owner:
subprocess.call(['chown', owner, dest])
if group:
subprocess.call(['chgrp', group, dest])
if permission_str:
subprocess.call(['chmod', permission_str, dest])
def get_user_ids(default_user_name, default_user_id):
user_name = getvar('USER_NAME', required=False)
if user_name is not None:
user_id = int(getvar('USER_ID'))
else:
user_id = None
user_name = user_name or default_user_name
    user_id = user_id or default_user_id
    return user_name, user_id
guangtunbenzhu/BGT-Cosmology | DeepLearning/nnmath.py | Python | mit | 5,679 | 0.016024 | __license__ = "MIT"
__author__ = "Guangtun Ben Zhu (BGT) @ Johns Hopkins University"
__startdate__ = "2016.01.19"
__name__ = "nnmath"
__module__ = "Network"
__lastdate__ = "2016.01.19"
__version__ = "0.01"
__comments__ = "math utils for neural net work"
import numpy as np
from scipy.special import expit
# some small number epsilon
_EPS = 1E-5
# collect the activation and cost functions in dictionaries
# define activation function
#### logistic sigmoidal
sigmoi | d = expit
sigmoid_deriv = lambda x: (0.5/np.cosh(0.5*x))**2
#### to add: softmax
def softmax(w, t=1.0):
assert w.ndim==2, "the input must be in format of [n_vector, ndim_vector]."
maxes = np.amax(w, axis=1).reshape(w.shape[0],1)
    e = np.exp((w - maxes)/t)
    dist = e/np.sum(e, axis=1).reshape(w.shape[0], 1)
return dist
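# A quick self-check of the stabilized softmax above (made-up inputs): the
# max-subtraction keeps np.exp finite even for huge logits, and every row of
# the result sums to one.
def _softmax_sanity_check():
    w = np.array([[1.0, 2.0, 3.0], [1000.0, 1001.0, 1002.0]])
    s = softmax(w)
    assert np.allclose(np.sum(s, axis=1), 1.0)
    return s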
def softmax_deriv(wsm):
    """
    Jacobian of the softmax for each row of wsm, where wsm already holds
    softmax outputs s: J = diag(s) - s s^T.
    """
assert wsm.ndim==2, "the input must be in for | mat of [n_vector, ndim_vector]."
    diag_idx = np.diag_indices(wsm.shape[1])
    output = np.zeros((wsm.shape[0], wsm.shape[1], wsm.shape[1]))
    for i in np.arange(wsm.shape[0]):
        output[i][diag_idx] = wsm[i, :]
        output[i] -= np.outer(wsm[i, :], wsm[i, :])
    return output
# define cost function
#### quadratic
quadratic = lambda a, y: 0.5*np.sum(np.square(a-y), axis=1)
quadratic_deriv = lambda a, y: a-y
quadratic_delta = lambda z, a, y: (a-y)*sigmoid_deriv(z)
#### cross entropy
crossentropy = lambda a, y: -np.sum(y*np.log(a)+(1.-y)*np.log(1.-a)) #, axis=1)
crossentropy_deriv = lambda a, y: np.sum(y/a-(1.-y)/(1.-a), axis=1)
crossentropy_delta = lambda z, a, y: a-y
#### to add: log-likelihood
loglikelihood = lambda a, y: -np.sum(y*np.log(a), axis=1)
loglikelihood_deriv = lambda a, y: np.sum(a-y)
#loglikelihood_delta = lambda z, a, y:
# This is the fastest convolution I can think of with Python+Numpy
def conv2d(x, y, mode='valid'):
"""
"""
assert y.ndim == 2, "I can only do one kernel at a time."
#assert x.ndim >= 2, "I can only do one kernel at a time."
if (mode == 'full'):
newshape = (x.shape[-2]+y.shape[0]-1, x.shape[-1]+y.shape[1]-1)
if x.ndim > 2:
x_newshape = np.r_[np.asarray(x.shape[:-2]), np.asarray(newshape)]
print(x.shape, x_newshape, newshape, y.shape)
return np.fft.irfft2(np.fft.rfft2(x, x_newshape) * np.fft.rfft2(y, newshape))
elif (mode == 'same'):
newshape = (x.shape[-2], x.shape[-1])
return np.fft.irfft2(np.fft.rfft2(x) * np.fft.rfft2(y, newshape))
elif (mode == 'valid'):
newshape = (x.shape[-2], x.shape[-1])
return np.fft.irfft2(np.fft.rfft2(x) * np.fft.rfft2(y, newshape))[..., y.shape[0]-1:, y.shape[1]-1:]
else:
raise ValueError("Only 'full', 'same', and 'valid' FFT modes are supported.")
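# Sanity sketch (made-up sizes): the FFT product computes a circular
# convolution, and the 'valid' slicing above recovers ordinary linear
# convolution, so a symmetric 2x2 box kernel must reproduce the direct
# neighborhood sum.
def _conv2d_box_check():
    x = np.random.rand(8, 8)
    y = np.ones((2, 2))
    box = x[:-1, :-1] + x[:-1, 1:] + x[1:, :-1] + x[1:, 1:]
    return np.allclose(conv2d(x, y, mode='valid'), box)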
def maxpooling22_down(input_image, mask=None):
"""
down-pooling with pool size 2x2, with numpy.fmax
"""
if mask is None:
mask = np.zeros(input_image.shape, dtype=bool)
else:
assert np.array_equal(np.asarray(mask.shape),np.asarray(input_image.shape)), "image and mask must have the same shape."
nimages = np.prod(input_image.shape[:-2]) # collapse the first two arrays to speed up
newshape = np.r_[nimages, np.asarray([input_image.shape[-2]//2, 2, input_image.shape[-1]//2, 2])].astype(int)
image_view = input_image.reshape(newshape)
mask_view = mask.reshape(newshape)
tmp_mask = np.zeros(newshape[:-1], dtype=bool)
assert ~mask_view.flags['OWNDATA'], "mask_view needs to be a view."
mask_view[...,0] = image_view[...,0] > image_view[...,1]
mask_view[...,1] = ~mask_view[...,0]
# xtmp = np.fmax(image_view[...,0], image_view[...,1])
# print(image_view[mask_view].shape, newshape)
xtmp = image_view[mask_view].reshape(newshape[:-1]) # This is Ok because it's the last axis
tmp_mask[...,0,:] = xtmp[...,0,:] > xtmp[...,1,:]
tmp_mask[...,1,:] = ~tmp_mask[...,0,:]
for i in np.arange(2):
mask_view[...,i] = np.logical_and(mask_view[...,i], tmp_mask)
newshape_out = np.r_[np.asarray(input_image.shape[:-2]), np.asarray([input_image.shape[-2]//2, input_image.shape[-1]//2])]
return np.fmax(xtmp[...,0,:], xtmp[...,1,:]).reshape(newshape_out)
# return np.sum(np.sum(image_view, axis=-1), axis=-2).reshape(newshape_out)/4.
def maxpooling22_up(input_image, mask=None):
"""
up-pooling with pool size 2x2
for dz[l+1]/da[l] and detal[l]
"""
#nimages = np.prod(input_image.shape[:-2])
newshape = np.r_[np.asarray(input_image.shape[:-2]), np.asarray([input_image.shape[-2], 2, input_image.shape[-1], 2])]
output_image = np.zeros(newshape)
for i in (0,1):
for j in (0,1):
output_image[...,i,:,j] = input_image
newshape_out = np.r_[np.asarray(input_image.shape[:-2]), np.asarray([input_image.shape[-2]*2, input_image.shape[-1]*2])]
output_image_view = output_image.reshape(newshape_out)
# print(input_image.shape, newshape_out, output_image_view.shape, mask.shape)
if mask is not None:
#output_image_view[~mask] = output_image_view[~mask]*0E-2
output_image_view[~mask] = 0.
return output_image_view
activationfunc = {'sigmoid': {'function': sigmoid, 'derivative': sigmoid_deriv}}
# 'rectified': {'function': rectified, 'derivative':rectified_deriv}}
costfunc = {'quadratic': {'function': quadratic, 'derivative': quadratic_deriv, 'delta': quadratic_delta},
'crossentropy': {'function': crossentropy, 'derivative':crossentropy_deriv, 'delta': crossentropy_delta}}
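# A small sketch of how these lookup tables are meant to be consumed during
# backpropagation (toy values, not part of the original module):
def _example_output_delta():
    z = np.array([[0.5, -1.0]])                  # last-layer pre-activations
    a = activationfunc['sigmoid']['function'](z)
    y = np.array([[1.0, 0.0]])                   # target
    # dC/dz at the output layer for a sigmoid layer with quadratic cost
    return costfunc['quadratic']['delta'](z, a, y)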
|
ajfazan/tools | scripts/compute_vector_difference.py | Python | gpl-2.0 | 2,566 | 0.045207 | #!/usr/bin/env osgeo_python
from osgeo import ogr, osr
import os, sys
def openDataSource( filename ):
driver = ogr.GetDriverByName( "ESRI Shapefile" )
ds = driver.Open( filename, 0 )
if ds is None:
    print "Unable to open %s" % filename
sys.exit( -1 )
return ds
def printGeometryInfo( g ):
print "Geometry name:", g.GetGeometryName()
print "Geometry count:", g.GetGeometryCount()
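# A tiny illustration (hypothetical geometries) of the core operation the
# main loop performs below - polygon difference via OGR:
def _difference_demo():
  g1 = ogr.CreateGeometryFromWkt( 'POLYGON((0 0,4 0,4 4,0 4,0 0))' )
  g2 = ogr.CreateGeometryFromWkt( 'POLYGON((2 2,6 2,6 6,2 6,2 2))' )
  return g1.Difference( g2 )  # the L-shaped part of g1 outside g2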
def main( argv ):
ds1 = openDataSource( argv[0] )
ds2 = openDataSource( argv[1] )
l1 = ds1.GetLayer()
l2 | = ds2.GetLayer()
sr1 = l1.GetSpatialRef()
sr2 = l2.GetSpatialRef()
wkt1 = sr1.ExportToWkt()
wkt2 = sr2.ExportToWkt()
if wkt1 != wkt2:
print "WARNING: Spatial reference systems differ... Unable to compute result"
return 0
geom1_type = l1.GetGeomType()
geom2_type = l2.GetGeomType()
| if ( geom1_type == ogr.wkbPolygon ) and ( geom2_type == ogr.wkbPolygon ):
out_driver = ogr.GetDriverByName( "ESRI Shapefile" )
# Remove the output shapefile if it already exists
if os.path.exists( argv[2] ):
try:
out_driver.DeleteDataSource( argv[2] )
except:
        print >> sys.stderr, "Unable to delete existing dataset " + argv[2]
sys.exit( 1 )
out_ds = out_driver.CreateDataSource( argv[2] )
sr = osr.SpatialReference()
sr.ImportFromWkt( wkt1 )
layer_name = os.path.splitext( os.path.basename( argv[2] ) )[0]
out_layer = out_ds.CreateLayer( layer_name, sr, ogr.wkbPolygon )
out_layer.CreateField( ogr.FieldDefn( "ID", ogr.OFTInteger64 ) )
r1 = range( l1.GetFeatureCount() )
r2 = range( l2.GetFeatureCount() )
fid = 0
for k1 in r1:
f1 = l1.GetNextFeature()
g1 = ogr.Geometry( ogr.wkbPolygon )
g1 = g1.Union( f1.GetGeometryRef() )
for k2 in r2:
f2 = l2.GetNextFeature()
g2 = ogr.Geometry( ogr.wkbPolygon )
g2 = g2.Union( f2.GetGeometryRef() )
g1 = g1.Difference( g2 )
if ( not g1.IsEmpty() ) and ( g1.GetGeometryType() == ogr.wkbPolygon ):
# Create the feature and set values
feature_defn = out_layer.GetLayerDefn()
feature = ogr.Feature( feature_defn )
feature.SetGeometry( g1 )
feature.SetField( "ID", fid )
out_layer.CreateFeature( feature )
feature.Destroy()
l2.ResetReading()
out_ds.Destroy()
return 0
if __name__ == "__main__":
if len( sys.argv ) != 4:
print "Usage:\n\t%s <DS1> <DS2> <OUT_DS>" % os.path.basename( sys.argv[0] )
sys.exit( 1 )
r = main( sys.argv[1:] )
sys.exit( r )
|
pozdnyakov/chromium-crosswalk | tools/telemetry/telemetry/core/platform/mac_platform_backend.py | Python | bsd-3-clause | 1,792 | 0.011161 | # Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
try:
import resource # pylint: disable=F0401
except ImportError:
resource = None # Not available on all platforms
from telemetry.core.platform import posix_platform_backend
class MacPlatformBackend(posix_platform_backend.PosixPlatformBackend):
def StartRawDisplayFrameRateMeasurement(self):
raise NotImplementedError()
def StopRawDisplayFrameRateMeasurement(self):
raise NotImplementedError()
def GetRawDisplayFrameRateMeasurements(self):
raise NotImplementedError()
def IsThermallyThrottled(self):
raise NotImplementedError()
def HasBeenThermallyThrottled(self):
raise NotImplementedError()
def GetSystemCommitCharge(self):
vm_stat = self._RunCommand(['vm_stat'])
for stat in vm_stat.splitlines():
key, value = stat.split(':')
if key | == 'Pages active':
pages_active = int(value.strip()[:-1]) # Strip trailing '.'
return p | ages_active * resource.getpagesize() / 1024
return 0
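  # Example vm_stat line parsed above (the value is a page count with a
  # trailing '.'):   "Pages active:                  123456."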
def GetMemoryStats(self, pid):
rss_vsz = self._GetPsOutput(['rss', 'vsz'], pid)
if rss_vsz:
rss, vsz = rss_vsz[0].split()
return {'VM': 1024 * int(vsz),
'WorkingSetSize': 1024 * int(rss)}
return {}
def GetOSName(self):
return 'mac'
def GetOSVersionName(self):
os_version = os.uname()[2]
if os_version.startswith('9.'):
return 'leopard'
if os_version.startswith('10.'):
return 'snowleopard'
if os_version.startswith('11.'):
return 'lion'
if os_version.startswith('12.'):
return 'mountainlion'
#if os_version.startswith('13.'):
# return 'mavericks'
|
liris-vision/starling | tools/getos.py | Python | gpl-3.0 | 1,475 | 0.007463 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# STARLING PROJECT
#
# LIRIS - Laboratoire d'InfoRmatique en Image et Systèmes d'information
#
# Copyright: 2012 - 2015 Eric Lombardi (eric.lombardi@liris.cnrs.fr), LIRIS (liris.cnrs.fr), CNRS (www.cnrs.fr)
#
#
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 3, as published
# by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but |
# WITHOUT ANY WARRANTY; without even the implied warranties of
# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
# | PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
#
# For further information, check the COPYING file distributed with this software.
#
#----------------------------------------------------------------------
#
# Get OS version informations.
#
import platform
fullOsVersion = platform.platform()
if 'x86_64-with-Ubuntu-16.04' in fullOsVersion:
shortVersion = 'u1604-64'
elif 'x86_64-with-Ubuntu-14.04' in fullOsVersion:
shortVersion = 'u1404-64'
elif 'x86_64-with-Ubuntu-12.04' in fullOsVersion:
shortVersion = 'u1204-64'
elif 'Windows-7' in fullOsVersion:
shortVersion = 'w7'
else:
shortVersion = 'unknown'
print shortVersion
|
droundy/deft | papers/histogram/figs/yaml-comparison.py | Python | gpl-2.0 | 4,167 | 0.011039 | #!/usr/bin/env python
from __future__ import division
import sys, os
import numpy as np
import readnew
from glob import glob
#import re
import yaml
import os.path
import time # Need to wait some time if file is being written
# Example: /home/jordan/sad-monte-carlo/
filename_location = sys.argv[1]
# Example: data/samc-1e4-256-cpp-reference-lndos.dat
reference = sys.argv[2]
# Used for where we save the data.: s000/periodic-ww1.50-ff0.17-N256
filebase = sys.argv[3]
# The number to divide moves by! N is added back in comparison-plot
N = int(sys.argv[4])
# Energy range
Smin = int(sys.argv[5])
Smax = int(sys.argv[6])
# Are you comparing to a yaml reference?
yamlRef = sys.argv[7].lower() not in ('0', 'false', 'no')  # bool() of any non-empty string is True
filename = sys.argv[8:]
print(('filenames are ', filename))
for f in filename:
name = '%s.yaml' % (f)
print(('trying filename ', name))
while not os.path.exists(filename_location + name):
print('I am waiting for file to be written.')
time.sleep(30)
# Read YAML file
if os.path.isfile(filename_location + name):
with open(filename_location + name, 'r') as stream:
            yaml_data = yaml.load(stream, Loader=yaml.SafeLoader)
else:
raise ValueError("%s isn't a file!" % (filename_location + name))
#print(data_loaded)
data = yam | l_data
data['bins']['histogram'] = np.array(data['bins']['histogram'])
data['bins']['lnw'] = np.array(data['bins']['lnw'])
data['movies']['energy']
minyaml = data['movies']['energy'].index(-Smax)
maxyaml = data['movies']['energy'].index(-Smin)
#print(data['bins']['lnw'])
moves = data['moves']
data | ['movies']['entropy'] = np.array(data['movies']['entropy'])
lndos = data['movies']['entropy']
N_save_times = len(data['movies']['entropy'])
ref = reference
if ref[:len('data/')] != 'data/':
ref = 'data/' + ref
maxref = Smax #int(readnew.max_entropy_state(ref))
minref = Smin # int(readnew.min_important_energy(ref))
n_energies = int(minref - maxref+1)
#print maxref, minref
try:
eref, lndosref, Nrt_ref = readnew.e_lndos_ps(ref)
except:
eref, lndosref = readnew.e_lndos(ref)
errorinentropy = np.zeros(N_save_times)
maxerror = np.zeros(N_save_times)
for i in range(0, N_save_times):
# below just set average S equal between lndos and lndosref
if yamlRef:
# if using yaml as a reference the range is from 0 to len while for C++ the range is
# from maxref to minref + 1
norm_factor = np.mean(lndos[i][maxyaml:minyaml+1]) - np.mean(lndosref[0:(minyaml+1-maxyaml)])
doserror = lndos[i][maxyaml:minyaml+1][::-1] - lndosref[0:(minyaml+1-maxyaml)] - norm_factor
else:
norm_factor = np.mean(lndos[i][maxyaml:minyaml+1]) - np.mean(lndosref[maxref:minref+1])
doserror = lndos[i][maxyaml:minyaml+1][::-1] - lndosref[maxref:minref+1] - norm_factor
errorinentropy[i] = np.sum(abs(doserror))/len(doserror)
maxerror[i] = np.amax(doserror) - np.amin(doserror)
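    # Only differences of ln(DOS) are physical, so norm_factor above aligns
    # the mean entropy of each saved frame with the reference over the shared
    # energy range before the absolute errors are averaged.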
# remove N from moves in yaml file because N is added back in the
# comparison-plot script
moves = list(map(int, data['movies']['time']))
moves = [x / N for x in moves]
errorinentropy = errorinentropy[:len(moves)]
maxerror = maxerror[:len(moves)]
dirname = 'data/comparison/%s-%s' % (filebase, name.replace('.yaml', ''))
print('saving to', dirname)
try:
os.mkdir(dirname)
except OSError:
pass
else:
print(("Successfully created the directory %s " % dirname))
np.savetxt('%s/errors.txt' %(dirname),
np.c_[moves, errorinentropy, maxerror],
fmt = ('%.4g'),
delimiter = '\t',
header = 'iterations\t errorinentropy\t maxerror\t(generated with python %s' % ' '.join(sys.argv))
# The following is intended for testing whether there is a
# systematic error in any of our codes.
#np.savetxt('%s/error-vs-energy.txt' %(dirname),
#np.c_[eref, doserror],
#fmt = ('%.4g'),
#delimiter = '\t', header = 'E\t Serror')
|
andyaguiar/tornado | tornado/concurrent.py | Python | apache-2.0 | 18,212 | 0.000384 | #!/usr/bin/env python
#
# Copyright 2012 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Utilities for working with threads and ``Futures``.
``Futures`` are a pattern for concurrent programming introduced in
Python 3.2 in the `concurrent.futures` package. This package defines
a mostly-compatible `Future` class designed for use from coroutines,
as well as some utility functions for interacting with the
`concurrent.futures` package.
"""
from __future__ import absolute_import, division, print_function, with_statement
import functools
import platform
import textwrap
import traceback
import sys
from tornado.log import app_log
from tornado.stack_context import ExceptionStackContext, wrap
from tornado.util import raise_exc_info, ArgReplacer
try:
from concurrent import futures
except ImportError:
futures = None
# Can the garbage collector handle cycles that include __del__ methods?
# This is true in cpython beginning with version 3.4 (PEP 442). |
_GC_CYCLE_FINALIZERS = (platform.python_implementation() == 'CPython' and
sys.version_info >= (3, 4))
class ReturnValueIgnoredError(Exception):
pass
# This class and associated code in the future object is derived
# from the Trollius project, a backport of asyncio to Python 2.x - 3.x
class _TracebackLogge | r(object):
"""Helper to log a traceback upon destruction if not cleared.
This solves a nasty problem with Futures and Tasks that have an
exception set: if nobody asks for the exception, the exception is
never logged. This violates the Zen of Python: 'Errors should
never pass silently. Unless explicitly silenced.'
However, we don't want to log the exception as soon as
set_exception() is called: if the calling code is written
properly, it will get the exception and handle it properly. But
we *do* want to log it if result() or exception() was never called
-- otherwise developers waste a lot of time wondering why their
buggy code fails silently.
An earlier attempt added a __del__() method to the Future class
itself, but this backfired because the presence of __del__()
prevents garbage collection from breaking cycles. A way out of
this catch-22 is to avoid having a __del__() method on the Future
class itself, but instead to have a reference to a helper object
with a __del__() method that logs the traceback, where we ensure
that the helper object doesn't participate in cycles, and only the
Future has a reference to it.
The helper object is added when set_exception() is called. When
the Future is collected, and the helper is present, the helper
object is also collected, and its __del__() method will log the
traceback. When the Future's result() or exception() method is
    called (and a helper object is present), it removes the helper
object, after calling its clear() method to prevent it from
logging.
One downside is that we do a fair amount of work to extract the
traceback from the exception, even when it is never logged. It
would seem cheaper to just store the exception object, but that
references the traceback, which references stack frames, which may
reference the Future, which references the _TracebackLogger, and
then the _TracebackLogger would be included in a cycle, which is
what we're trying to avoid! As an optimization, we don't
immediately format the exception; we only do the work when
activate() is called, which call is delayed until after all the
Future's callbacks have run. Since usually a Future has at least
one callback (typically set by 'yield From') and usually that
    callback extracts the exception, thereby removing the need to
format the exception.
PS. I don't claim credit for this solution. I first heard of it
in a discussion about closing files when they are collected.
"""
__slots__ = ('exc_info', 'formatted_tb')
def __init__(self, exc_info):
self.exc_info = exc_info
self.formatted_tb = None
def activate(self):
exc_info = self.exc_info
if exc_info is not None:
self.exc_info = None
self.formatted_tb = traceback.format_exception(*exc_info)
def clear(self):
self.exc_info = None
self.formatted_tb = None
def __del__(self):
if self.formatted_tb:
app_log.error('Future exception was never retrieved: %s',
''.join(self.formatted_tb).rstrip())
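# Hedged sketch, not Tornado code: a minimal, self-contained illustration of
# the "helper object with __del__" pattern described in the docstring above.
# The names _DemoFinalizer and _DemoOwner are hypothetical.
class _DemoFinalizer(object):
    """Logs on collection unless clear() was called first."""
    def __init__(self, message):
        self.message = message
    def clear(self):
        self.message = None
    def __del__(self):
        if self.message is not None:
            print('value was never retrieved: %s' % self.message)
class _DemoOwner(object):
    """Holds the only reference to the helper; the owner itself has no
    __del__, so it can still be collected as part of a cycle, and
    collecting it collects (and thereby fires) the helper unless it
    was cleared first."""
    def __init__(self):
        self._finalizer = _DemoFinalizer('pending error')
    def retrieve(self):
        # Observing the value disarms the finalizer, mirroring what
        # Future.result()/exception() do with _TracebackLogger.
        self._finalizer.clear()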
class Future(object):
"""Placeholder for an asynchronous result.
A ``Future`` encapsulates the result of an asynchronous
operation. In synchronous applications ``Futures`` are used
to wait for the result from a thread or process pool; in
Tornado they are normally used with `.IOLoop.add_future` or by
yielding them in a `.gen.coroutine`.
`tornado.concurrent.Future` is similar to
`concurrent.futures.Future`, but not thread-safe (and therefore
faster for use with single-threaded event loops).
In addition to ``exception`` and ``set_exception``, methods ``exc_info``
and ``set_exc_info`` are supported to capture tracebacks in Python 2.
The traceback is automatically available in Python 3, but in the
Python 2 futures backport this information is discarded.
This functionality was previously available in a separate class
``TracebackFuture``, which is now a deprecated alias for this class.
.. versionchanged:: 4.0
`tornado.concurrent.Future` is always a thread-unsafe ``Future``
with support for the ``exc_info`` methods. Previously it would
be an alias for the thread-safe `concurrent.futures.Future`
if that package was available and fall back to the thread-unsafe
implementation if it was not.
.. versionchanged:: 4.1
If a `.Future` contains an error but that error is never observed
(by calling ``result()``, ``exception()``, or ``exc_info()``),
a stack trace will be logged when the `.Future` is garbage collected.
This normally indicates an error in the application, but in cases
where it results in undesired logging it may be necessary to
suppress the logging by ensuring that the exception is observed:
``f.add_done_callback(lambda f: f.exception())``.
"""
def __init__(self):
self._done = False
self._result = None
self._exc_info = None
self._log_traceback = False # Used for Python >= 3.4
self._tb_logger = None # Used for Python <= 3.3
self._callbacks = []
# Implement the Python 3.5 Awaitable protocol if possible
# (we can't use return and yield together until py33).
if sys.version_info >= (3, 3):
exec(textwrap.dedent("""
def __await__(self):
return (yield self)
"""))
def cancel(self):
"""Cancel the operation, if possible.
Tornado ``Futures`` do not support cancellation, so this method always
returns False.
"""
return False
def cancelled(self):
"""Returns True if the operation has been cancelled.
Tornado ``Futures`` do not support cancellation, so this method
always returns False.
"""
return False
def running(self):
"""Returns True if this operation is currently running."""
return not self._done
def done(self):
"""Returns True if the future has finished running."""
return self._done
def _clear_tb_log(self):
self._log_traceback = False
if self._tb_logger is not None:
            self._tb_logger.clear()
            self._tb_logger = None
tensorflow/transform | tensorflow_transform/graph_tools.py | Python | apache-2.0 | 39,030 | 0.005867 | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tools for analyzing a TensorFlow graph.
This module exports the function determine_ready_tensors_and_table_initializers
which analyzes a TensorFlow graph to determine which tensors and table
initializers are "ready". The concept of readiness arises as tf.Transform
works by building a single TF graph containing placeholders for the outputs
of analyzers. These placeholders are progressively replaced by constants in
a number of phases, where in each phase we run some analyzers and replace their
outputs with constants. We analyze the structure of the graph to determine
which analyzers to run in each phase.
"""
import collections
import copy
import hashlib
import itertools
from typing import Iterable, List, Mapping, Optional, Set, Union
import uuid
from absl import logging
import tensorflow as tf
from tensorflow_transform import analyzer_nodes
from tensorflow_transform import common_types
from tensorflow_transform import nodes
from tensorflow_transform import tf_utils
# TODO(https://issues.apache.org/jira/browse/SPARK-22674): Switch to
# `collections.namedtuple` or `typing.NamedTuple` once the Spark issue is
# resolved.
from tfx_bsl.types import tfx_namedtuple
# pylint: disable=g-direct-tensorflow-import
from tensorflow.python.framework import composite_tensor
from tensorflow.python.framework import func_graph as tf_func_graph
from tensorflow.python.framework import function_def_to_graph
from tensorflow.python.util import object_identity
# pylint: enable=g-direct-tensorflow-import
_INITIALIZABLE_TABLE_OP_TYPES = [
'CuckooTable',
'CuckooTableV2',
'HashTable',
'HashTableV2',
'IndexTable',
'IndexTableV2',
]
_TABLE_INIT_OP_TYPES = [
'InitializeTable',
'InitializeTableV2',
'InitializeTableFromTextFile',
'InitializeTableFromTextFileV2',
'InitializeTableFromDataset',
'LookupTableImport',
'LookupTableImportV2',
# If a TF 2 SavedModel/Hub module with tables is loaded inside the
# pre-processing fn, a StatefulPartitionedCall is added to the
# TABLE_INITIALIZERS collection.
'StatefulPartitionedCall',
]
def _decompose_tensor_or_op(tensor_or_op):
"""Yields the raw components of a `tf.CompositeTensor`.
If tensor_or_op is a `tf.Operation`, or `tf.Tensor`, then
_decompose_tensor_or_op will act as a pass through.
Args:
tensor_or_op: `tf.Tensor`, `tf.CompositeTensor`, or `tf.Operation`.
Yields:
A tf.Tensor or tf.Operation, depending on what tensor_or_op is.
"""
if isinstance(tensor_or_op, composite_tensor.CompositeTensor):
for component in tf.nest.flatten(tensor_or_op, expand_composites=True):
yield component
else:
yield tensor_or_op
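# Hedged usage sketch, not part of tf.Transform: it assumes TF 2.x eager
# execution and `tf.sparse.from_dense`. The component counts follow from a
# SparseTensor decomposing into indices, values and dense_shape.
def _demo_decompose_tensor_or_op():
  dense = tf.constant([1, 2, 3])
  sparse = tf.sparse.from_dense(dense)
  # A plain tensor passes through as a single component.
  assert len(list(_decompose_tensor_or_op(dense))) == 1
  # A composite tensor yields its raw components.
  assert len(list(_decompose_tensor_or_op(sparse))) == 3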
def retrieve_sources(sinks, ignore_control_dependencies=False):
"""Captures subgraph between sources and sinks.
Walk a Graph backwards from `sinks` and return any sources encountered in the
subgraph. This util is refactored from `_map_subgraph` in
tensorflow/.../ops/op_selector.py.
Arguments:
sinks: An iterable of Operations where the subgraph terminates.
ignore_control_dependencies: (Optional) If `True`, ignore any
`control_inputs` for all ops while walking the graph.
Returns:
The set of placeholders upon which `sinks` depend. This could also contain
placeholders representing `captures` in the graph.
"""
stop_at_tensors = object_identity.ObjectIdentitySet()
ops_to_visit = object_identity.ObjectIdentitySet(sinks)
visited_ops = object_identity.ObjectIdentitySet()
potential_extra_sources = object_identity.ObjectIdentitySet()
while ops_to_visit:
op = ops_to_visit.pop()
visited_ops.add(op)
if op.type == 'Placeholder':
potential_extra_sources.update(op.outputs)
input_ops = [t.op for t in op.inputs if t not in stop_at_tensors]
if not ignore_control_dependencies:
input_ops = itertools.chain(input_ops, op.control_inputs)
for input_op in input_ops:
if input_op not in visited_ops:
ops_to_visit.add(input_op)
return potential_extra_sources
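# Hedged sketch, not part of tf.Transform: retrieve_sources walking back
# from an op to the placeholder it depends on. Graph-mode construction via
# tf.compat.v1 is an assumption of this example.
def _demo_retrieve_sources():
  with tf.Graph().as_default():
    x = tf.compat.v1.placeholder(tf.float32, name='x')
    y = x * 2.0 + 1.0
    # The placeholder's output tensor is reported as a source of y.
    assert x in retrieve_sources([y.op])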
def get_func_graph_for_name(graph, func_name):
"""Returns the FuncGraph associated to the given func_name if possible."""
outer_graph = graph
while graph is not None:
func = graph._get_function(str(func_name)) # pylint: disable=protected-access
if func is not None:
if hasattr(func, 'graph'):
return func.graph
# `outer_graph` may not be the same as `ops.get_default_graph()` e.g.
# in the case of nested if ops or when the gradient is being computed
# from inside a Defun. We build the `func_graph` with `outer_graph` as its
# outer graph.
with outer_graph.as_default():
# This is a _DefinedFunction.
func_graph = (
function_def_to_graph.function_def_to_graph(func.definition))
if func_graph is not None:
return func_graph
if hasattr(graph, 'outer_graph'):
graph = graph.outer_graph
else:
raise ValueError(
'Function {} does not exist in the graph.'.format(func_name))
class _UnexpectedPlaceholderError(Exception):
def __init__(self, op, func_graph_name):
tensor = op.outputs[0]
msg = 'An unexpected placeholder was encountered ({})'.format(tensor)
super().__init__(msg)
self.tensor = tensor
self.func_graph_name = func_graph_name
class _UnexpectedTableError(Exception):
def __init__(self, op, func_graph_name):
msg = 'An unexpected initializable table was encountered ({})'.format(op)
super().__init__(msg)
self.op = op
self.func_graph_name = func_graph_name
def _reraise_unexpected_error(func):
"""A decorator that reraises certain exceptions with modified msg and type."""
def wrapper(self, tensor_or_op):
"""Wrapper when calling func to re-raise exceptions."""
try:
return func(self, tensor_or_op)
except _UnexpectedPlaceholderError as e:
if e.func_graph_name:
raise ValueError(
'The tensor_or_op {} depended on a placeholder ({}) that is part '
'of a tf.function graph ({}), this is not supported. This may be a '
'result of calling a tf.Transform analyzer in a tf.function'
''.format(tensor_or_op, e.tensor, e.func_graph_name))
else:
raise ValueError(
'The tensor_or_op {} depended on a placeholder ({}) that was not '
'in the input_signature. This may have be caused by manually '
'adding a placeholder to the graph'.format(tensor_or_op, e.tensor))
except _UnexpectedTableError as e:
if e.func_graph_name:
raise ValueError(
'The tensor_or_op {} depended on an initializable table ({}) that '
'is part of a tf.function graph ({}), this is not supported. This'
' may be a result of initializing a table in a tf.function'
''.format(tensor_or_op, e.op, e.func_graph_name))
else:
raise ValueError(
'The tensor_or_op {} depended on an initializable table ({}) that '
'was not tracked by the graph analysis. This may be caused by '
'adding an initializable table without adding its initializer to '
'the collection tf.GraphKeys.TABLE_INITIALIZERS'.format(
tensor_or_op, e.op))
return wrapper
_AnalysisResult = tfx_namedtuple.namedtuple(
'_AnalysisResult', ['is_ready_to_run', 'path', 'dependent_sources'])
_SourceInfo = tfx_namedtuple.namedtuple('_SourceInfo',
['is_ready_to_run', 'name'])
class _GraphAnalyzer:
"""Class that analyze |
meliora000/eb_django_app | django_eb/comment/migrations/0002_auto_20151231_1023.py | Python | mit | 930 | 0 | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2015-12-31 01:23
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('comment', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='comment',
name='comment',
field=models.CharField(max_length=120, null=True),
),
migrations.AlterField(
model_name='comment',
name='mood',
field=models.NullBooleanField(default=False),
),
migrations.AlterField(
model_name='comment',
name='price',
field=models.NullBooleanField(default=False),
),
migrations.AlterField(
model_name='comment',
name='taste',
field=models.NullBooleanField(default=False),
),
]
|
dobestan/fastblog | fastblog/communications/models/sms.py | Python | mit | 442 | 0.004525 | from django.db import models
from .base import MessageAbstractModel
class SMS(MessageAbstractModel):
cmid = models.TextField()
class Meta:
verbose_name = 'SMS'
verbose_name_plural = verbose_name
def send_message(self, async=True):
from communications.tasks.sms import SendSMSTask
task = SendSMSTask()
        if async:
return task.delay(self.id)
return task.run(self.id)
|
hirochachacha/apython | bpython/interpreter.py | Python | mit | 12,791 | 0.00172 | #!/usr/bin/env python
#coding: utf-8
# The MIT License
#
# Copyright (c) 2009-2011 the bpython authors.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import sys
import os
import code
import inspect
import traceback
import pydoc
import keyword
from pygments.token import Token
from bpython.completion import inspection
from bpython.completion.completers import import_completer
from bpython.util import getpreferredencoding, safe_eval, TimeOutException, debug, isolate
from bpython.str_util import get_closure_words
from bpython._py3compat import PythonLexer, PY3
from six import callable
class NothingType: pass
Nothing = NothingType()
command_tokenize = lambda s: get_closure_words(s.strip(' '))
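# Hedged example of the helper above: command_tokenize splits a command line
# into closure-aware words via get_closure_words, e.g. something like
#     command_tokenize('greet "big world"')  ->  ['greet', '"big world"']
# (the exact quoting behaviour depends on get_closure_words).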
class BPythonInterpreter(code.InteractiveInterpreter):
def __init__(self, locals=None, encoding=None):
"""The syntaxerror callback can be set at any time and will be called
on a caught syntax error. The purpose for this in bpython is so that
the repl can be instantiated after the interpreter (which it
necessarily must be with the current factoring) and then an exception
callback can be added to the Interpeter instance afterwards - more
specifically, this is so that autoindentation does not occur after a
traceback."""
self.command_table = {}
self.encoding = encoding or sys.getdefaultencoding()
self.syntaxerror_callback = None
# Unfortunately code.InteractiveInterpreter is a classic class, so no super()
code.InteractiveInterpreter.__init__(self, locals)
self.locals['__command_table'] = self.command_table
if not PY3:
def runsource(self, source, filename='<input>', symbol='single',
encode=True):
if encode:
source = '# coding: %s\n%s' % (self.encoding,
source.encode(self.encoding))
return code.InteractiveInterpreter.runsource(self, source,
filename, symbol)
def showsyntaxerror(self, filename=None):
"""Override the regular handler, the code's copied and pasted from
code.py, as per showtraceback, but with the syntaxerror callback called
and the text in a pretty colour."""
if self.syntaxerror_callback is not None:
self.syntaxerror_callback()
type, value, sys.last_traceback = sys.exc_info()
sys.last_type = type
sys.last_value = value
if filename and type is SyntaxError:
# Work hard to stuff the correct filename in the exception
try:
msg, (dummy_filename, lineno, offset, line) = value.args
except:
# Not the format we expect; leave it alone
pass
else:
# Stuff in the right filename and right lineno
if not PY3:
lineno -= 1
value = SyntaxError(msg, (filename, lineno, offset, line))
sys.last_value = value
list = traceback.format_exception_only(type, value)
self.writetb(list)
def showtraceback(self):
"""This needs to override the default traceback thing
so it can put it into a pretty colour and maybe other
stuff, I don't know"""
try:
t, v, tb = sys.exc_info()
sys.last_type = t
sys.last_value = v
sys.last_traceback = tb
tblist = traceback.extract_tb(tb)
del tblist[:1]
# Set the right lineno (encoding header adds an extra line)
if not PY3:
for i, (filename, lineno, module, something) in enumerate(tblist):
if filename == '<input>':
tblist[i] = (filename, lineno - 1, module, something)
l = traceback.format_list(tblist)
if l:
l.insert(0, "Traceback (most recent call last):\n")
l[len(l):] = traceback.format_exception_only(t, v)
finally:
tblist = tb = None
self.writetb(l)
def writetb(self, lines):
"""This outputs the traceback and should be overridden for anything
fancy."""
for line in lines:
self.write(line)
def register_command(self, name, function):
if name not in self.command_table:
self.command_table[name] = function
return True
else:
return False
def is_commandline(self, line):
try:
if not PY3 and isinstance(line, unicode):
encoding = getpreferredencoding()
words = map(lambda s: s.decode(encoding), command_tokenize(line.encode(encoding)))
else:
words = command_tokenize(line)
except ValueError:
return False
else:
if len(words) > 0:
command_name = words[0]
return command_name in self.command_table
else:
return False
def get_command_spec(self, line):
try:
if not PY3 and isinstance(line, unicode):
encoding = getpreferredencoding()
words = map(lambda s: s.decode(encoding), command_tokenize(line.encode(encoding)))
else:
words = command_tokenize(line)
except ValueError:
pass
else:
if len(words) > 0:
command_name = words[0]
if command_name in self.command_table:
return [command_name, self.command_table[command_name]]
def runcommand(self, line):
try:
if not PY3 and isinstance(line, unicode):
encoding = getpreferredencoding()
words = map(lambda s: s.decode(encoding), command_tokenize(line.encode(encoding)))
else:
words = command_tokenize(line)
except ValueError:
pass
else:
if len(words) > 0:
command_name = words[0]
if command_name in self.command_table:
source = "__command_table['%s'](%s)" % (command_name, ','.join(words[1:]))
self.runsource(source)
def get_object(self, name):
try:
obj = safe_eval(name, self.locals)
except TimeOutException as e:
return e
except Exception:
return Nothing
else:
return obj
def get_raw_object(self, name):
try:
obj = eval(name, self.locals)
except Exception:
return Nothing
else:
return obj
def get_argspec(self, repl, func, arg_number):
line = repl.s
        cw = repl.current_word
if func:
spec = self._get_argspec(func, arg_number)
else:
spec = None
if not spec:
if keyword.iskeyword(line):
spec = inspection.KeySpec([line])
        elif self.is_commandline(line) and repl.is_only_word:
            spec = self.get_command_spec(line)
obi-two/Rebelion | data/scripts/templates/object/tangible/furniture/all/shared_frn_all_potted_plants_sml_s03.py | Python | mit | 476 | 0.046218 | #### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/furniture/all/shared_frn_all_potted_plants_sml_s03.iff"
result.attribute_template_id = 6
    result.stfName("frn_n","frn_potted_plant_sml_s03")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result |
AlexMili/WhatInstalled | whatinstalled.py | Python | mit | 1,456 | 0.067995 | import os
import argparse
keywords = {\
"mac":[\
"brew install",\
"brew cask install",\
"port install"\
],\
"linux":[\
"apt-get install",\
"aptitude install",\
"yum install",\
"pacman install",\
"dpkg -i",\
"dnf install",\
"zypper in",\
"make install",\
"tar "\
],\
"lua":[\
"luarocks install",\
"luarocks make"\
],\
"python":[\
"pip install",\
"easy_install",\
"conda install"\
],\
"ruby":[\
"gem install",\
"rvm install"\
],
"node":[\
"npm install",\
"bower install"\
],\
}
def whatinstalled():
parser = argparse.ArgumentParser(description='A simple tool to retrieve what you installed using CLI')
parser.add_argument('-f', '--file', dest='bash_file', type=str, help="custom file to parse", default="~/.bash_history")
parser.add_argument('-p', '--profile', dest='profile', type=str, help="specific profile to use", default=None)
args = parser.parse_args()
    global keywords
    history_file = os.path.expanduser(args.bash_file)
f = open(history_file,"r")
if(args.profile != None and args.profile in keywords):
keywords = {args.profile:keywords[args.profile]}
elif(args.profile != None and args.profile not in keywords):
print("\n[ERROR]Profile doesn't exist\n")
exit(0)
for line in f:
for category in keywords:
for item in keywords[category]:
if item in line:
print("["+category+"]"+str(line[:-1]))
if __name__ == '__main__':
whatinstalled()
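# Hedged example: given a history file containing a line such as
#     pip install requests
# the loop above prints a tagged match of the form
#     [python]pip install requests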
|
bundgus/python-playground | matplotlib-playground/examples/user_interfaces/fourier_demo_wx.py | Python | mit | 8,955 | 0.00067 | #!/usr/bin/env python
import numpy as np
# matplotlib requires wxPython 2.8+
# set the wxPython version in lib\site-packages\wx.pth file
# or if you have wxversion installed un-comment the lines below
#import wxversion
#wxversion.ensureMinimal('2.8')
import wx
import matplotlib
matplotlib.interactive(False)
matplotlib.use('WXAgg')
from matplotlib.backends.backend_wxagg import FigureCanvasWxAgg
from matplotlib.figure import Figure
from matplotlib.pyplot import gcf, setp
class Knob(object):
"""
Knob - simple class with a "setKnob" method.
A Knob instance is attached to a Param instance, e.g., param.attach(knob)
Base class is for documentation purposes.
"""
def setKnob(self, value):
pass
class Param(object):
"""
The idea of the "Param" class is that some parameter in the GUI may have
several knobs that both control it and reflect the parameter's state, e.g.
a slider, text, and dragging can all change the value of the frequency in
the waveform of this example.
The class allows a cleaner way to update/"feedback" to the other knobs when
one is being changed. Also, this class handles min/max constraints for all
the knobs.
Idea - knob list - in "set" method, knob object is passed as well
- the other knobs in the knob list have a "set" method which gets
called for the others.
"""
def __init__(self, initialValue=None, minimum=0., maximum=1.):
self.minimum = minimum
self.maximum = maximum
if initialValue != self.constrain(initialValue):
raise ValueError('illegal initial value')
self.value = initialValue
self.knobs = []
def attach(self, knob):
self.knobs += [knob]
def set(self, value, knob=None):
self.value = value
self.value = self.constrain(value)
for feedbackKnob in self.knobs:
if feedbackKnob != knob:
feedbackKnob.setKnob(self.value)
return self.value
def constrain(self, value):
if value <= self.minimum:
value = self.minimum
if value >= self.maximum:
value = self.maximum
return value
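# Hedged sketch, not part of this demo: the Param/Knob feedback described in
# the docstring above, using a trivial recording knob. _RecordingKnob is a
# hypothetical name.
class _RecordingKnob(Knob):
    def __init__(self):
        self.last = None
    def setKnob(self, value):
        self.last = value
def _demo_param_feedback():
    param = Param(0.5, minimum=0., maximum=1.)
    knob = _RecordingKnob()
    param.attach(knob)
    param.set(2.0)
    # Attached knobs receive the constrained value, clamped to the maximum.
    assert knob.last == 1.0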
class SliderGroup(Knob):
def __init__(self, parent, label, param):
self.sliderLabel = wx.StaticText(parent, label=label)
self.sliderText = wx.TextCtrl(parent, -1, style=wx.TE_PROCESS_ENTER)
self.slider = wx.Slider(parent, -1)
# self.slider.SetMax(param.maximum*1000)
self.slider.SetRange(0, param.maximum * 1000)
self.setKnob(param.value)
sizer = wx.BoxSizer(wx.HORIZONTAL)
sizer.Add(self.sliderLabel, 0,
wx.EXPAND | wx.ALIGN_CENTER | wx.ALL,
border=2)
sizer.Add(self.sliderText, 0,
wx.EXPAND | wx.ALIGN_CENTER | wx.ALL,
border=2)
sizer.Add(self.slider, 1, wx.EXPAND)
self.sizer = sizer
self.slider.Bind(wx.EVT_SLIDER, self.sliderHandler)
self.sliderText.Bind(wx.EVT_TEXT_ENTER, self.sliderTextHandler)
self.param = param
self.param.attach(self)
def sliderHandler(self, evt):
value = evt.GetInt() / 1000.
self.param.set(value)
def sliderTextHandler(self, evt):
value = float(self.sliderText.GetValue())
self.param.set(value)
def setKnob(self, value):
self.sliderText.SetValue('%g' % value)
self.slider.SetValue(value * 1000)
class FourierDemoFrame(wx.Frame):
def __init__(self, *args, **kwargs):
wx.Frame.__init__(self, *args, **kwargs)
self.fourierDemoWindow = FourierDemoWindow(self)
self.frequencySliderGroup = SliderGroup(
self,
label='Frequency f0:',
param=self.fourierDemoWindow.f0)
self.amplitudeSliderGroup = SliderGroup(self, label=' Amplitude a:',
param=self.fourierDemoWindow.A)
sizer = wx.BoxSizer(wx.VERTICAL)
sizer.Add(self.fourierDemoWindow, 1, wx.EXPAND)
sizer.Add(self.frequencySliderGroup.sizer, 0,
wx.EXPAND | wx.ALIGN_CENTER | wx.ALL, border=5)
sizer.Add(self.amplitudeSliderGroup.sizer, 0,
                  wx.EXPAND | wx.ALIGN_CENTER | wx.ALL, border=5)
| self.SetSizer(sizer)
class FourierDemoWindow(wx.Window, Knob):
def __init__(self, *args, **kwargs):
wx.Window.__init__(self, *args, **kwargs)
self.lines = []
self.figure = Figure()
self.canvas = FigureCanvasWxAgg(self, -1, self.figure)
self.canvas.callbacks.connect('button_press_event', self.mouseDown)
self.canvas.callbacks.connect('motion_notify_event', self.mouseMotion)
self.canvas.callbacks.connect('button_release_event', self.mouseUp)
self.state = ''
self.mouseInfo = (None, None, None, None)
self.f0 = Param(2., minimum=0., maximum=6.)
self.A = Param(1., minimum=0.01, maximum=2.)
self.draw()
# Not sure I like having two params attached to the same Knob,
# but that is what we have here... it works but feels kludgy -
# although maybe it's not too bad since the knob changes both params
# at the same time (both f0 and A are affected during a drag)
self.f0.attach(self)
self.A.attach(self)
self.Bind(wx.EVT_SIZE, self.sizeHandler)
self.Bind(wx.EVT_PAINT, self.OnPaint)
def OnPaint(self, event):
self.canvas.draw()
event.Skip()
def sizeHandler(self, *args, **kwargs):
self.canvas.SetSize(self.GetSize())
def mouseDown(self, evt):
if self.lines[0] in self.figure.hitlist(evt):
self.state = 'frequency'
elif self.lines[1] in self.figure.hitlist(evt):
self.state = 'time'
else:
self.state = ''
self.mouseInfo = (evt.xdata, evt.ydata,
max(self.f0.value, .1),
self.A.value)
def mouseMotion(self, evt):
if self.state == '':
return
x, y = evt.xdata, evt.ydata
if x is None: # outside the axes
return
x0, y0, f0Init, AInit = self.mouseInfo
self.A.set(AInit + (AInit * (y - y0) / y0), self)
if self.state == 'frequency':
self.f0.set(f0Init + (f0Init * (x - x0) / x0))
elif self.state == 'time':
if (x - x0) / x0 != -1.:
self.f0.set(1. / (1. / f0Init + (1. / f0Init * (x - x0) / x0)))
def mouseUp(self, evt):
self.state = ''
def draw(self):
if not hasattr(self, 'subplot1'):
self.subplot1 = self.figure.add_subplot(211)
self.subplot2 = self.figure.add_subplot(212)
x1, y1, x2, y2 = self.compute(self.f0.value, self.A.value)
color = (1., 0., 0.)
self.lines += self.subplot1.plot(x1, y1, color=color, linewidth=2)
self.lines += self.subplot2.plot(x2, y2, color=color, linewidth=2)
# Set some plot attributes
self.subplot1.set_title(
"Click and drag waveforms to change frequency and amplitude",
fontsize=12)
self.subplot1.set_ylabel("Frequency Domain Waveform X(f)", fontsize=8)
self.subplot1.set_xlabel("frequency f", fontsize=8)
self.subplot2.set_ylabel("Time Domain Waveform x(t)", fontsize=8)
self.subplot2.set_xlabel("time t", fontsize=8)
self.subplot1.set_xlim([-6, 6])
self.subplot1.set_ylim([0, 1])
self.subplot2.set_xlim([-2, 2])
self.subplot2.set_ylim([-2, 2])
self.subplot1.text(0.05, .95,
r'$X(f) = \mathcal{F}\{x(t)\}$',
verticalalignment='top',
transform=self.subplot1.transAxes)
self.subplot2.text(0.05, .95,
r'$x(t) = a \cdot \cos(2\pi f_0 t) e^{-\pi t^2}$',
verticalalignment='top',
transform=self.subplot2.transAxes)
def compute(self, f0, A):
f = np.arange(-6., 6., 0.02)
t = np.arange(-2., 2., 0.01)
        x = A * np.cos(2 * np.pi * f0 * t) * np.exp(-np.pi * t ** 2)
        X = A / 2 * (np.exp(-np.pi * (f - f0) ** 2) +
                     np.exp(-np.pi * (f + f0) ** 2))
        return f, X, t, x
stuarteberg/lazyflow | tests/testJsonConfig.py | Python | lgpl-3.0 | 7,865 | 0.012587 | ###############################################################################
# lazyflow: data flow based lazy parallel computation framework
#
# Copyright (C) 2011-2014, the ilastik developers
# <team@ilastik.org>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the Lesser GNU General Public License
# as published by the Free Software Foundation; either version 2.1
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# See the files LICENSE.lgpl2 and LICENSE.lgpl3 for full text of the
# GNU Lesser General Public License version 2.1 and 3 respectively.
# This information is also available on the ilastik web site at:
# http://ilastik.org/license/
###############################################################################
import os
import sys
import copy
import tempfile
import shutil
import collections
import numpy
import nose
from lazyflow.utility.jsonConfig import Namespace, JsonConfigParser, AutoEval, RoiTuple, FormattedField
import logging
logger = logging.getLogger(__name__)
logger.addHandler(logging.StreamHandler(sys.stdout))
logger.setLevel(logging.INFO)
#logger.setLevel(logging.DEBUG)
class TestJsonConfigNamespace(object):
"""
A basic test for the JsonConfigNamespace class, which always provides __dict__ as an OrderedDict.
It should also support == and != and copy.copy().
"""
def test(self):
n = Namespace()
n.a = "A"
n.b = "B"
n.c = "C"
n.d = "D"
n.e = "E"
assert isinstance(n.__dict__, collections.OrderedDict)
assert n.__dict__.keys() == ["a", "b", "c", "d", "e"]
assert n.__dict__.values() == ["A", "B", "C", "D", "E"]
assert n.a == "A"
assert n.b == "B"
assert n.c == "C"
def testCopy(self):
n = Namespace()
n.a = "A"
n.b = "B"
n.c = "C"
n2 = copy.deepcopy(n)
assert n == n2
assert id(n) != id(n2)
assert id(n.__dict__) != id(n2.__dict__)
class TestJsonConfig(object):
SubConfigSchema = \
{
"_schema_name" : "sub-schema",
"_schema_version" : 1.1,
"sub_settingA" : str,
"sub_settingB" : str
}
TestSchema = \
{
"_schema_name" : "test-schema",
"_schema_version" : 1.1,
"string_setting" : str,
"int_setting" : int,
"auto_int_setting" : AutoEval(int),
"another_auto_int_setting" : AutoEval(int),
"bool_setting" : bool,
"formatted_setting" : FormattedField( requiredFields=["user_name", "user_home_town"]),
"array_setting" : numpy.array,
"array_from_string_setting" : AutoEval(numpy.array),
"roi_setting" : RoiTuple(),
"subconfig" : JsonConfigParser(SubConfigSchema)
}
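    # Hedged note: AutoEval(int) accepts either a literal (43) or an
    # evaluatable string ("7*6"), and FormattedField checks that the
    # required fields appear in the format string; both behaviours are
    # exercised by testRead below.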
@classmethod
def setupClass(cls):
testConfig = \
"""
{
"_schema_name" : "test-schema",
"_schema_version" : 1.0,
"string_setting" : "This is a sentence.",
"int_setting" : 42,
"auto_int_setting" : "7*6",
"another_auto_int_setting" : 43,
"bool_setting" : true,
"formatted_setting" : "Greetings, {user_name} from {user_home_town}!",
"array_setting" : [1,2,3,4],
"array_from_string_setting" : "[1, 1*2, 1*3, 1*4]",
"roi_setting" : [[1,2,3,4,5], [6,7,8,9,10]],
"subconfig" : {
"_schema_name" : "sub-schema",
"_schema_version" : 1.0,
"sub_settingA" : "yes",
"sub_settingB" : "no"
}
}
"""
cls.tempDir = tempfile.mkdtemp()
cls.configpath = os.path.join(cls.tempDir, "config.json")
logger.debug("Using config file: " + cls.configpath)
with open(cls.configpath, 'w') as f:
f.write(testConfig)
@classmethod
def teardownClass(cls):
# If the user is debugging, don't delete the test files.
if logger.level > logging.DEBUG:
shutil.rmtree(cls.tempDir)
def testRead(self):
configFields = JsonConfigParser( TestJsonConfig.TestSchema ).parseConfigFile( TestJsonConfig.configpath )
assert configFields.string_setting == "This is a sentence."
        assert configFields.int_setting == 42
        assert configFields.auto_int_setting == 42
assert configFields.another_auto_int_setting == 43
assert configFields.bool_setting is True
assert configFields.formatted_setting.format( user_name="Stuart", user_home_town="Washington, DC" ) == "Greetings, Stuart from Washington, DC!"
assert configFields.roi_setting == ((1,2,3,4,5), (6,7,8,9,10))
assert isinstance(configFields.array_setting, numpy.ndarray)
assert (configFields.array_setting == [1,2,3,4]).all()
assert isinstance(configFields.array_from_string_setting, numpy.ndarray)
assert (configFields.array_from_string_setting == [1,2,3,4]).all()
# Check sub-config settings
assert configFields.subconfig.sub_settingA == "yes"
assert configFields.subconfig.sub_settingB == "no"
def testWrite(self):
configFields = JsonConfigParser( TestJsonConfig.TestSchema ).parseConfigFile( TestJsonConfig.configpath )
configFields.string_setting = "This is a different sentence."
configFields.int_setting = 100
configFields.bool_setting = False
# Write it.
newConfigFilePath = TestJsonConfig.configpath + "_2"
JsonConfigParser( TestJsonConfig.TestSchema ).writeConfigFile( newConfigFilePath, configFields )
# Read it back.
newConfigFields = JsonConfigParser( TestJsonConfig.TestSchema ).parseConfigFile( newConfigFilePath )
assert newConfigFields == configFields, "Config field content was not preserved after writing/reading"
assert configFields.__dict__.items() == configFields.__dict__.items(), "Config field ORDER was not preserved after writing/reading"
@nose.tools.raises( JsonConfigParser.ParsingError )
def testExceptionIfRepeatedFields(self):
"""
This test creates a config that has an error: A field has been repeated.
We expect to see an exception from the parser telling us that we screwed up.
(See decorator above.)
"""
testConfig = \
"""
{
"_schema_name" : "test-schema",
"_schema_version" : 1.0,
"string_setting" : "First instance",
"string_setting" : "Repeated instance"
}
"""
tempDir = tempfile.mkdtemp()
configpath = os.path.join(tempDir, "config.json")
logger.debug("Using config file: " + configpath)
with open(configpath, 'w') as f:
f.write(testConfig)
try:
configFields = JsonConfigParser( TestJsonConfig.TestSchema ).parseConfigFile( configpath )
finally:
# Clean up temporary file
shutil.rmtree(tempDir)
if __name__ == "__main__":
import sys
import nose
sys.argv.append("--nocapture") # Don't steal stdout. Show it on the console as usual.
sys.argv.append("--nologcapture") # Don't set the logging level to DEBUG. Leave it alone.
ret = nose.run(defaultTest=__file__)
if not ret: sys.exit(1)
|
owatte/thecaribfos | apps/thedirectory/migrations/0004_auto_20150529_1416.py | Python | gpl-3.0 | 616 | 0.008117 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
#import tagging_autocomplete.models
import datetime
class Migration(migrations.Migration):
dependencies = [
        ('thedirectory', '0003_auto_20150525_1515'),
]
operations = [
migrations.AlterField(
model_name='entry',
name='tags',
#field=tagging_autocomplete.models.TagAutocompleteField(max_length=255, blank=True),
field=models.CharField(default=datetime.datetime(2015, 5, 25, 17, 23, 6, 515140), max_length=b'105'),
),
]
|
ColinIanKing/autotest | client/shared/xml_utils_unittest.py | Python | gpl-2.0 | 12,510 | 0.002878 | #!/usr/bin/python
import unittest, tempfile, os, glob, logging
try:
import autotest.common as common
except ImportError:
import common
from autotest.client.shared import xml_utils, ElementTree
class xml_test_data(unittest.TestCase):
def get_tmp_files(self, prefix, sufix):
path_string = os.path.join('/tmp', "%s*%s" % (prefix, sufix))
return glob.glob(path_string)
def setUp(self):
# Previous testing may have failed / left behind extra files
for filename in self.get_tmp_files(xml_utils.TMPPFX, xml_utils.TMPSFX):
os.unlink(filename)
for filename in self.get_tmp_files(xml_utils.TMPPFX,
xml_utils.TMPSFX + xml_utils.EXSFX):
os.unlink(filename)
# Compacted to save excess scrolling
self.TEXT_REPLACE_KEY="TEST_XML_TEXT_REPLACE"
self.XMLSTR="""<?xml version='1.0' encoding='UTF-8'?><capabilities><host>
<uuid>4d515db1-9adc-477d-8195-f817681e72e6</uuid><cpu><arch>x86_64</arch>
<model>Westmere</model><vendor>Intel</vendor><topology sockets='1'
cores='2' threads='2'/><feature name='rdtscp'/><feature name='x2apic'/>
<feature name='xtpr'/><feature name='tm2'/><feature name='est'/>
<feature name='vmx'/><feature name='ds_cpl'/><feature name='monitor'/>
<feature name='pbe'/><feature name='tm'/><feature name='ht'/><feature
name='ss'/><feature name='acpi'/><feature name='ds'/><feature
name='vme'/></cpu><migration_features><live/><uri_transports>
<uri_transport>tcp</uri_transport></uri_transports>
</migration_features><topology><cells num='1'><cell id='0'><cpus
num='4'><cpu id='0'/><cpu id='1'/><cpu id='2'/><cpu id='3'/></cpus>
    </cell></cells></topology><secmodel><model>selinux</model><doi>0</doi>
</secmodel></host><guest><os_type>hvm</os_type><arch name='i686'>
<wordsize>32</wordsize><emulator>$TEST_XML_TEXT_REPLACE</emulator>
<machine>rhel6.2.0</machine><machine canonical='rhel6.2.0'>pc</machine>
<machine>rhel6.1.0</machine><machine>rhel6.0.0</machine><machine>
    rhel5.5.0</machine><machine>rhel5.4.4</machine><machine>rhel5.4.0
</machine><domain type='qemu'></domain><domain type='kvm'><emulator>
/usr/libexec/qemu-kvm</emulator></domain></arch><features><cpuselection
/><deviceboot/><pae/><nonpae/><acpi default='on' toggle='yes'/><apic
default='on' toggle='no'/></features></guest></capabilities>"""
(fd, self.XMLFILE) = tempfile.mkstemp(suffix=xml_utils.TMPSFX,
prefix=xml_utils.TMPPFX)
os.write(fd, self.XMLSTR)
os.close(fd)
self.canonicalize_test_xml()
def tearDown(self):
os.unlink(self.XMLFILE)
leftovers = self.get_tmp_files(xml_utils.TMPPFX, xml_utils.TMPSFX)
if len(leftovers) > 0:
self.fail('Leftover files: %s' % str(leftovers))
def canonicalize_test_xml(self):
et = ElementTree.parse(self.XMLFILE)
et.write(self.XMLFILE, encoding="UTF-8")
f = file(self.XMLFILE)
self.XMLSTR = f.read()
f.close()
class test_ElementTree(xml_test_data):
def test_bundled_elementtree(self):
self.assertEqual(xml_utils.ElementTree.VERSION, ElementTree.VERSION)
class test_TempXMLFile(xml_test_data):
def test_prefix_sufix(self):
filename = os.path.basename(self.XMLFILE)
self.assert_(filename.startswith(xml_utils.TMPPFX))
self.assert_(filename.endswith(xml_utils.TMPSFX))
def test_test_TempXMLFile_canread(self):
tmpf = xml_utils.TempXMLFile()
tmpf.write(self.XMLSTR)
tmpf.seek(0)
stuff = tmpf.read()
self.assertEqual(stuff, self.XMLSTR)
del tmpf
def test_TempXMLFile_implicit(self):
def out_of_scope_tempxmlfile():
tmpf = xml_utils.TempXMLFile()
return tmpf.name
self.assertRaises(OSError, os.stat, out_of_scope_tempxmlfile())
def test_TempXMLFile_explicit(self):
tmpf = xml_utils.TempXMLFile()
tmpf_name = tmpf.name
# Assert this does NOT raise an exception
os.stat(tmpf_name)
del tmpf
self.assertRaises(OSError, os.stat, tmpf_name)
class test_XMLBackup(xml_test_data):
class_to_test = xml_utils.XMLBackup
def is_same_contents(self, filename, other=None):
try:
f = file(filename, "rb")
s = f.read()
except (IOError, OSError):
logging.warning("File %s does not exist" % filename)
return False
if other is None:
return s == self.XMLSTR
else:
other_f = file(other, "rb")
other_s = other_f.read()
return s == other_s
def test_backup_filename(self):
xmlbackup = self.class_to_test(self.XMLFILE)
self.assertEqual(xmlbackup.sourcefilename, self.XMLFILE)
def test_backup_file(self):
xmlbackup = self.class_to_test(self.XMLFILE)
self.assertTrue(self.is_same_contents(xmlbackup.name))
def test_rebackup_file(self):
xmlbackup = self.class_to_test(self.XMLFILE)
oops = file(xmlbackup.name, "wb")
oops.write("foobar")
oops.close()
self.assertFalse(self.is_same_contents(xmlbackup.name))
xmlbackup.backup()
self.assertTrue(self.is_same_contents(xmlbackup.name))
def test_restore_file(self):
xmlbackup = self.class_to_test(self.XMLFILE)
# nuke source
os.unlink(xmlbackup.sourcefilename)
xmlbackup.restore()
self.assertTrue(self.is_same_contents(xmlbackup.name))
def test_remove_backup_file(self):
xmlbackup = self.class_to_test(self.XMLFILE)
filename = xmlbackup.name
os.unlink(filename)
del xmlbackup
self.assertRaises(OSError, os.unlink, filename)
def test_TempXMLBackup_implicit(self):
def out_of_scope_xmlbackup():
tmpf = self.class_to_test(self.XMLFILE)
return tmpf.name
filename = out_of_scope_xmlbackup()
self.assertRaises(OSError, os.unlink, filename)
def test_TempXMLBackup_exception_exit(self):
tmpf = self.class_to_test(self.XMLFILE)
filename = tmpf.name
# simulate exception exit DOES NOT DELETE
tmpf.__exit__(Exception, "foo", "bar")
self.assertTrue(self.is_same_contents(filename + xml_utils.EXSFX))
os.unlink(filename + xml_utils.EXSFX)
def test_TempXMLBackup_unexception_exit(self):
tmpf = self.class_to_test(self.XMLFILE)
filename = tmpf.name
# simulate normal exit DOES DELETE
tmpf.__exit__(None, None, None)
self.assertRaises(OSError, os.unlink, filename)
class test_XMLTreeFile(test_XMLBackup):
class_to_test = xml_utils.XMLTreeFile
def test_sourcebackupfile_closed_file(self):
xml = self.class_to_test(self.XMLFILE)
self.assertRaises(ValueError, xml.sourcebackupfile.write, 'foobar')
def test_sourcebackupfile_closed_string(self):
xml = self.class_to_test(self.XMLSTR)
self.assertRaises(ValueError, xml.sourcebackupfile.write, 'foobar')
def test_init_str(self):
xml = self.class_to_test(self.XMLSTR)
self.assert_(xml.sourcefilename is not None)
self.assertEqual(xml.sourcebackupfile.name,
xml.sourcefilename)
def test_init_xml(self):
xml = self.class_to_test(self.XMLFILE)
self.assert_(xml.sourcefilename is not None)
self.assertEqual(xml.sourcebackupfile.name,
xml.sourcefilename)
def test_restore_from_string(self):
xmlbackup = self.class_to_test(self.XMLSTR)
os.unlink(xmlbackup.sourcefilename)
xmlbackup.restore()
self.assertTrue(self.is_same_contents(xmlbackup.sourcefilename))
def test_restore_from_file(self):
xmlbackup = self.class_to_test(self.XMLFILE)
os.unlink(xmlbackup.sourcefilename)
xmlbackup.restore()
        self.assertTrue(self.is_same_contents(xmlbackup.sourcefilename))
yukioSatoh/ysPyCommon | tests/test_ysPyCommon/__init__.py | Python | mit | 941 | 0.01169 | import unittest, tempfile, uuid, os, shutil, sys
class Test_CreateRemoveTempLocation(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(Test_CreateRemoveTempLocation, self).__init__(*args, **kwargs)
self._tmpTestFolder = None
def setUp(self):
        self._tmpTestFolder = os.path.join(tempfile.gettempdir(),
"unittest_%s_%s" % (self.__class__.__name__, str( uuid.uuid4() )[:8]))
os.mkdir(self._tmpTestFolder)
sys.path.append(self._tmpTestFolder)
def tearDown(self):
if not self._tmpTestFolder: return
if os.path.isdir(self._tmpTestFolder):
print "removing test folder: '%s'" | %self._tmpTestFolder
shutil.rmtree(self._tmpTestFolder)
self._tmpTestFolder = None
if self._tmpTestFolder in sys.path:
sys.path.remove(self._tmpTestFolder)
|
sgiavasis/nipype | nipype/interfaces/vtkbase.py | Python | bsd-3-clause | 1,997 | 0 | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""
vtkbase provides some helpers to use VTK through the tvtk package (mayavi)
Code using tvtk should import it through this module
"""
import os
from .. import logging
iflogger = logging.getLogger('interface')
# Check that VTK can be imported and get version
_vtk_version = None
try:
import vtk
_vtk_version = (vtk.vtkVersion.GetVTKMajorVersion(),
vtk.vtkVersion.GetVTKMinorVersion())
except ImportError:
iflogger.warning('VTK was not found')
# Ensure that tvtk is loaded with the appropriate ETS_TOOLKIT env var
old_ets = os.getenv('ETS_TOOLKIT')
os.environ['ETS_TOOLKIT'] = 'null'
_have_tvtk = False
try:
from tvtk.api import tvtk
_have_tvtk = True
except ImportError:
iflogger.warning('tvtk wasn\'t found')
tvtk = None
finally:
if old_ets is not None:
        os.environ['ETS_TOOLKIT'] = old_ets
else:
del os.environ['ETS_TOOLKIT']
def vtk_version():
""" Get VTK version """
global _vtk_version
return _vtk_version
def no_vtk():
""" Checks if VTK is installed and the python wrapper is functional """
global _vtk_version
return _vtk_version is None
def no_tvtk():
""" | Checks if tvtk was found """
global _have_tvtk
return not _have_tvtk
def vtk_old():
""" Checks if VTK uses the old-style pipeline (VTK<6.0) """
global _vtk_version
if _vtk_version is None:
        raise RuntimeError('VTK is not correctly installed.')
return _vtk_version[0] < 6
def configure_input_data(obj, data):
"""
Configure the input data for vtk pipeline object obj.
Copied from latest version of mayavi
"""
if vtk_old():
obj.input = data
else:
obj.set_input_data(data)
def vtk_output(obj):
""" Configure the input data for vtk pipeline object obj."""
if vtk_old():
return obj.output
return obj.get_output()
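# Hedged usage sketch, not part of nipype: wiring a tvtk source into a
# filter with the helpers above so the same code runs on old- and new-style
# VTK pipelines. The specific tvtk classes are assumptions of this example.
def _demo_pipeline():
    if no_vtk() or no_tvtk():
        return None
    source = tvtk.SphereSource()
    source.update()
    tri = tvtk.TriangleFilter()
    configure_input_data(tri, vtk_output(source))
    tri.update()
    return vtk_output(tri)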
|
LaoZhongGu/kbengine | kbe/src/lib/python/Lib/webbrowser.py | Python | lgpl-3.0 | 21,961 | 0.001229 | #! /usr/bin/env python3
"""Interfaces for launching and remotely controlling Web browsers."""
# Maintained by Georg Brandl.
import io
import os
import shlex
import sys
import stat
import subprocess
import time
__all__ = ["Error", "open", "open_new", "open_new_tab", "get", "register"]
class Error(Exception):
pass
_browsers = {} # Dictionary of available browser controllers
_tryorder = [] # Preference order of available browsers
def register(name, klass, instance=None, update_tryorder=1):
"""Register a browser connector and, optionally, connection."""
_browsers[name.lower()] = [klass, instance]
if update_tryorder > 0:
_tryorder.append(name)
elif update_tryorder < 0:
_tryorder.insert(0, name)
def get(using=None):
"""Return a browser launcher instance appropriate for the environment."""
if using is not None:
alternatives = [using]
else:
alternatives = _tryorder
for browser in alternatives:
if '%s' in browser:
# User gave us a command line, split it into name and args
browser = shlex.split(browser)
if browser[-1] == '&':
return BackgroundBrowser(browser[:-1])
else:
return GenericBrowser(browser)
else:
# User gave us a browser name or path.
try:
command = _browsers[browser.lower()]
except KeyError:
command = _synthesize(browser)
if command[1] is not None:
return command[1]
            elif command[0] is not None:
return command[0]()
raise Error("could not locate runnable browser")
# Please note: the following definition hides a builtin function.
# It is recommended one does "import webbrowser" and uses webbrowser.open(url)
# instead of "from webbrowser import *".
def open(url, new=0, autoraise=True):
for name in _tryorder:
browser = get(name)
if browser.open(url, new, autoraise):
return True
return False
def open_new(url):
return open(url, 1)
def open_new_tab(url):
return open(url, 2)
def _synthesize(browser, update_tryorder=1):
"""Attempt to synthesize a controller base on existing controllers.
This is useful to create a controller when a user specifies a path to
an entry in the BROWSER environment variable -- we can copy a general
controller to operate using a specific installation of the desired
browser in this way.
If we can't create a controller in this way, or if there is no
executable for the requested browser, return [None, None].
"""
cmd = browser.split()[0]
if not _iscommand(cmd):
return [None, None]
name = os.path.basename(cmd)
try:
command = _browsers[name.lower()]
except KeyError:
return [None, None]
# now attempt to clone to fit the new name:
controller = command[1]
if controller and name.lower() == controller.basename:
import copy
controller = copy.copy(controller)
controller.name = browser
controller.basename = os.path.basename(browser)
register(browser, None, controller, update_tryorder)
return [None, controller]
return [None, None]
if sys.platform[:3] == "win":
def _isexecutable(cmd):
cmd = cmd.lower()
if os.path.isfile(cmd) and cmd.endswith((".exe", ".bat")):
return True
for ext in ".exe", ".bat":
if os.path.isfile(cmd + ext):
return True
return False
else:
def _isexecutable(cmd):
if os.path.isfile(cmd):
mode = os.stat(cmd)[stat.ST_MODE]
if mode & stat.S_IXUSR or mode & stat.S_IXGRP or mode & stat.S_IXOTH:
return True
return False
def _iscommand(cmd):
"""Return True if cmd is executable or can be found on the executable
search path."""
if _isexecutable(cmd):
return True
path = os.environ.get("PATH")
if not path:
return False
for d in path.split(os.pathsep):
exe = os.path.join(d, cmd)
if _isexecutable(exe):
return True
return False
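# Hedged sketch, not part of the module: registering and retrieving a
# controller by name. It assumes a POSIX `echo` command; 'demo-browser' is a
# made-up name, and update_tryorder=0 keeps it out of the default order.
def _demo_register_and_get():
    register('demo-browser', None, GenericBrowser(['echo', '%s']),
             update_tryorder=0)
    return get('demo-browser').open('http://example.com')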
# General parent classes
class BaseBrowser(object):
"""Parent class for all browsers. Do not use directly."""
args = ['%s']
def __init__(self, name=""):
self.name = name
self.basename = name
def open(self, url, new=0, autoraise=True):
raise NotImplementedError
def open_new(self, url):
return self.open(url, 1)
def open_new_tab(self, url):
return self.open(url, 2)
class GenericBrowser(BaseBrowser):
"""Class for all browsers started with a command
and without remote functionality."""
def __init__(self, name):
if isinstance(name, str):
self.name = name
self.args = ["%s"]
else:
# name should be a list with arguments
self.name = name[0]
self.args = name[1:]
self.basename = os.path.basename(self.name)
def open(self, url, new=0, autoraise=True):
cmdline = [self.name] + [arg.replace("%s", url)
for arg in self.args]
try:
if sys.platform[:3] == 'win':
p = subprocess.Popen(cmdline)
else:
p = subprocess.Popen(cmdline, close_fds=True)
return not p.wait()
except OSError:
return False
class BackgroundBrowser(GenericBrowser):
"""Class for all browsers which are to be started in the
background."""
def open(self, url, new=0, autoraise=True):
cmdline = [self.name] + [arg.replace("%s", url)
for arg in self.args]
try:
if sys.platform[:3] == 'win':
p = subprocess.Popen(cmdline)
else:
setsid = getattr(os, 'setsid', None)
if not setsid:
setsid = getattr(os, 'setpgrp', None)
p = subprocess.Popen(cmdline, close_fds=True, preexec_fn=setsid)
return (p.poll() is None)
except OSError:
return False
class UnixBrowser(BaseBrowser):
"""Parent class for all Unix browsers with remote functionality."""
raise_opts = None
background = False
redirect_stdout = True
# In remote_args, %s will be replaced with the requested URL. %action will
# be replaced depending on the value of 'new' passed to open.
# remote_action is used for new=0 (open). If newwin is not None, it is
# used for new=1 (open_new). If newtab is not None, it is used for
# new=3 (open_new_tab). After both substitutions are made, any empty
# strings in the transformed remote_args list will be removed.
remote_args = ['%action', '%s']
remote_action = None
remote_action_newwin = None
remote_action_newtab = None
def _invoke(self, args, remote, autoraise):
raise_opt = []
if remote and self.raise_opts:
# use autoraise argument only for remote invocation
autoraise = int(autoraise)
opt = self.raise_opts[autoraise]
if opt: raise_opt = [opt]
cmdline = [self.name] + raise_opt + args
if remote or self.background:
inout = io.open(os.devnull, "r+")
else:
# for TTY browsers, we need stdin/out
inout = None
p = subprocess.Popen(cmdline, close_fds=True, stdin=inout,
stdout=(self.redirect_stdout and inout or None),
stderr=inout, start_new_session=True)
if remote:
# wait five seconds. If the subprocess is not finished, the
# remote invocation has (hopefully) started a new instance.
time.sleep(1)
rc = p.poll()
if rc is None:
time.sleep(4)
rc = p.poll()
if rc is None:
return True
# if remote call failed, open() will try direct invocation
return not rc
        elif self.background:
            if p.poll() is None:
                return True
            else:
                return False
        else:
            return not p.wait()
zero-point/hackattack | addField.py | Python | mit | 324 | 0.027778 | from osgeo import ogr
def addField(shapefile,field):
source = ogr.Open(shapefile, 1)
layer = source.GetLayer()
layer_defn = layer.GetLayerDefn()
    new_field = ogr.FieldDefn(field, ogr.OFTInteger)
layer.CreateField(new_field)
source = None
# addField('A_Roads.shp','example') add the 'example' field to A_Roads shapefile
|
xavfernandez/pip | src/pip/__init__.py | Python | mit | 458 | 0 | from pip._internal.utils.typing import MYPY_CHECK_RUNNING
if MYPY_CHECK_RUNNING:
from typing import List, Optional
__version__ = "20.0.dev0"
def main(args=None):
# type: (Optional[List[str]]) -> int
"""This is an internal API only meant for use by pip's own console scripts.
For additional details, see https://github.com/pypa/pip/issues/7498.
"""
from pip._internal.utils.entrypoints import _wrapper
    return _wrapper(args)
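# Hedged usage note: per the docstring above this is an internal entry
# point, but it can be exercised as, e.g.,
#     import pip
#     pip.main(['--version'])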
|
nortikin/sverchok | ui/nodeview_operators.py | Python | gpl-3.0 | 2,214 | 0.00271 | # This file is part of project Sverchok. It's copyrighted by the contributors
# recorded in the version control history of the file, available from
# its original location https://github.com/nortikin/sverchok/commit/master
#
# SPDX-License-Identifier: GPL3
# License-Filename: LICENSE
import bpy
from sverchok.utils.sv_operator_mixins import SvGenericNodeLocator
class SvNodeViewZoomBorder(bpy.types.Operator, SvGenericNodeLocator):
"""
This operator takes a tree name and a node name and scans through the open nodeviews to find
the node and select it and set active, and then executes the view_selected operator
"""
bl_idname = "node.sv_nodeview_zoom_border"
bl_label = "NodeView Zoom Border Operator"
bl_options = {'INTERNAL'}
def sv_execute(self, context, node):
for window in bpy.context.window_manager.windows:
screen = window.screen
for area in screen.areas:
if area.type == 'NODE_EDITOR':
for space in area.spaces:
if hasattr(space, "edit_tree"):
ng = space.edit_tree
if ng == self.get_tree():
# unselect all first.
for treenode in ng.nodes:
treenode.select = False
# set active, and select to get the thicker border around the node
ng.nodes.active = node
node.select = True
else:
continue
for region in area.regions:
if region.type == 'WINDOW':
override = {
'window': window,
'screen': screen,
'area': area,
'region': region
}
bpy.ops.node.view_selected(override)
break
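# Hedged usage note: as a SvGenericNodeLocator subclass, the operator is
# presumably invoked with tree/node identifiers along the lines of
#     bpy.ops.node.sv_nodeview_zoom_border(tree_name=..., node_name=...)
# (the exact property names come from the mixin and are assumptions here).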
classes = [SvNodeViewZoomBorder]
register, unregister = bpy.utils.register_classes_factory(classes)
|
henriquenogueira/aedes | aedes_server/core/checks.py | Python | mit | 1,051 | 0.001903 | from django.conf import settings
from django.core.checks import Error, register
@register
def check_threshold(app_configs, **kwargs):
'''
Checks if THRESHOLD is set on settings.py
'''
errors = []
if not hasattr(settings, 'THRESHOLD'):
errors.append(Error(
'settings must have the threshold for clustering',
            hint='Add THRESHOLD=0.0005 to your settings.py file',
obj=settings,
id='aedes.e_001'
))
return errors
@register
def check_aws_credentials(app_configs, **kwargs):
'''
Checks if THRESHOLD is set on settings.py
'''
errors = []
expected = ('AWS_ACCESS_KEY_ID', 'AWS_SECRET_ACCESS_KEY', 'AWS_STORAGE_BUCKET_NAME')
for exp in expected:
if not hasattr(settings, exp):
            errors.append(Error(
'settings must contain AWS credentials',
hint='Add AWS credentials to your settings.py file',
obj=settings,
id='aedes.e_002'
))
return errors
|
back1992/mezzanine-api-docker | web/mykgb/spiders/quandl_data.py | Python | mit | 2,840 | 0.002817 | # -*- coding: utf-8 -*-
import scrapy
import numpy
import quandl
from mykgb import indicator
from myapp.models import Quandlset
from mykgb.items import MykgbItem
quandl.ApiConfig.api_key = "taJyZN8QXqj2Dj8SNr6Z"
quandl.ApiConfig.api_version = '2015-04-09'
class QuandlDataSpider(scrapy.Spider):
name = "quandl_data"
allowed_domains = ["www.baidu.com"]
start_urls = ['http://www.baidu.com/']
    custom_settings = {
        'ITEM_PIPELINES': {
# 'mykgb.pipelines.DestinyPipeline': 100
'mykgb.pipelines.MykgbPipeline': 100
},
'DEFAULT_REQUEST_HEADERS': {
'Referer': 'http://www.baidu.com'
}
}
def parse(self, response):
Quandlset.objects.update(actived=True)
qs = Quandlset.objects.filter(actived=True)
for p in qs:
symbol = p.quandlcode + "1"
if p and p.namezh:
code_str = p.namezh + ' ' + p.exchange + ' ' + p.name
else:
code_str = p.exchange + ' ' + p.name
try:
df = quandl.get(symbol)[-100:]
except:
print("error", symbol)
p.actived = False
p.save()
continue
if 'Last' in df.columns:
df = df.rename(
# columns={'Open': 'open', 'High': 'high', 'Low': 'low', 'Volume': 'volume', 'Last': 'close'})
columns={'Open': 'open', 'High': 'high', 'Low': 'low', 'Last': 'close'})
elif 'Close' in df.columns:
df = df.rename(
# columns={'Open': 'open', 'High': 'high', 'Low': 'low', 'Volume': 'volume', 'Close': 'close'})
columns={'Open': 'open', 'High': 'high', 'Low': 'low', 'Close': 'close'})
elif 'Settle' in df.columns:
df = df.rename(
# columns={'Open': 'open', 'High': 'high', 'Low': 'low', 'Volume': 'volume', 'Settle': 'close'})
columns={'Open': 'open', 'High': 'high', 'Low': 'low', 'Settle': 'close'})
else:
p.actived = False
p.save()
continue
# df[df['volume'] == 0] = numpy.nan
df = df.dropna()
if not df.empty and df.shape[0] > 50:
item = MykgbItem()
item['title'] = 'sleepless money'
item['code'] = code_str
macd = indicator.get_macd(df)
kdj = indicator.get_kdj(df)
rsi = indicator.get_rsi(df)
cci = indicator.get_cci(df)
item['macd'] = sum(macd.values())
item['kdj'] = sum(kdj.values())
item['rsi'] = sum(rsi.values())
item['cci'] = sum(cci.values())
yield item |
matthewfranglen/spark | examples/src/main/python/ml/anova_test_example.py | Python | mit | 1,926 | 0 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
An example for ANOVA testing.
Run with:
bin/spark-submit examples/src/main/python/ml/anova_test_example.py
"""
from __future__ import print_function
from pyspark.sql import SparkSession
# $example on$
from pyspark.ml.linalg import Vectors
from pyspark.ml.stat import ANOVATest
# $example off$
if __name__ == "__main__":
spark = Spark | Session\
.builder\
.appName("ANOVATestExample")\
.getOrCreate()
# $example on$
data = [(3.0, Vectors.dense([1.7, 4.4, 7.6, 5.8, 9.6, 2.3])),
(2.0, Vectors.dense([8.8, 7.3, 5.7, 7.3, 2.2, 4.1])),
(1.0, Vectors.dense([1.2, 9.5, 2.5, 3.1, 8.7, 2.5])),
(2.0, Vectors.dense([3.7, 9.2, 6.1, 4.1, 7.5, 3.8])),
(4.0, Vectors.dense([8.9, 5.2, 7.8, 8.3, 5.2, 3.0])),
(4.0, Vectors.dense([7.9, 8.5, 9.2, 4.0, 9.4, 2.1]))]
d | f = spark.createDataFrame(data, ["label", "features"])
r = ANOVATest.test(df, "features", "label").head()
print("pValues: " + str(r.pValues))
print("degreesOfFreedom: " + str(r.degreesOfFreedom))
print("fValues: " + str(r.fValues))
# $example off$
spark.stop()
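    # Editor's note (not part of the original example): ANOVATest.test returns
    # one p-value, one degrees-of-freedom entry and one F-value per feature
    # column, so each vector printed above has six entries -- one per feature.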
|
adhoc-dev/odoo-addons | project_issue_solutions_product/project_issue_solution.py | Python | agpl-3.0 | 403 | 0.024814 | #-*- coding: ut | f-8 -*-
from openerp.osv import fields, osv, orm
class project_isssue_solution(osv.osv):
""" Note """
_inherit = 'project.issue.solution'
_columns = {
'product_ids': fields.many2many('product.product', 'project_issue_solution_product_rel', 'solution_id','product_id', string='Products'),
}
# vim:expandtab:smartindent:tabstop=4 | :softtabstop=4:shiftwidth=4:
|
google/grr | grr/server/grr_response_server/flows/general/transfer_test.py | Python | apache-2.0 | 49,622 | 0.004816 | #!/usr/bin/env python
"""Test the file transfer mechanism."""
import hashlib
import io
import itertools
import os
import platform
import struct
import unittest
from unittest import mock
from absl import app
from grr_response_core.lib import constants
from grr_response_core.lib import utils
from grr_response_core.lib.rdfvalues import client as rdf_client
from grr_response_core.lib.rdfvalues import paths as rdf_paths
from grr_response_core.lib.util import compatibility
from grr_response_core.lib.util import temp
from grr_response_server import data_store
from grr_response_server import file_store
from grr_response_server import flow_base
from grr_response_server.databases import db
from grr_response_server.flows.general import transfer
from grr_response_server.rdfvalues import objects as rdf_objects
from grr.test_lib import action_mocks
from grr.test_lib import flow_test_lib
from grr.test_lib import test_lib
# pylint:mode=test
class ClientMock(action_mocks.ActionMock):
BUFFER_SIZE = 1024 * 1024
def __init__(self, mbr_data=None, client_id=None):
self.mbr = mbr_data
self.client_id = client_id
def ReadBuffer(self, args):
return_data = self.mbr[args.offset:args.offset + args.length]
return [
rdf_client.BufferReference(
data=return_data, offset=args.offset, length=len(return_data))
]
class GetMBRFlowTest(flow_test_lib.FlowTestsBaseclass):
"""Test the transfer mechanism."""
mbr = (b"123456789" * 1000)[:4096]
def setUp(self):
super().setUp()
self.client_id = self.SetupClient(0)
def testGetMBR(self):
"""Test that the GetMBR flow works."""
flow_id = flow_test_lib.TestFlowHelper(
transfer.GetMBR.__name__,
ClientMock(self.mbr),
creator=self.test_username,
client_id=self.client_id)
results = flow_test_lib.GetFlowResults(self.client_id, flow_id)
self.assertLen(results, 1)
self.assertEqual(results[0], self.mbr)
def _RunAndCheck(self, chunk_size, download_length):
with utils.Stubber(constants, "CLIENT_MAX_BUFFER_SIZE", chunk_size):
flow_id = flow_test_lib.TestFlowHelper(
transfer.GetMBR.__name__,
ClientMock(self.mbr),
creator=self.test_username,
client_id=self.client_id,
length=download_length)
results = flow_test_lib.GetFlowResults(self.client_id, flow_id)
self.assertLen(results, 1)
self.assertEqual(results[0], self.mbr[:download_length])
def testGetMBRChunked(self):
chunk_size = 100
download_length = 15 * chunk_size
self._RunAndCheck(chunk_size, download_length)
# Not a multiple of the chunk size.
download_length = 15 * chunk_size | + chunk_size // 2
self._RunAndCheck(chunk_size, download_length)
class CompareFDsMixin(object):
def CompareFDs(self, fd1, fd2):
# Seek the files to the end to make sure they are the same size.
fd2.seek(0, 2)
fd1.seek(0, 2)
self.assertEqual(fd2.tell(), fd1.tell())
ranges = [
# Start of file
(0, 100),
# Straddle the first chunk
(16 * 1024 - 100, 300),
# Read past end of file
| (fd2.tell() - 100, 300),
# Zero length reads
(100, 0),
]
for offset, length in ranges:
fd1.seek(offset)
data1 = fd1.read(length)
fd2.seek(offset)
data2 = fd2.read(length)
self.assertEqual(data1, data2)
class GetFileFlowTest(CompareFDsMixin, flow_test_lib.FlowTestsBaseclass):
"""Test the transfer mechanism."""
def setUp(self):
super().setUp()
self.client_id = self.SetupClient(0)
def testGetFile(self):
"""Test that the GetFile flow works."""
client_mock = action_mocks.GetFileClientMock()
pathspec = rdf_paths.PathSpec(
pathtype=rdf_paths.PathSpec.PathType.OS,
path=os.path.join(self.base_path, "test_img.dd"))
flow_test_lib.TestFlowHelper(
transfer.GetFile.__name__,
client_mock,
creator=self.test_username,
client_id=self.client_id,
pathspec=pathspec)
# Fix path for Windows testing.
pathspec.path = pathspec.path.replace("\\", "/")
with open(pathspec.path, "rb") as fd2:
cp = db.ClientPath.FromPathSpec(self.client_id, pathspec)
fd_rel_db = file_store.OpenFile(cp)
self.CompareFDs(fd2, fd_rel_db)
# Only the sha256 hash of the contents should have been calculated:
# in order to put file contents into the file store.
history = data_store.REL_DB.ReadPathInfoHistory(cp.client_id, cp.path_type,
cp.components)
self.assertEqual(history[-1].hash_entry.sha256, fd_rel_db.hash_id.AsBytes())
self.assertIsNone(history[-1].hash_entry.sha1)
self.assertIsNone(history[-1].hash_entry.md5)
def testGetFilePathCorrection(self):
"""Tests that the pathspec returned is used for the aff4path."""
client_mock = action_mocks.GetFileClientMock()
# Deliberately using the wrong casing.
pathspec = rdf_paths.PathSpec(
pathtype=rdf_paths.PathSpec.PathType.OS,
path=os.path.join(self.base_path, "TEST_IMG.dd"))
expected_size = os.path.getsize(os.path.join(self.base_path, "test_img.dd"))
session_id = flow_test_lib.TestFlowHelper(
transfer.GetFile.__name__,
client_mock,
creator=self.test_username,
client_id=self.client_id,
pathspec=pathspec)
results = flow_test_lib.GetFlowResults(self.client_id, session_id)
self.assertLen(results, 1)
res_pathspec = results[0].pathspec
# Fix path for Windows testing.
pathspec.path = pathspec.path.replace("\\", "/")
with open(res_pathspec.path, "rb") as fd2:
fd2.seek(0, 2)
cp = db.ClientPath.FromPathSpec(self.client_id, res_pathspec)
fd_rel_db = file_store.OpenFile(cp)
self.CompareFDs(fd2, fd_rel_db)
# Only the sha256 hash of the contents should have been calculated:
# in order to put file contents into the file store.
history = data_store.REL_DB.ReadPathInfoHistory(cp.client_id, cp.path_type,
cp.components)
self.assertEqual(history[-1].hash_entry.sha256, fd_rel_db.hash_id.AsBytes())
self.assertEqual(history[-1].hash_entry.num_bytes, expected_size)
self.assertIsNone(history[-1].hash_entry.sha1)
self.assertIsNone(history[-1].hash_entry.md5)
def testGetFileIsDirectory(self):
"""Tests that the flow raises when called on directory."""
client_mock = action_mocks.GetFileClientMock()
with temp.AutoTempDirPath() as temp_dir:
pathspec = rdf_paths.PathSpec(
pathtype=rdf_paths.PathSpec.PathType.OS, path=temp_dir)
with self.assertRaises(RuntimeError):
flow_test_lib.TestFlowHelper(
transfer.GetFile.__name__,
client_mock,
creator=self.test_username,
client_id=self.client_id,
pathspec=pathspec)
def testFailsIfStatFailsAndIgnoreStatFailureFlagNotSet(self):
with temp.AutoTempFilePath() as test_path:
with open(test_path, "wb") as fd:
fd.write(b"foo")
pathspec = rdf_paths.PathSpec(
pathtype=rdf_paths.PathSpec.PathType.OS,
path=test_path,
)
args = transfer.GetFileArgs(
pathspec=pathspec,
read_length=1,
)
client_mock = action_mocks.GetFileWithFailingStatClientMock()
with self.assertRaises(RuntimeError):
flow_test_lib.TestFlowHelper(
transfer.GetFile.__name__,
client_mock,
creator=self.test_username,
client_id=self.client_id,
args=args)
def testWorksIfStatFailsAndIgnoreStatFailureFlagIsSet(self):
with temp.AutoTempFilePath() as test_path:
with open(test_path, "wb") as fd:
fd.write(b"foo")
pathspec = rdf_paths.PathSpec(
pathtype=rdf_paths.PathSpec.PathType.OS,
path=test_path,
)
args = transfer.GetFileArgs(
pathspec=pathspec,
read_length=1,
ignore_stat_failure=True,
)
client_mock = action_mocks.GetFileWithFailingStatClientMock()
flow_test_lib.T |
jossthomas/Enigma-Machine | components/Default_Settings.py | Python | mit | 2,231 | 0.013459 | #Sequences of actual rotors used in WWII, format is name, sequences, turnover notch(es)
rotor_sequences = {
'I': ('EKMFLGDQVZNTOWYHXUSPAIBRCJ', ('Q')),
'II': ('AJDKSIRUXBLHWTMCQGZNPYFVOE', ('E')),
'III': ('BDFHJLCPRTXVZNYEIWGAKMUSQO', ('V')),
'IV': ('ESOVPZJAYQUIRHXLNFTGKDCMWB', ('J')),
'V': ('VZBRGITYUPSDNHLXAWMJQOFECK', ('Z')),
'VI': ('JPGVOUMFYQBENHZRDKASXLICTW', ('Z', 'M')),
'VII': ('NZJHGRCXMYSWBOUFAIVLPEKQDT', ('Z', 'M')),
'VIII': ('FKQHTLXOCBJSPDZRAMEWNIUYGV', ('Z', 'M')),
'IC': ('DMTWSILRUYQNKFEJCAZBPGXOHV', ('Q')), #civilian
'IIC': ('HQZGPJTMOBLNCIFDYAW | VEUSRKX', ('Q')), #civilian
'IIIC': ('UQNTLSZFMREHDPXKIBVYGJCWOA', ('Q')), #civilian
'BETA': ('LEYJVCNIXWPBQMDRTAKZGFUHOS', None), #Position 4 Only
'GAMMA': ('FSOKANUERHMBTIYCWLQPZXVGJD', None) #Position 4 Only
}
#Simple letter substitutions before the | sequence is sent back through the rotors. Notably a letter cannot be encoded as itself here.
reflector_sequences = {
'A': 'EJMZALYXVBWFCRQUONTSPIKHGD',
'B': 'YRUHQSLDPXNGOKMIEBFZCWVJAT',
'C': 'FVPJIAOYEDRZXWGCTKUQSBNMHL',
'B Thin': 'ENKQAUYWJICOPBLMDXZVFTHRGS',
'C Thin': 'RDOBJNTKVEHMLFCWZAXGYIPSUQ',
'None': 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' #Early models had no reflector
}
#Entry wheel for Enigma I
ETW = {
'STANDARD': 'ABCDEFGHIJKLMNOPQRSTUVWXYZ',
'NAVY': 'QWERTZUIOPASDFGHJKLYXCVBNM'
}
#Functions used to sort rotor_sequences.keys() into a logical order in frontend
def cat_sort(x):
'''Sort by categories (Civilian, Main, 1942)'''
score_x = -1
if x[-1] == 'C':
score_x = 0
elif x in ('BETA', 'GAMMA'):
score_x = 1
return score_x
def numeral_sort(x):
'''Lazy numeral sort, not worth making a proper parser for so few values'''
numerals = {
'I': 1,
'II': 2,
'III': 3,
'IV': 4,
'V': 5,
'VI': 6,
'VII': 7,
'VIII': 8,
'IX': 9,
'X': 10
}
string = ''.join([i for i in x if i in ('I','V','X')])
return(numerals.get(string, 0)) |
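# --- Editor's sketch: hypothetical usage, not part of the original file.
# The two key functions above combine to order the rotor names for display:
# category first (main rotors, civilian, then Beta/Gamma), Roman numerals within.
ordered = sorted(rotor_sequences, key=lambda n: (cat_sort(n), numeral_sort(n)))
# -> ['I', 'II', 'III', 'IV', 'V', 'VI', 'VII', 'VIII', 'IC', 'IIC', 'IIIC', 'BETA', 'GAMMA']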
kennethjiang/donkey | donkeycar/parts/simulations.py | Python | mit | 2,106 | 0.015195 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Jun 25 17:30:28 2017
@author: wroscoe
"""
import numpy as np
import random
class MovingSquareTelemetry:
"""
    Generator of coordinates of a bouncing moving square for simulations.
"""
def __init__(self, max_velocity=29,
x_min = 10, x_max=150,
y_min = 10, y_max=110):
self.velocity = random.random() * max_velocity
self.x_min, self.x_max = x_min, x_max
self.y_min, self.y_max = y_min, y_max
self.x_direction = random.random() * 2 - 1
self.y_direction = random.random() * 2 - 1
self.x = random.random() * x_max
self.y = random.random() * y_max
self.tel = self.x, self.y
def run(self):
#move
self.x += self.x_direction * self.velocity
self.y += self.y_direction * self.velocity
#make square bounce off walls
if self.y < self.y_min or self.y > self.y_max:
self.y_direction *= -1
if self.x < self.x_min or self.x > self.x_max:
self.x_direction *= -1
return int(self.x), int(self.y)
def update(self):
self.tel = self.run()
def run_threaded(self):
return self.tel
class SquareBoxCamera:
"""
Fake camera that returns an image with a | square box.
This can be used to test if a learning algorithm can learn.
"""
def __init__(self, resolution=(120,160), box_size=4, color=(255, 0, 0)):
self.resolution = resolution
self.box_size = box_size
self.color = color
def run(self, x,y, box_size=None, color=None):
"""
Create an image of a square box at a given coordinates.
"""
rad | ius = int((box_size or self.box_size)/2)
color = color or self.color
frame = np.zeros(shape=self.resolution + (3,))
frame[y - radius: y + radius,
x - radius: x + radius, :] = color
return frame
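# --- Editor's sketch: hypothetical usage, not part of the original file.
# The two classes chain naturally for a learning smoke test: the telemetry part
# yields a target coordinate and the fake camera renders it, giving labelled
# (image, x, y) samples. Guarded so it does not run on import:
if __name__ == '__main__':
    tel = MovingSquareTelemetry()
    cam = SquareBoxCamera()
    samples = []
    for _ in range(10):
        x, y = tel.run()
        samples.append((cam.run(x, y), x, y))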
|
TalosThoren/python-demo | tests/context.py | Python | mit | 138 | 0.043478 | import os
import sys
sys.path.insert( 0, os.path.abspat | h( '..' ) )
from example import list_compare
from example import social_anal | ysis
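# Editor's note (assumption, not part of the original file): this is the common
# "context module" pattern -- test modules import the package under test with
# `from context import list_compare` so the repo root need not be installed
# before running the tests.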
|
ppericard/matamog | scripts/fasta_length_filter.py | Python | agpl-3.0 | 3,594 | 0.001391 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
fasta_length_filter
Description: Filter a fasta file based on sequence length
fasta_length_filter.py -i input.fa -o output.fa -m 300
fasta_length_filter.py -i input.fa -o output.fa -M 1000
-----------------------------------------------------------------------
Author: This software is written and maintained by Pierre Pericard
(pierre.pericard@ed.univ-lille1.fr)
Created: 2016-04-12
Last Modified: 2016-04-12
Licence: GNU GPL 3.0
Copyright 2016 Pierre Pericard
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied war | ranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import argparse
def read_fasta_file_handle(fasta_file_handle):
"""
Parse a fasta file and return a generator
"""
# V | ariables initialization
header = ''
seqlines = list()
sequence_nb = 0
# Reading input file
for line in fasta_file_handle:
if line[0] == '>':
# Yield the last read header and sequence
if sequence_nb:
yield (header, ''.join(seqlines))
del seqlines[:]
# Get header
header = line[1:].rstrip()
sequence_nb += 1
else:
# Concatenate sequence
seqlines.append(line.strip())
# Yield the input file last sequence
yield (header, ''.join(seqlines))
# Close input file
fasta_file_handle.close()
def format_seq(seq, linereturn=80):
"""
Format an input sequence
"""
buff = list()
for i in range(0, len(seq), linereturn):
buff.append("{0}\n".format(seq[i:(i + linereturn)]))
return ''.join(buff).rstrip()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Filter a fasta file based on sequence length.')
parser.add_argument('-i', '--input_fasta', metavar='input',
type=argparse.FileType('r'), default='-',
help='input fasta file')
parser.add_argument('-o', '--output_fasta', metavar='output',
type=argparse.FileType('w'), default='-',
help='ouput fasta file')
parser.add_argument('-m', '--min_length', metavar='MIN',
type=int, default=0,
help='Minimum sequence length')
parser.add_argument('-M', '--max_length', metavar='MAX',
type=int, default=0,
help='Maximum sequence length')
args = parser.parse_args()
    # The loop is duplicated here to avoid re-testing args.max_length for every sequence
if args.max_length:
for header, sequence in read_fasta_file_handle(args.input_fasta):
if len(sequence) >= args.min_length and len(sequence) <= args.max_length:
args.output_fasta.write(">{0}\n{1}\n".format(header, format_seq(sequence)))
else:
for header, sequence in read_fasta_file_handle(args.input_fasta):
if len(sequence) >= args.min_length:
args.output_fasta.write(">{0}\n{1}\n".format(header, format_seq(sequence)))
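# --- Editor's sketch: hypothetical programmatic usage, not part of the original file.
# The generator can also be used without the CLI, e.g. keeping sequences of at
# least 300 bp (note that read_fasta_file_handle() closes the handle itself):
#
# with open('input.fa') as fh, open('output.fa', 'w') as out:
#     for header, seq in read_fasta_file_handle(fh):
#         if len(seq) >= 300:
#             out.write(">{0}\n{1}\n".format(header, format_seq(seq)))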
|
Silhm/bcf-scribble-strips | OSC-Midi/pythonPgm/main.py | Python | mit | 612 | 0.011438 | """
make everything work together!
"""
import argparse
import subproces | s
from midi2osc import MidiToOSC
from osc2midi import OscToMidi
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--ip", default="127.0.0.1",
help="The ip of the OSC server")
parser.add_argument("--port", type=int, default=8000,
help="The port the OSC server is listening on")
args = parser.parse_args()
midiOSCProc = subprocess.Popen(["python3","midi2osc.py"]) #MidiToOSC(args.ip, args.port)
midiOSCProc.wait()
#oscMID | IProc = #OscToMidi(args.ip, args.port)
|
onfire73/pypeskg | ui/pypesvds/lib/extras/pdflib/lzw.py | Python | apache-2.0 | 2,356 | 0.025467 | #!/usr/bin/env python
import sys
stderr = sys.stderr
## LZWDecoder
##
class LZWDecoder(object):
debug = 0
def __init__(self, fp):
self.fp = fp
self.buff = 0
self.bpos = 8
self.nbits = 9
self.table = None
self.prevbuf = None
return
def readbits(self, bits):
v = 0
while 1:
# the number of remaining bits we can get from the current buffer.
r = 8-self.bpos
if bits <= r:
# |-----8-bits-----|
# |-bpos-|-bits-| |
# | |----r----|
v = (v<<bits) | ((self.buff>>(r-bits)) & ((1<<bits)-1))
self.bpos += bits
break
else:
# |-----8-bits-----|
# |-bpos-|---bits----...
# | |----r----|
v = (v<<r) | (self.buff & ((1<<r)-1))
bits -= r
x = self.fp.read(1)
if not x: raise EOFError
self.buff = ord(x)
self.bpos = 0
return v
def feed(self, code):
x = ''
if code == 256:
self.table = [ chr(c) for c in xrange(256) ] # 0-255
self.table.append(None) # 256
self.table.append(None) # 257
self.prevbuf = ''
self.nbits = 9
elif code == 257:
pass
elif not self.prevbuf:
x = self.prevbuf = self.table[code]
else:
if code < len(self.table):
x = self.table[code]
self.table.append(self.prevbuf+x[0])
else:
self.table.append(self.prevbuf+self.prevbuf[0])
x = self.table[code]
l = len(self.table)
if l | == 511:
self.nbits = 10
elif l == 1023:
self.nbits = 11
elif l == 2047:
self.nbits = 12
self.prevbuf = x
return x
def run(self):
while 1:
try:
code = self.readbits(self.nbits)
except EOFError:
break
x = self.feed(code)
yield x
if self.debug:
print >>std | err, ('nbits=%d, code=%d, output=%r, table=%r' %
(self.nbits, code, x, self.table[258:]))
return
def main(argv):
import StringIO
input = '\x80\x0b\x60\x50\x22\x0c\x0c\x85\x01'
fp = StringIO.StringIO(input)
expected = '\x2d\x2d\x2d\x2d\x2d\x41\x2d\x2d\x2d\x42'
LZWDecoder.debug = 1
output = ''.join(LZWDecoder(fp).run())
print (input, expected, output)
print output == expected
return 0
if __name__ == '__main__': sys.exit(main(sys.argv))
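# Editor's note (not part of the original file): feed() widens the code size
# from 9 to 12 bits as the string table passes 511, 1023 and 2047 entries,
# matching the variable-width LZW used in PDF streams; code 256 clears the
# table and 257 is the end-of-data marker (ignored here -- run() simply
# decodes until readbits() hits EOF).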
|
jaysonsantos/servo | components/script/dom/bindings/codegen/CodegenRust.py | Python | mpl-2.0 | 279,760 | 0.001659 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
# Common codegen classes.
from collections import defaultdict
from itertools import groupby
import operator
import os
import re
import string
import textwrap
import functools
from WebIDL import (
BuiltinTypes,
IDLBuiltinType,
IDLNullValue,
IDLNullableType,
IDLType,
IDLInterfaceMember,
IDLUndefinedValue,
IDLWrapperType,
)
from Configuration import (
MakeNativeName,
MemberIsUnforgeable,
getMo | duleFromObject,
getTypesFromCallback,
getTypesFromDescriptor,
getTypesFromDictionary,
iteratorNativeType
)
AUTOGENERATED_WARNING_COMMENT = \
"/* THIS FILE IS AUTOGENERATED - DO NOT EDIT */\n\n"
FINALIZE_HOOK_NAME = '_finalize'
TRACE_HOOK_NAM | E = '_trace'
CONSTRUCT_HOOK_NAME = '_constructor'
HASINSTANCE_HOOK_NAME = '_hasInstance'
RUST_KEYWORDS = {"abstract", "alignof", "as", "become", "box", "break", "const", "continue",
"else", "enum", "extern", "false", "final", "fn", "for", "if", "impl", "in",
"let", "loop", "macro", "match", "mod", "move", "mut", "offsetof", "override",
"priv", "proc", "pub", "pure", "ref", "return", "static", "self", "sizeof",
"struct", "super", "true", "trait", "type", "typeof", "unsafe", "unsized",
"use", "virtual", "where", "while", "yield"}
def replaceFileIfChanged(filename, newContents):
"""
Read a copy of the old file, so that we don't touch it if it hasn't changed.
Returns True if the file was updated, false otherwise.
"""
# XXXjdm This doesn't play well with make right now.
# Force the file to always be updated, or else changing CodegenRust.py
# will cause many autogenerated bindings to be regenerated perpetually
# until the result is actually different.
# oldFileContents = ""
# try:
# with open(filename, 'rb') as oldFile:
# oldFileContents = ''.join(oldFile.readlines())
# except:
# pass
# if newContents == oldFileContents:
# return False
with open(filename, 'wb') as f:
f.write(newContents)
return True
def toStringBool(arg):
return str(not not arg).lower()
def toBindingNamespace(arg):
return re.sub("((_workers)?$)", "Binding\\1", MakeNativeName(arg))
def stripTrailingWhitespace(text):
tail = '\n' if text.endswith('\n') else ''
lines = text.splitlines()
for i in range(len(lines)):
lines[i] = lines[i].rstrip()
return '\n'.join(lines) + tail
def innerSequenceType(type):
assert type.isSequence()
return type.inner.inner if type.nullable() else type.inner
builtinNames = {
IDLType.Tags.bool: 'bool',
IDLType.Tags.int8: 'i8',
IDLType.Tags.int16: 'i16',
IDLType.Tags.int32: 'i32',
IDLType.Tags.int64: 'i64',
IDLType.Tags.uint8: 'u8',
IDLType.Tags.uint16: 'u16',
IDLType.Tags.uint32: 'u32',
IDLType.Tags.uint64: 'u64',
IDLType.Tags.unrestricted_float: 'f32',
IDLType.Tags.float: 'Finite<f32>',
IDLType.Tags.unrestricted_double: 'f64',
IDLType.Tags.double: 'Finite<f64>'
}
numericTags = [
IDLType.Tags.int8, IDLType.Tags.uint8,
IDLType.Tags.int16, IDLType.Tags.uint16,
IDLType.Tags.int32, IDLType.Tags.uint32,
IDLType.Tags.int64, IDLType.Tags.uint64,
IDLType.Tags.unrestricted_float,
IDLType.Tags.unrestricted_double
]
def unwrapCastableObject(descriptor, source, codeOnFailure, conversionFunction):
"""
A function for unwrapping an object named by the "source" argument
based on the passed-in descriptor. Returns the string of the Rust expression of
the appropriate type.
codeOnFailure is the code to run if unwrapping fails.
"""
args = {
"failureCode": CGIndenter(CGGeneric(codeOnFailure), 8).define(),
"function": conversionFunction,
"source": source,
}
return """\
match %(function)s(%(source)s) {
Ok(val) => val,
Err(()) => {
%(failureCode)s
}
}""" % args
# We'll want to insert the indent at the beginnings of lines, but we
# don't want to indent empty lines. So only indent lines that have a
# non-newline character on them.
lineStartDetector = re.compile("^(?=[^\n#])", re.MULTILINE)
def indent(s, indentLevel=2):
"""
Indent C++ code.
Weird secret feature: this doesn't indent lines that start with # (such as
#include lines or #ifdef/#endif).
"""
if s == "":
return s
return re.sub(lineStartDetector, indentLevel * " ", s)
# dedent() and fill() are often called on the same string multiple
# times. We want to memoize their return values so we don't keep
# recomputing them all the time.
def memoize(fn):
"""
Decorator to memoize a function of one argument. The cache just
grows without bound.
"""
cache = {}
@functools.wraps(fn)
def wrapper(arg):
retval = cache.get(arg)
if retval is None:
retval = cache[arg] = fn(arg)
return retval
return wrapper
@memoize
def dedent(s):
"""
Remove all leading whitespace from s, and remove a blank line
at the beginning.
"""
if s.startswith('\n'):
s = s[1:]
return textwrap.dedent(s)
# This works by transforming the fill()-template to an equivalent
# string.Template.
fill_multiline_substitution_re = re.compile(r"( *)\$\*{(\w+)}(\n)?")
@memoize
def compile_fill_template(template):
"""
Helper function for fill(). Given the template string passed to fill(),
do the reusable part of template processing and return a pair (t,
argModList) that can be used every time fill() is called with that
template argument.
argsModList is list of tuples that represent modifications to be
made to args. Each modification has, in order: i) the arg name,
ii) the modified name, iii) the indent depth.
"""
t = dedent(template)
assert t.endswith("\n") or "\n" not in t
argModList = []
def replace(match):
"""
Replaces a line like ' $*{xyz}\n' with '${xyz_n}',
where n is the indent depth, and add a corresponding entry to
argModList.
Note that this needs to close over argModList, so it has to be
defined inside compile_fill_template().
"""
indentation, name, nl = match.groups()
depth = len(indentation)
# Check that $*{xyz} appears by itself on a line.
prev = match.string[:match.start()]
if (prev and not prev.endswith("\n")) or nl is None:
raise ValueError("Invalid fill() template: $*{%s} must appear by itself on a line" % name)
# Now replace this whole line of template with the indented equivalent.
modified_name = name + "_" + str(depth)
argModList.append((name, modified_name, depth))
return "${" + modified_name + "}"
t = re.sub(fill_multiline_substitution_re, replace, t)
return (string.Template(t), argModList)
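# Editor's note (illustrative, not part of the original file): for a dedented
# template line "  $*{body}\n", compile_fill_template() replaces the whole line
# with "${body_2}" and records ("body", "body_2", 2) in argModList; fill() then
# indents the supplied multi-line argument by two spaces before substitution,
# as its docstring below describes.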
def fill(template, **args):
"""
Convenience function for filling in a multiline template.
`fill(template, name1=v1, name2=v2)` is a lot like
`string.Template(template).substitute({"name1": v1, "name2": v2})`.
However, it's shorter, and has a few nice features:
* If `template` is indented, fill() automatically dedents it!
This makes code using fill() with Python's multiline strings
much nicer to look at.
* If `template` starts with a blank line, fill() strips it off.
(Again, convenient with multiline strings.)
* fill() recognizes a special kind of substitution
of the form `$*{name}`.
Use this to paste in, and automatically indent, multiple lines.
(Mnemonic: The `*` is for "multiple lines").
A `$*` substitution must appear by itself on a line, with optional
preceding indentation (spaces only). The whole line is replaced by the
corresponding keyword argument, indented appropriately. If the
argument is |
dhuang/incubator-airflow | airflow/migrations/versions/852ae6c715af_add_rendered_task_instance_fields_table.py | Python | apache-2.0 | 2,123 | 0.000471 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF | licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KI | ND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Add RenderedTaskInstanceFields table
Revision ID: 852ae6c715af
Revises: a4c2fd67d16b
Create Date: 2020-03-10 22:19:18.034961
"""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = '852ae6c715af'
down_revision = 'a4c2fd67d16b'
branch_labels = None
depends_on = None
TABLE_NAME = 'rendered_task_instance_fields'
def upgrade():
"""Apply Add RenderedTaskInstanceFields table"""
json_type = sa.JSON
conn = op.get_bind()
if conn.dialect.name != "postgresql":
# Mysql 5.7+/MariaDB 10.2.3 has JSON support. Rather than checking for
# versions, check for the function existing.
try:
conn.execute("SELECT JSON_VALID(1)").fetchone()
except (sa.exc.OperationalError, sa.exc.ProgrammingError):
json_type = sa.Text
op.create_table(
TABLE_NAME,
sa.Column('dag_id', sa.String(length=250), nullable=False),
sa.Column('task_id', sa.String(length=250), nullable=False),
sa.Column('execution_date', sa.TIMESTAMP(timezone=True), nullable=False),
sa.Column('rendered_fields', json_type(), nullable=False),
sa.PrimaryKeyConstraint('dag_id', 'task_id', 'execution_date'),
)
def downgrade():
"""Drop RenderedTaskInstanceFields table"""
op.drop_table(TABLE_NAME)
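# Editor's note (not part of the original migration): on MySQL/MariaDB builds
# without JSON support, the SELECT JSON_VALID(1) probe in upgrade() raises and
# rendered_fields silently degrades to a TEXT column; downgrade() drops the
# whole table either way.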
|
jlant/playground | python/hello-cookiecutter/python-boilerplate/tests/test_python-boilerplate.py | Python | mit | 438 | 0.002283 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_python-boilerplate
---------- | ------------------------
Tests for `python-boilerplate` module.
"""
import unittest
# NOTE: hyphens are invalid in Python identifiers; underscored names are assumed here.
from python_boilerplate import python_boilerplate
class TestPythonBoilerplate(unittest.TestCase):
def setUp(self):
pass
def test_something(self):
pass |
def tearDown(self):
pass
if __name__ == '__main__':
unittest.main()
|