| repo_name (string) | path (string) | copies (string) | size (string) | content (string) | license (string) | hash (int64) | line_mean (float64) | line_max (int64) | alpha_frac (float64) | autogenerated (bool) | ratio (float64) | config_test (bool) | has_no_keywords (bool) | few_assignments (bool) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| AndrewWasHere/audiolens | lib/beamformers/beamformer.py | 1 | 2105 |
"""
Copyright 2015 Andrew Lin.
All rights reserved.
Licensed under the BSD 3-clause License. See LICENSE.txt or
<http://opensource.org/licenses/BSD-3-Clause>.
"""
from abc import ABCMeta, abstractmethod
import numpy as np
from lib.albatross import log
_log = log.get_logger(__name__)
class BeamFormerError(Exception):
"""Error while beam forming."""
class BeamFormer(metaclass=ABCMeta):
"""Audio beam former base class."""
def __init__(self, max_channels):
self.max_channels = max_channels
# Public Interfaces. #######################################################
def process(self, audio):
"""Process audio file.
Args:
audio (np.ndarray or list of np.ndarray): multi-channel audio.
Raises:
ValueError: Problem with audio.
BeamFormerError: Problem while processing audio.
"""
_log.debug('%s.process(%s)', self.__class__.__name__, audio)
# Process audio.
if isinstance(audio, np.ndarray):
_, channels = audio.shape
audio = [audio[:, n] for n in range(channels)]
n_channels = len(audio)
if n_channels < 2:
            raise ValueError(
                'Not enough channels in audio to beam form. (found %d)' %
                n_channels
            )
elif self.max_channels and n_channels > self.max_channels:
            raise ValueError(
                'Too many channels in audio. There cannot be more than %d '
                'channels. Found %d.' % (self.max_channels, n_channels)
            )
self._process(audio) # Derived class implementation.
# Private methods. #########################################################
@abstractmethod
def _process(self, audio):
"""Process audio.
This function is implemented in derived classes.
Args:
audio (list of np.ndarray): multi-channel audio.
Raises:
BeamFormerException (or a derivation thereof): Problem while
processing audio.
"""
| bsd-3-clause | 896,673,129,523,582,500 | 27.445946 | 80 | 0.549644 | false | 4.606127 | false | false | false |
| sportorg/pysport | sportorg/libs/iof/parser.py | 1 | 10856 |
import xml.etree.ElementTree as ET
class IOFParseResult(object):
def __init__(self, name, data):
self.name = name
self.data = data
def parse(file):
ns = {
'iof': 'http://www.orienteering.org/datastandard/3.0',
'orgeo': 'http://orgeo.ru/iof-xml-extensions/3.0',
}
tree = ET.parse(file)
results = [
IOFParseResult('EntryList', entry_list(tree, ns)),
IOFParseResult('CourseData', course_data(tree, ns)),
IOFParseResult('ResultList', result_list(tree, ns)),
IOFParseResult('Event', event(tree, ns)),
]
return [result for result in results if result.data is not None]
def course_data(tree, ns):
root = tree.getroot()
if 'CourseData' not in root.tag:
return
courses = []
version = '0'
if 'iofVersion' in root.attrib:
version = root.attrib['iofVersion'][0]
elif root.find('IOFVersion') is not None:
version = root.find('IOFVersion').attrib['version'][0]
if version == '3':
for course_el in root.find('iof:RaceCourseData', ns).findall('iof:Course', ns):
course = {
'name': course_el.find('iof:Name', ns).text,
'length': int(course_el.find('iof:Length', ns).text),
'climb': int(course_el.find('iof:Climb', ns).text),
'controls': [],
}
for course_control_el in course_el.findall('iof:CourseControl', ns):
leg_length = 0
if course_control_el.find('iof:LegLength', ns) is not None:
leg_length = int(course_control_el.find('iof:LegLength', ns).text)
course['controls'].append(
{
'type': course_control_el.attrib['type'], # Start, Control, Finish
'control': course_control_el.find('iof:Control', ns).text,
'leg_length': leg_length,
}
)
courses.append(course)
elif version == '2':
for course_el in root.findall('Course'):
course_variation_el = course_el.find('CourseVariation')
course = {
'name': course_el.find('CourseName').text.strip(),
'length': int(course_variation_el.find('CourseLength').text),
'climb': int(course_variation_el.find('CourseClimb').text.strip()) if course_variation_el.find(
'CourseClimb').text.strip().isdigit() else 0,
'controls': [],
}
for course_control_el in course_variation_el.findall('CourseControl'):
leg_length = 0
if course_control_el.find('LegLength') is not None:
leg_length = int(course_control_el.find('LegLength').text)
course['controls'].append(
{
'type': 'Control',
'control': course_control_el.find('ControlCode').text.strip(),
'leg_length': leg_length,
}
)
courses.append(course)
return courses
def entry_list(tree, ns):
root = tree.getroot()
if 'EntryList' not in root.tag:
return
groups = {}
for group_el in root.findall('iof:Class', ns):
group_id = group_el.find('iof:Id', ns).text
groups[group_id] = {
'id': group_id,
'name': group_el.find('iof:Name', ns).text,
'short_name': group_el.find('iof:ShortName', ns).text,
}
person_entries = []
for person_entry_el in root.findall('iof:PersonEntry', ns):
person_el = person_entry_el.find('iof:Person', ns)
birth_date_el = person_el.find('iof:BirthDate', ns)
id_el = person_el.find('iof:Id', ns)
person = {
'family': person_el.find('iof:Name', ns).find('iof:Family', ns).text,
'given': person_el.find('iof:Name', ns).find('iof:Given', ns).text,
'extensions': {},
}
if birth_date_el is not None:
person['birth_date'] = birth_date_el.text
if id_el is not None:
person['id'] = id_el.text
extensions_el = person_el.find('iof:Extensions', ns)
if extensions_el:
qual_el = extensions_el.find('orgeo:Qual', ns)
if qual_el is not None:
person['extensions']['qual'] = qual_el.text
bib_el = extensions_el.find('orgeo:BibNumber', ns)
if bib_el is not None:
person['extensions']['bib'] = bib_el.text
org_el = person_entry_el.find('iof:Organisation', ns)
organization = None
if org_el:
organization = {
'id': org_el.find('iof:Id', ns).text,
'name': org_el.find('iof:Name', ns).text
}
role = org_el.find('iof:Role', ns)
if role:
role_person = role.find('iof:Person', ns)
organization['role_person'] = '{} {}'.format(
role_person.find('iof:Name', ns).find('iof:Family', ns).text,
role_person.find('iof:Name', ns).find('iof:Given', ns).text
)
group_el = person_entry_el.find('iof:Class', ns)
if group_el:
group = {
'id': group_el.find('iof:Id', ns).text,
'name': group_el.find('iof:Name', ns).text
}
groups[group['id']] = {
'id': group['id'],
'name': group['name']
}
control_card_el = person_entry_el.find('iof:ControlCard', ns)
control_card = ''
if control_card_el is not None:
control_card = control_card_el.text
race_numbers = []
for race_num_el in person_entry_el.findall('iof:RaceNumber', ns):
race_numbers.append(race_num_el.text)
person_entries.append(
{
'person': person,
'organization': organization,
'group': groups[group['id']] if group['id'] in groups else group,
'control_card': control_card,
'race_numbers': race_numbers,
}
)
return person_entries
def result_list(tree, ns):
root = tree.getroot()
if 'ResultList' not in root.tag:
return
groups = {}
person_results = []
for class_result in root.findall('iof:ClassResult', ns):
"""Group of results for class"""
group_el = class_result.find('iof:Class', ns)
group_id = group_el.find('iof:Id', ns).text
groups[group_id] = {
'id': group_id,
'name': group_el.find('iof:Name', ns).text,
'short_name': group_el.find('iof:ShortName', ns).text if group_el.find('iof:ShortName', ns) else ''
}
for person_result_el in class_result.findall('iof:PersonResult', ns):
person_el = person_result_el.find('iof:Person', ns)
birth_date_el = person_el.find('iof:BirthDate', ns)
id_el = person_el.find('iof:Id', ns)
person = {
'family': person_el.find('iof:Name', ns).find('iof:Family', ns).text,
'given': person_el.find('iof:Name', ns).find('iof:Given', ns).text,
'extensions': {}
}
if birth_date_el is not None:
person['birth_date'] = birth_date_el.text
if id_el is not None:
person['id'] = id_el.text
org_el = person_result_el.find('iof:Organisation', ns)
organization = None
if org_el:
organization = {
'id': org_el.find('iof:Id', ns).text,
'name': org_el.find('iof:Name', ns).text
}
role = org_el.find('iof:Role', ns)
if role:
role_person = role.find('iof:Person', ns)
organization['role_person'] = '{} {}'.format(
role_person.find('iof:Name', ns).find('iof:Family', ns).text,
role_person.find('iof:Name', ns).find('iof:Given', ns).text
)
result_el = person_result_el.find('iof:Result', ns)
bib_el = result_el.find('iof:BibNumber', ns)
control_card_el = result_el.find('iof:ControlCard', ns)
finish_time_el = result_el.find('iof:FinishTime', ns)
splits = []
            for split in result_el.findall('iof:SplitTime', ns):
split_time_el = split.find('iof:Time', ns)
if split_time_el is not None:
control_code = split.find('iof:ControlCode', ns)
split_obj = {
'control_code': control_code.text,
'time': split_time_el.text
}
splits.append(split_obj)
result = {
'bib': result_el.find('iof:BibNumber', ns).text if bib_el is not None else '',
'start_time': result_el.find('iof:StartTime', ns).text,
'finish_time': finish_time_el.text if finish_time_el is not None else '',
'status': result_el.find('iof:Status', ns).text,
'control_card': control_card_el.text if control_card_el is not None else '',
'splits': splits
}
person_results.append({
'person': person,
'organization': organization,
'group': groups[group_id],
'result': result,
})
return person_results
def event(tree, ns):
root = tree.getroot()
event_obj = {'races': []}
event_el = root.find('iof:Event', ns)
if event_el is None:
return
if event_el.find('iof:Name', ns) is not None:
event_obj['name'] = event_el.find('iof:Name', ns).text
if event_el.find('iof:StartTime', ns) is not None:
event_obj['start_time'] = event_el.find('iof:StartTime', ns).text
if event_el.find('iof:URL', ns) is not None:
event_obj['url'] = event_el.find('iof:URL', ns).text
if event_el is not None:
for race_el in event_el.findall('iof:Race', ns):
race_obj = {'name': race_el.find('iof:Name', ns).text if race_el.find('iof:Name', ns) is not None else ''}
start_time_el = race_el.find('iof:StartTime', ns)
if start_time_el:
if start_time_el.find('iof:Date', ns) is not None:
race_obj['date'] = start_time_el.find('iof:Date', ns).text
if start_time_el.find('iof:Time', ns) is not None:
race_obj['time'] = start_time_el.find('iof:Time', ns).text
event_obj['races'].append(race_obj)
return event_obj
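# Illustrative sketch (not part of the original module): driving the parser.
# 'entries.xml' is a hypothetical IOF XML v3 file path.
if __name__ == '__main__':
    for parse_result in parse('entries.xml'):
        # parse_result.name is one of 'EntryList', 'CourseData', 'ResultList'
        # or 'Event'; parse_result.data holds the corresponding parsed data.
        print(parse_result.name, parse_result.data)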
| gpl-3.0 | -5,870,004,246,323,201,000 | 37.496454 | 118 | 0.510041 | false | 3.652759 | false | false | false |
| mindw/pip | pip/index.py | 1 | 37607 |
"""Routines related to PyPI, indexes"""
from __future__ import absolute_import
import logging
import cgi
from collections import namedtuple
import itertools
import sys
import os
import re
import mimetypes
import posixpath
import warnings
from pip._vendor.six.moves.urllib import parse as urllib_parse
from pip._vendor.six.moves.urllib import request as urllib_request
from pip.compat import ipaddress
from pip.utils import (
cached_property, splitext, normalize_path,
ARCHIVE_EXTENSIONS, SUPPORTED_EXTENSIONS,
)
from pip.utils.deprecation import RemovedInPip9Warning, RemovedInPip10Warning
from pip.utils.logging import indent_log
from pip.exceptions import (
DistributionNotFound, BestVersionAlreadyInstalled, InvalidWheelFilename,
UnsupportedWheel,
)
from pip.download import HAS_TLS, is_url, path_to_url, url_to_path
from pip.wheel import Wheel, wheel_ext
from pip.pep425tags import supported_tags
from pip._vendor import html5lib, requests, six
from pip._vendor.packaging.version import parse as parse_version
from pip._vendor.packaging.utils import canonicalize_name
from pip._vendor.requests.exceptions import SSLError
__all__ = ['FormatControl', 'fmt_ctl_handle_mutual_exclude', 'PackageFinder']
SECURE_ORIGINS = [
# protocol, hostname, port
# Taken from Chrome's list of secure origins (See: http://bit.ly/1qrySKC)
("https", "*", "*"),
("*", "localhost", "*"),
("*", "127.0.0.0/8", "*"),
("*", "::1/128", "*"),
("file", "*", None),
# ssh is always secure.
("ssh", "*", "*"),
]
logger = logging.getLogger(__name__)
class InstallationCandidate(object):
def __init__(self, project, version, location):
self.project = project
self.version = parse_version(version)
self.location = location
self._key = (self.project, self.version, self.location)
def __repr__(self):
return "<InstallationCandidate({0!r}, {1!r}, {2!r})>".format(
self.project, self.version, self.location,
)
def __hash__(self):
return hash(self._key)
def __lt__(self, other):
return self._compare(other, lambda s, o: s < o)
def __le__(self, other):
return self._compare(other, lambda s, o: s <= o)
def __eq__(self, other):
return self._compare(other, lambda s, o: s == o)
def __ge__(self, other):
return self._compare(other, lambda s, o: s >= o)
def __gt__(self, other):
return self._compare(other, lambda s, o: s > o)
def __ne__(self, other):
return self._compare(other, lambda s, o: s != o)
def _compare(self, other, method):
if not isinstance(other, InstallationCandidate):
return NotImplemented
return method(self._key, other._key)
class PackageFinder(object):
"""This finds packages.
This is meant to match easy_install's technique for looking for
packages, by reading pages and looking for appropriate links.
"""
def __init__(self, find_links, index_urls, allow_all_prereleases=False,
trusted_hosts=None, process_dependency_links=False,
session=None, format_control=None):
"""Create a PackageFinder.
:param format_control: A FormatControl object or None. Used to control
the selection of source packages / binary packages when consulting
the index and links.
"""
if session is None:
raise TypeError(
"PackageFinder() missing 1 required keyword argument: "
"'session'"
)
# Build find_links. If an argument starts with ~, it may be
# a local file relative to a home directory. So try normalizing
# it and if it exists, use the normalized version.
# This is deliberately conservative - it might be fine just to
# blindly normalize anything starting with a ~...
self.find_links = []
for link in find_links:
if link.startswith('~'):
new_link = normalize_path(link)
if os.path.exists(new_link):
link = new_link
self.find_links.append(link)
self.index_urls = index_urls
self.dependency_links = []
# These are boring links that have already been logged somehow:
self.logged_links = set()
self.format_control = format_control or FormatControl(set(), set())
# Domains that we won't emit warnings for when not using HTTPS
self.secure_origins = [
("*", host, "*")
for host in (trusted_hosts if trusted_hosts else [])
]
# Do we want to allow _all_ pre-releases?
self.allow_all_prereleases = allow_all_prereleases
# Do we process dependency links?
self.process_dependency_links = process_dependency_links
# The Session we'll use to make requests
self.session = session
# If we don't have TLS enabled, then WARN if anyplace we're looking
# relies on TLS.
if not HAS_TLS:
for link in itertools.chain(self.index_urls, self.find_links):
parsed = urllib_parse.urlparse(link)
if parsed.scheme == "https":
logger.warning(
"pip is configured with locations that require "
"TLS/SSL, however the ssl module in Python is not "
"available."
)
break
def add_dependency_links(self, links):
# # FIXME: this shouldn't be global list this, it should only
# # apply to requirements of the package that specifies the
# # dependency_links value
# # FIXME: also, we should track comes_from (i.e., use Link)
if self.process_dependency_links:
warnings.warn(
"Dependency Links processing has been deprecated and will be "
"removed in a future release.",
RemovedInPip9Warning,
)
self.dependency_links.extend(links)
@staticmethod
def _sort_locations(locations, expand_dir=False):
"""
Sort locations into "files" (archives) and "urls", and return
a pair of lists (files,urls)
"""
files = []
urls = []
# puts the url for the given file path into the appropriate list
def sort_path(path):
url = path_to_url(path)
if mimetypes.guess_type(url, strict=False)[0] == 'text/html':
urls.append(url)
else:
files.append(url)
for url in locations:
is_local_path = os.path.exists(url)
is_file_url = url.startswith('file:')
if is_local_path or is_file_url:
if is_local_path:
path = url
else:
path = url_to_path(url)
if os.path.isdir(path):
if expand_dir:
path = os.path.realpath(path)
for item in os.listdir(path):
sort_path(os.path.join(path, item))
elif is_file_url:
urls.append(url)
elif os.path.isfile(path):
sort_path(path)
else:
logger.warning(
"Url '%s' is ignored: it is neither a file "
"nor a directory.", url)
elif is_url(url):
# Only add url with clear scheme
urls.append(url)
else:
logger.warning(
"Url '%s' is ignored. It is either a non-existing "
"path or lacks a specific scheme.", url)
return files, urls
def _candidate_sort_key(self, candidate):
"""
Function used to generate link sort key for link tuples.
The greater the return value, the more preferred it is.
If not finding wheels, then sorted by version only.
If finding wheels, then the sort order is by version, then:
1. existing installs
2. wheels ordered via Wheel.support_index_min()
3. source archives
Note: it was considered to embed this logic into the Link
comparison operators, but then different sdist links
with the same version, would have to be considered equal
"""
support_num = len(supported_tags)
if candidate.location.is_wheel:
# can raise InvalidWheelFilename
wheel = Wheel(candidate.location.filename)
if not wheel.supported():
raise UnsupportedWheel(
"%s is not a supported wheel for this platform. It "
"can't be sorted." % wheel.filename
)
pri = -(wheel.support_index_min())
else: # sdist
pri = -(support_num)
return (candidate.version, pri)
def _validate_secure_origin(self, logger, location):
# Determine if this url used a secure transport mechanism
parsed = urllib_parse.urlparse(str(location))
origin = (parsed.scheme, parsed.hostname, parsed.port)
# The protocol to use to see if the protocol matches.
# Don't count the repository type as part of the protocol: in
# cases such as "git+ssh", only use "ssh". (I.e., Only verify against
# the last scheme.)
protocol = origin[0].rsplit('+', 1)[-1]
# Determine if our origin is a secure origin by looking through our
# hardcoded list of secure origins, as well as any additional ones
# configured on this PackageFinder instance.
for secure_origin in (SECURE_ORIGINS + self.secure_origins):
if protocol != secure_origin[0] and secure_origin[0] != "*":
continue
try:
# We need to do this decode dance to ensure that we have a
# unicode object, even on Python 2.x.
addr = ipaddress.ip_address(
origin[1]
if (
isinstance(origin[1], six.text_type) or
origin[1] is None
)
else origin[1].decode("utf8")
)
network = ipaddress.ip_network(
secure_origin[1]
if isinstance(secure_origin[1], six.text_type)
else secure_origin[1].decode("utf8")
)
except ValueError:
# We don't have both a valid address or a valid network, so
# we'll check this origin against hostnames.
if (origin[1] and
origin[1].lower() != secure_origin[1].lower() and
secure_origin[1] != "*"):
continue
else:
# We have a valid address and network, so see if the address
# is contained within the network.
if addr not in network:
continue
            # Check to see if the port matches
if (origin[2] != secure_origin[2] and
secure_origin[2] != "*" and
secure_origin[2] is not None):
continue
# If we've gotten here, then this origin matches the current
# secure origin and we should return True
return True
# If we've gotten to this point, then the origin isn't secure and we
# will not accept it as a valid location to search. We will however
# log a warning that we are ignoring it.
logger.warning(
"The repository located at %s is not a trusted or secure host and "
"is being ignored. If this repository is available via HTTPS it "
"is recommended to use HTTPS instead, otherwise you may silence "
"this warning and allow it anyways with '--trusted-host %s'.",
parsed.hostname,
parsed.hostname,
)
return False
def _get_index_urls_locations(self, project_name):
"""Returns the locations found via self.index_urls
Checks the url_name on the main (first in the list) index and
use this url_name to produce all locations
"""
def mkurl_pypi_url(url):
loc = posixpath.join(
url,
urllib_parse.quote(canonicalize_name(project_name)))
# For maximum compatibility with easy_install, ensure the path
# ends in a trailing slash. Although this isn't in the spec
# (and PyPI can handle it without the slash) some other index
# implementations might break if they relied on easy_install's
# behavior.
if not loc.endswith('/'):
loc = loc + '/'
return loc
return [mkurl_pypi_url(url) for url in self.index_urls]
def find_all_candidates(self, project_name):
"""Find all available InstallationCandidate for project_name
This checks index_urls, find_links and dependency_links.
All versions found are returned as an InstallationCandidate list.
See _link_package_versions for details on which files are accepted
"""
index_locations = self._get_index_urls_locations(project_name)
index_file_loc, index_url_loc = self._sort_locations(index_locations)
fl_file_loc, fl_url_loc = self._sort_locations(
self.find_links, expand_dir=True)
dep_file_loc, dep_url_loc = self._sort_locations(self.dependency_links)
file_locations = (
Link(url) for url in itertools.chain(
index_file_loc, fl_file_loc, dep_file_loc)
)
# We trust every url that the user has given us whether it was given
# via --index-url or --find-links
# We explicitly do not trust links that came from dependency_links
# We want to filter out any thing which does not have a secure origin.
url_locations = [
link for link in itertools.chain(
(Link(url) for url in index_url_loc),
(Link(url) for url in fl_url_loc),
(Link(url) for url in dep_url_loc),
)
if self._validate_secure_origin(logger, link)
]
logger.debug('%d location(s) to search for versions of %s:',
len(url_locations), project_name)
for location in url_locations:
logger.debug('* %s', location)
canonical_name = canonicalize_name(project_name)
formats = fmt_ctl_formats(self.format_control, canonical_name)
search = Search(project_name, canonical_name, formats)
find_links_versions = self._package_versions(
# We trust every directly linked archive in find_links
(Link(url, '-f') for url in self.find_links),
search
)
page_versions = []
for page in self._get_pages(url_locations, project_name):
logger.debug('Analyzing links from page %s', page.url)
with indent_log():
page_versions.extend(
self._package_versions(page.links, search)
)
dependency_versions = self._package_versions(
(Link(url) for url in self.dependency_links), search
)
if dependency_versions:
logger.debug(
'dependency_links found: %s',
', '.join([
version.location.url for version in dependency_versions
])
)
file_versions = self._package_versions(file_locations, search)
if file_versions:
file_versions.sort(reverse=True)
logger.debug(
'Local files found: %s',
', '.join([
url_to_path(candidate.location.url)
for candidate in file_versions
])
)
# This is an intentional priority ordering
return (
file_versions + find_links_versions + page_versions +
dependency_versions
)
def find_requirement(self, req, upgrade):
"""Try to find a Link matching req
Expects req, an InstallRequirement and upgrade, a boolean
Returns a Link if found,
Raises DistributionNotFound or BestVersionAlreadyInstalled otherwise
"""
all_candidates = self.find_all_candidates(req.name)
# Filter out anything which doesn't match our specifier
compatible_versions = set(
req.specifier.filter(
# We turn the version object into a str here because otherwise
# when we're debundled but setuptools isn't, Python will see
# packaging.version.Version and
# pkg_resources._vendor.packaging.version.Version as different
# types. This way we'll use a str as a common data interchange
# format. If we stop using the pkg_resources provided specifier
# and start using our own, we can drop the cast to str().
[str(c.version) for c in all_candidates],
prereleases=(
self.allow_all_prereleases
if self.allow_all_prereleases else None
),
)
)
applicable_candidates = [
# Again, converting to str to deal with debundling.
c for c in all_candidates if str(c.version) in compatible_versions
]
if applicable_candidates:
best_candidate = max(applicable_candidates,
key=self._candidate_sort_key)
else:
best_candidate = None
if req.satisfied_by is not None:
installed_version = parse_version(req.satisfied_by.version)
else:
installed_version = None
if installed_version is None and best_candidate is None:
logger.critical(
'Could not find a version that satisfies the requirement %s '
'(from versions: %s)',
req,
', '.join(
sorted(
set(str(c.version) for c in all_candidates),
key=parse_version,
)
)
)
raise DistributionNotFound(
'No matching distribution found for %s' % req
)
best_installed = False
if installed_version and (
best_candidate is None or
best_candidate.version <= installed_version):
best_installed = True
if not upgrade and installed_version is not None:
if best_installed:
logger.debug(
'Existing installed version (%s) is most up-to-date and '
'satisfies requirement',
installed_version,
)
else:
logger.debug(
'Existing installed version (%s) satisfies requirement '
'(most up-to-date version is %s)',
installed_version,
best_candidate.version,
)
return None
if best_installed:
            # We have an existing version, and it's the best version
logger.debug(
'Installed version (%s) is most up-to-date (past versions: '
'%s)',
installed_version,
', '.join(sorted(compatible_versions, key=parse_version)) or
"none",
)
raise BestVersionAlreadyInstalled
logger.debug(
'Using version %s (newest of versions: %s)',
best_candidate.version,
', '.join(sorted(compatible_versions, key=parse_version))
)
return best_candidate.location
def _get_pages(self, locations, project_name):
"""
Yields (page, page_url) from the given locations, skipping
locations that have errors.
"""
seen = set()
for location in locations:
if location in seen:
continue
seen.add(location)
page = self._get_page(location)
if page is None:
continue
yield page
_py_version_re = re.compile(r'-py([123]\.?[0-9]?)$')
def _sort_links(self, links):
"""
Returns elements of links in order, non-egg links first, egg links
second, while eliminating duplicates
"""
eggs, no_eggs = [], []
seen = set()
for link in links:
if link not in seen:
seen.add(link)
if link.egg_fragment:
eggs.append(link)
else:
no_eggs.append(link)
return no_eggs + eggs
def _package_versions(self, links, search):
result = []
for link in self._sort_links(links):
v = self._link_package_versions(link, search)
if v is not None:
result.append(v)
return result
def _log_skipped_link(self, link, reason):
if link not in self.logged_links:
logger.debug('Skipping link %s; %s', link, reason)
self.logged_links.add(link)
def _link_package_versions(self, link, search):
"""Return an InstallationCandidate or None"""
version = None
if link.egg_fragment:
egg_info = link.egg_fragment
ext = link.ext
else:
egg_info, ext = link.splitext()
if not ext:
self._log_skipped_link(link, 'not a file')
return
if ext not in SUPPORTED_EXTENSIONS:
self._log_skipped_link(
link, 'unsupported archive format: %s' % ext)
return
if "binary" not in search.formats and ext == wheel_ext:
self._log_skipped_link(
link, 'No binaries permitted for %s' % search.supplied)
return
if "macosx10" in link.path and ext == '.zip':
self._log_skipped_link(link, 'macosx10 one')
return
if ext == wheel_ext:
try:
wheel = Wheel(link.filename)
except InvalidWheelFilename:
self._log_skipped_link(link, 'invalid wheel filename')
return
if canonicalize_name(wheel.name) != search.canonical:
self._log_skipped_link(
link, 'wrong project name (not %s)' % search.supplied)
return
if not wheel.supported():
self._log_skipped_link(
link, 'it is not compatible with this Python')
return
version = wheel.version
# This should be up by the search.ok_binary check, but see issue 2700.
if "source" not in search.formats and ext != wheel_ext:
self._log_skipped_link(
link, 'No sources permitted for %s' % search.supplied)
return
if not version:
version = egg_info_matches(egg_info, search.supplied, link)
if version is None:
self._log_skipped_link(
link, 'wrong project name (not %s)' % search.supplied)
return
match = self._py_version_re.search(version)
if match:
version = version[:match.start()]
py_version = match.group(1)
if py_version != sys.version[:3]:
self._log_skipped_link(
link, 'Python version is incorrect')
return
logger.debug('Found link %s, version: %s', link, version)
return InstallationCandidate(search.supplied, version, link)
def _get_page(self, link):
return HTMLPage.get_page(link, session=self.session)
def egg_info_matches(
egg_info, search_name, link,
_egg_info_re=re.compile(r'([a-z0-9_.]+)-([a-z0-9_.!+-]+)', re.I)):
"""Pull the version part out of a string.
:param egg_info: The string to parse. E.g. foo-2.1
:param search_name: The name of the package this belongs to. None to
infer the name. Note that this cannot unambiguously parse strings
like foo-2-2 which might be foo, 2-2 or foo-2, 2.
:param link: The link the string came from, for logging on failure.
"""
match = _egg_info_re.search(egg_info)
if not match:
logger.debug('Could not parse version from link: %s', link)
return None
if search_name is None:
full_match = match.group(0)
return full_match[full_match.index('-'):]
name = match.group(0).lower()
# To match the "safe" name that pkg_resources creates:
name = name.replace('_', '-')
# project name and version must be separated by a dash
look_for = search_name.lower() + "-"
if name.startswith(look_for):
return match.group(0)[len(look_for):]
else:
return None
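# Illustrative note (not part of the original module): given
# egg_info_matches('foo-2.1', 'foo', link) the regex match is 'foo-2.1' and
# the returned version is '2.1'; with search_name=None the leading dash is
# kept and '-2.1' is returned instead.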
class HTMLPage(object):
"""Represents one page, along with its URL"""
def __init__(self, content, url, headers=None):
# Determine if we have any encoding information in our headers
encoding = None
if headers and "Content-Type" in headers:
content_type, params = cgi.parse_header(headers["Content-Type"])
if "charset" in params:
encoding = params['charset']
self.content = content
self.parsed = html5lib.parse(
self.content,
encoding=encoding,
namespaceHTMLElements=False,
)
self.url = url
self.headers = headers
def __str__(self):
return self.url
@classmethod
def get_page(cls, link, skip_archives=True, session=None):
if session is None:
raise TypeError(
"get_page() missing 1 required keyword argument: 'session'"
)
url = link.url
url = url.split('#', 1)[0]
# Check for VCS schemes that do not support lookup as web pages.
from pip.vcs import VcsSupport
for scheme in VcsSupport.schemes:
if url.lower().startswith(scheme) and url[len(scheme)] in '+:':
logger.debug('Cannot look at %s URL %s', scheme, link)
return None
try:
if skip_archives:
filename = link.filename
for bad_ext in ARCHIVE_EXTENSIONS:
if filename.endswith(bad_ext):
content_type = cls._get_content_type(
url, session=session,
)
if content_type.lower().startswith('text/html'):
break
else:
logger.debug(
'Skipping page %s because of Content-Type: %s',
link,
content_type,
)
return
logger.debug('Getting page %s', url)
# Tack index.html onto file:// URLs that point to directories
(scheme, netloc, path, params, query, fragment) = \
urllib_parse.urlparse(url)
if (scheme == 'file' and
os.path.isdir(urllib_request.url2pathname(path))):
# add trailing slash if not present so urljoin doesn't trim
# final segment
if not url.endswith('/'):
url += '/'
url = urllib_parse.urljoin(url, 'index.html')
logger.debug(' file: URL is directory, getting %s', url)
resp = session.get(
url,
headers={
"Accept": "text/html",
"Cache-Control": "max-age=600",
},
)
resp.raise_for_status()
# The check for archives above only works if the url ends with
# something that looks like an archive. However that is not a
# requirement of an url. Unless we issue a HEAD request on every
# url we cannot know ahead of time for sure if something is HTML
# or not. However we can check after we've downloaded it.
content_type = resp.headers.get('Content-Type', 'unknown')
if not content_type.lower().startswith("text/html"):
logger.debug(
'Skipping page %s because of Content-Type: %s',
link,
content_type,
)
return
inst = cls(resp.content, resp.url, resp.headers)
except requests.HTTPError as exc:
cls._handle_fail(link, exc, url)
except SSLError as exc:
reason = ("There was a problem confirming the ssl certificate: "
"%s" % exc)
cls._handle_fail(link, reason, url, meth=logger.info)
except requests.ConnectionError as exc:
cls._handle_fail(link, "connection error: %s" % exc, url)
except requests.Timeout:
cls._handle_fail(link, "timed out", url)
except requests.TooManyRedirects as exc:
cls._handle_fail(
link,
"Error: %s" % exc,
url
)
except Exception as e:
reason = ("There was an unknown error: %s" % e)
cls._handle_fail(
link,
reason,
url
)
else:
return inst
@staticmethod
def _handle_fail(link, reason, url, meth=None):
if meth is None:
meth = logger.debug
meth("Could not fetch URL %s: %s - skipping", link, reason)
@staticmethod
def _get_content_type(url, session):
"""Get the Content-Type of the given url, using a HEAD request"""
scheme, netloc, path, query, fragment = urllib_parse.urlsplit(url)
if scheme not in ('http', 'https'):
# FIXME: some warning or something?
# assertion error?
return ''
resp = session.head(url, allow_redirects=True)
resp.raise_for_status()
return resp.headers.get("Content-Type", "")
@cached_property
def base_url(self):
bases = [
x for x in self.parsed.findall(".//base")
if x.get("href") is not None
]
if bases and bases[0].get("href"):
return bases[0].get("href")
else:
return self.url
@property
def links(self):
"""Yields all links in the page"""
for anchor in self.parsed.findall(".//a"):
if anchor.get("href"):
href = anchor.get("href")
url = self.clean_link(
urllib_parse.urljoin(self.base_url, href)
)
yield Link(url, self)
_clean_re = re.compile(r'[^a-z0-9$&+,/:;=?@.#%_\\|-]', re.I)
def clean_link(self, url):
"""Makes sure a link is fully encoded. That is, if a ' ' shows up in
the link, it will be rewritten to %20 (while not over-quoting
% or other characters)."""
return self._clean_re.sub(
lambda match: '%%%2x' % ord(match.group(0)), url)
class Link(object):
def __init__(self, url, comes_from=None):
# url can be a UNC windows share
if url.startswith('\\\\'):
url = path_to_url(url)
self.url = url
self.comes_from = comes_from
def __str__(self):
if self.comes_from:
return '%s (from %s)' % (self.url, self.comes_from)
else:
return str(self.url)
def __repr__(self):
return '<Link %s>' % self
def __eq__(self, other):
if not isinstance(other, Link):
return NotImplemented
return self.url == other.url
def __ne__(self, other):
if not isinstance(other, Link):
return NotImplemented
return self.url != other.url
def __lt__(self, other):
if not isinstance(other, Link):
return NotImplemented
return self.url < other.url
def __le__(self, other):
if not isinstance(other, Link):
return NotImplemented
return self.url <= other.url
def __gt__(self, other):
if not isinstance(other, Link):
return NotImplemented
return self.url > other.url
def __ge__(self, other):
if not isinstance(other, Link):
return NotImplemented
return self.url >= other.url
def __hash__(self):
return hash(self.url)
@property
def filename(self):
_, netloc, path, _, _ = urllib_parse.urlsplit(self.url)
name = posixpath.basename(path.rstrip('/')) or netloc
name = urllib_parse.unquote(name)
assert name, ('URL %r produced no filename' % self.url)
return name
@property
def scheme(self):
return urllib_parse.urlsplit(self.url)[0]
@property
def netloc(self):
return urllib_parse.urlsplit(self.url)[1]
@property
def path(self):
return urllib_parse.unquote(urllib_parse.urlsplit(self.url)[2])
def splitext(self):
return splitext(posixpath.basename(self.path.rstrip('/')))
@property
def ext(self):
return self.splitext()[1]
@property
def url_without_fragment(self):
scheme, netloc, path, query, fragment = urllib_parse.urlsplit(self.url)
return urllib_parse.urlunsplit((scheme, netloc, path, query, None))
_egg_fragment_re = re.compile(r'[#&]egg=([^&]*)')
@property
def egg_fragment(self):
match = self._egg_fragment_re.search(self.url)
if not match:
return None
return match.group(1)
_subdirectory_fragment_re = re.compile(r'[#&]subdirectory=([^&]*)')
@property
def subdirectory_fragment(self):
match = self._subdirectory_fragment_re.search(self.url)
if not match:
return None
return match.group(1)
_hash_re = re.compile(
r'(sha1|sha224|sha384|sha256|sha512|md5)=([a-f0-9]+)'
)
@property
def hash(self):
match = self._hash_re.search(self.url)
if match:
return match.group(2)
return None
@property
def hash_name(self):
match = self._hash_re.search(self.url)
if match:
return match.group(1)
return None
@property
def show_url(self):
return posixpath.basename(self.url.split('#', 1)[0].split('?', 1)[0])
@property
def is_wheel(self):
return self.ext == wheel_ext
@property
def is_artifact(self):
"""
Determines if this points to an actual artifact (e.g. a tarball) or if
it points to an "abstract" thing like a path or a VCS location.
"""
from pip.vcs import vcs
if self.scheme in vcs.all_schemes:
return False
return True
FormatControl = namedtuple('FormatControl', 'no_binary only_binary')
"""This object has two fields, no_binary and only_binary.
If a field is falsy, it isn't set. If it is {':all:'}, it should match all
packages except those listed in the other field. Only one field can be set
to {':all:'} at a time. The rest of the time exact package name matches
are listed, with any given package only showing up in one field at a time.
"""
def fmt_ctl_handle_mutual_exclude(value, target, other):
new = value.split(',')
while ':all:' in new:
other.clear()
target.clear()
target.add(':all:')
del new[:new.index(':all:') + 1]
if ':none:' not in new:
# Without a none, we want to discard everything as :all: covers it
return
for name in new:
if name == ':none:':
target.clear()
continue
name = canonicalize_name(name)
other.discard(name)
target.add(name)
def fmt_ctl_formats(fmt_ctl, canonical_name):
result = set(["binary", "source"])
if canonical_name in fmt_ctl.only_binary:
result.discard('source')
elif canonical_name in fmt_ctl.no_binary:
result.discard('binary')
elif ':all:' in fmt_ctl.only_binary:
result.discard('source')
elif ':all:' in fmt_ctl.no_binary:
result.discard('binary')
return frozenset(result)
def fmt_ctl_no_binary(fmt_ctl):
fmt_ctl_handle_mutual_exclude(
':all:', fmt_ctl.no_binary, fmt_ctl.only_binary)
def fmt_ctl_no_use_wheel(fmt_ctl):
fmt_ctl_no_binary(fmt_ctl)
warnings.warn(
'--no-use-wheel is deprecated and will be removed in the future. '
' Please use --no-binary :all: instead.', RemovedInPip10Warning,
stacklevel=2)
Search = namedtuple('Search', 'supplied canonical formats')
"""Capture key aspects of a search.
:attribute supplied: The user supplied package.
:attribute canonical: The canonical package name.
:attribute formats: The formats allowed for this package. Should be a set
with 'binary' or 'source' or both in it.
"""
| mit | 5,830,695,384,401,401,000 | 34.411488 | 79 | 0.551573 | false | 4.402599 | false | false | false |
| maybelinot/findltr | findltr/utils.py | 1 | 4673 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author: maybelinot
# @Email: edik.trott@yandex.ru
# @Date: 2015-09-12 16:06:18
# @Last Modified by: maybelinot
# @Last Modified time: 2015-09-12 20:14:58
from __future__ import unicode_literals, absolute_import
import logging
import os
import subprocess
import sys
import time
# EXTERNALLY INSTALLED
from BCBio import GFF
from Bio import SeqIO, Seq, SeqRecord, SeqFeature
from Bio.Blast import NCBIXML
from Bio.Blast.Applications import NcbiblastnCommandline
from io import StringIO
import yaml
# Load logging before anything else
logging.basicConfig(format='>> %(message)s')
logr = logging.getLogger('findltr')
def export_gff(seq, young_lcp, outputfile):
gff_output = outputfile or 'rec_%s.gff3' % time.time()
logr.info('Found LTRs are saved in ' + gff_output)
records = []
# fix name to chrN based on input seq
gff = SeqRecord.SeqRecord(Seq.Seq(seq), "seq0")
top_feature = []
for idx, item in enumerate(young_lcp):
seq1 = SeqRecord.SeqRecord(
Seq.Seq(seq[item[0][0]:item[0][1]]), id="seq1")
seq2 = SeqRecord.SeqRecord(
Seq.Seq(seq[item[1][0]:item[1][1]]), id="seq2")
with open("/tmp/seq1.fasta", "w") as query:
SeqIO.write(seq1, query, "fasta")
with open("/tmp/seq2.fasta", "w") as subject:
SeqIO.write(seq2, subject, "fasta")
blast_output = NcbiblastnCommandline(
query="/tmp/seq1.fasta", subject="/tmp/seq2.fasta", outfmt=5)()[0]
blast_result_record = NCBIXML.read(StringIO(unicode(blast_output, "utf-8")))
identity = 0
for alignment in blast_result_record.alignments:
for hsp in alignment.hsps:
identity = max(
hsp.identities / float(hsp.align_length) * 100.0, identity)
identity = "%0.2f" % identity
# cut zeros tail
# identity = identity.rstrip("0")
# identity = identity.rstrip(".")
# improve seqfeatures appending
sub_qualifiers_region = {"source": "ltrfind",
"ID": "repeat_region" + str(idx + 1)}
top_feature.append(SeqFeature.SeqFeature(SeqFeature.FeatureLocation(item[0][0] - 4, item[1][1] + 4),
type="repeat_region", strand=0, qualifiers=sub_qualifiers_region))
sub_qualifiers_target_site = {"source": "ltrfind",
"Parent": "repeat_region" + str(idx + 1)}
top_feature.append(SeqFeature.SeqFeature(SeqFeature.FeatureLocation(item[0][0] - 4, item[0][0]),
type="target_site_duplication", strand=0, qualifiers=sub_qualifiers_target_site))
sub_qualifiers = {"source": "ltrfind",
"ID": "LTR_retrotransposon" + str(idx + 1),
"Parent": "repeat_region" + str(idx + 1),
"ltr_similarity": identity,
"seq_number": "0"}
top_feature.append(SeqFeature.SeqFeature(SeqFeature.FeatureLocation(item[0][0], item[1][1]),
type="LTR_retrotransposon", strand=0, qualifiers=sub_qualifiers))
sub_qualifiers_ltrs = {"source": "ltrfind", "Parent": "LTR_retrotransposon" +
str(idx + 1)}
top_feature.append(SeqFeature.SeqFeature(SeqFeature.FeatureLocation(item[0][0], item[0][1]),
type="long_terminal_repeat", strand=0, qualifiers=sub_qualifiers_ltrs))
top_feature.append(SeqFeature.SeqFeature(SeqFeature.FeatureLocation(item[1][0], item[1][1]),
type="long_terminal_repeat", strand=0, qualifiers=sub_qualifiers_ltrs))
top_feature.append(SeqFeature.SeqFeature(SeqFeature.FeatureLocation(item[1][1], item[1][1] + 4),
type="target_site_duplication", strand=0, qualifiers=sub_qualifiers_target_site))
gff.features = top_feature
# track name='findltr' description='findltr Supplied Track'
with open(gff_output, "w") as out_handle:
GFF.write([gff], out_handle)
def run(cmd):
cmd = cmd if isinstance(cmd, list) else cmd.split()
try:
process = subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
except Exception as error:
logr.error("'{0}' failed: {1}".format(cmd, error))
raise
output, errors = process.communicate()
if process.returncode != 0 or errors:
if output:
logr.error(output)
if errors:
logr.error(errors)
sys.exit(process.returncode)
return output, errors
| gpl-3.0 | -1,912,421,539,951,222,800 | 39.991228 | 114 | 0.598545 | false | 3.474349 | false | false | false |
| hzlf/openbroadcast | website/djangorestframework/resources.py | 1 | 15477 |
from django import forms
from django.core.urlresolvers import reverse, get_urlconf, get_resolver, NoReverseMatch
from django.db import models
from django.db.models.query import QuerySet
from django.db.models.fields.related import RelatedField
from django.utils.encoding import smart_unicode
from djangorestframework.response import ErrorResponse
from djangorestframework.serializer import Serializer, _SkipField
from djangorestframework.utils import as_tuple
import decimal
import inspect
import re
class BaseResource(Serializer):
"""
Base class for all Resource classes, which simply defines the interface they provide.
"""
fields = None
include = None
exclude = None
def __init__(self, view=None, depth=None, stack=[], **kwargs):
super(BaseResource, self).__init__(depth, stack, **kwargs)
self.view = view
def validate_request(self, data, files=None):
"""
Given the request content return the cleaned, validated content.
Typically raises a :exc:`response.ErrorResponse` with status code 400 (Bad Request) on failure.
"""
return data
def filter_response(self, obj):
"""
Given the response content, filter it into a serializable object.
"""
return self.serialize(obj)
class Resource(BaseResource):
"""
A Resource determines how a python object maps to some serializable data.
Objects that a resource can act on include plain Python object instances, Django Models, and Django QuerySets.
"""
# The model attribute refers to the Django Model which this Resource maps to.
# (The Model's class, rather than an instance of the Model)
model = None
# By default the set of returned fields will be the set of:
#
# 0. All the fields on the model, excluding 'id'.
# 1. All the properties on the model.
# 2. The absolute_url of the model, if a get_absolute_url method exists for the model.
#
# If you wish to override this behaviour,
# you should explicitly set the fields attribute on your class.
fields = None
class FormResource(Resource):
"""
Resource class that uses forms for validation.
Also provides a :meth:`get_bound_form` method which may be used by some renderers.
On calling :meth:`validate_request` this validator may set a :attr:`bound_form_instance` attribute on the
view, which may be used by some renderers.
"""
form = None
"""
The :class:`Form` class that should be used for request validation.
This can be overridden by a :attr:`form` attribute on the :class:`views.View`.
"""
def validate_request(self, data, files=None):
"""
Given some content as input return some cleaned, validated content.
Raises a :exc:`response.ErrorResponse` with status code 400 (Bad Request) on failure.
Validation is standard form validation, with an additional constraint that *no extra unknown fields* may be supplied.
On failure the :exc:`response.ErrorResponse` content is a dict which may contain :obj:`'errors'` and :obj:`'field-errors'` keys.
If the :obj:`'errors'` key exists it is a list of strings of non-field errors.
If the :obj:`'field-errors'` key exists it is a dict of ``{'field name as string': ['errors as strings', ...]}``.
"""
return self._validate(data, files)
def _validate(self, data, files, allowed_extra_fields=(), fake_data=None):
"""
Wrapped by validate to hide the extra flags that are used in the implementation.
allowed_extra_fields is a list of fields which are not defined by the form, but which we still
expect to see on the input.
fake_data is a string that should be used as an extra key, as a kludge to force .errors
to be populated when an empty dict is supplied in `data`
"""
# We'd like nice error messages even if no content is supplied.
# Typically if an empty dict is given to a form Django will
# return .is_valid() == False, but .errors == {}
#
# To get around this case we revalidate with some fake data.
if fake_data:
data[fake_data] = '_fake_data'
allowed_extra_fields = tuple(allowed_extra_fields) + ('_fake_data',)
bound_form = self.get_bound_form(data, files)
if bound_form is None:
return data
self.view.bound_form_instance = bound_form
data = data and data or {}
files = files and files or {}
seen_fields_set = set(data.keys())
form_fields_set = set(bound_form.fields.keys())
allowed_extra_fields_set = set(allowed_extra_fields)
# In addition to regular validation we also ensure no additional fields are being passed in...
unknown_fields = seen_fields_set - (form_fields_set | allowed_extra_fields_set)
unknown_fields = unknown_fields - set(('csrfmiddlewaretoken', '_accept', '_method')) # TODO: Ugh.
# Check using both regular validation, and our stricter no additional fields rule
if bound_form.is_valid() and not unknown_fields:
# Validation succeeded...
cleaned_data = bound_form.cleaned_data
# Add in any extra fields to the cleaned content...
for key in (allowed_extra_fields_set & seen_fields_set) - set(cleaned_data.keys()):
cleaned_data[key] = data[key]
return cleaned_data
# Validation failed...
detail = {}
if not bound_form.errors and not unknown_fields:
# is_valid() was False, but errors was empty.
            # If we haven't already done so, attempt revalidation with some fake data
# to force django to give us an errors dict.
if fake_data is None:
return self._validate(data, files, allowed_extra_fields, '_fake_data')
            # If we've already set fake_data and we're still here, fall back gracefully.
detail = {u'errors': [u'No content was supplied.']}
else:
# Add any non-field errors
if bound_form.non_field_errors():
detail[u'errors'] = bound_form.non_field_errors()
# Add standard field errors
field_errors = dict(
(key, map(unicode, val))
for (key, val)
in bound_form.errors.iteritems()
if not key.startswith('__')
)
# Add any unknown field errors
for key in unknown_fields:
field_errors[key] = [u'This field does not exist.']
if field_errors:
detail[u'field-errors'] = field_errors
# Return HTTP 400 response (BAD REQUEST)
raise ErrorResponse(400, detail)
def get_form_class(self, method=None):
"""
Returns the form class used to validate this resource.
"""
# A form on the view overrides a form on the resource.
form = getattr(self.view, 'form', None) or self.form
# Use the requested method or determine the request method
if method is None and hasattr(self.view, 'request') and hasattr(self.view, 'method'):
method = self.view.method
elif method is None and hasattr(self.view, 'request'):
method = self.view.request.method
# A method form on the view or resource overrides the general case.
# Method forms are attributes like `get_form` `post_form` `put_form`.
if method:
form = getattr(self, '%s_form' % method.lower(), form)
form = getattr(self.view, '%s_form' % method.lower(), form)
return form
def get_bound_form(self, data=None, files=None, method=None):
"""
Given some content return a Django form bound to that content.
If form validation is turned off (:attr:`form` class attribute is :const:`None`) then returns :const:`None`.
"""
form = self.get_form_class(method)
if not form:
return None
if data is not None or files is not None:
return form(data, files)
return form()
#class _RegisterModelResource(type):
# """
# Auto register new ModelResource classes into ``_model_to_resource``
# """
# def __new__(cls, name, bases, dct):
# resource_cls = type.__new__(cls, name, bases, dct)
# model_cls = dct.get('model', None)
# if model_cls:
# _model_to_resource[model_cls] = resource_cls
# return resource_cls
class ModelResource(FormResource):
"""
Resource class that uses forms for validation and otherwise falls back to a model form if no form is set.
Also provides a :meth:`get_bound_form` method which may be used by some renderers.
"""
# Auto-register new ModelResource classes into _model_to_resource
#__metaclass__ = _RegisterModelResource
form = None
"""
The form class that should be used for request validation.
If set to :const:`None` then the default model form validation will be used.
This can be overridden by a :attr:`form` attribute on the :class:`views.View`.
"""
model = None
"""
The model class which this resource maps to.
This can be overridden by a :attr:`model` attribute on the :class:`views.View`.
"""
fields = None
"""
The list of fields to use on the output.
May be any of:
    The name of a model field. To view nested resources, give the field as a tuple of ("fieldName", resource) where resource may be a ModelResource reference, the name of a ModelResource reference as a string, or a tuple of strings representing fields on the nested model.
The name of an attribute on the model.
The name of an attribute on the resource.
The name of a method on the model, with a signature like ``func(self)``.
The name of a method on the resource, with a signature like ``func(self, instance)``.
"""
exclude = ('id', 'pk')
"""
The list of fields to exclude. This is only used if :attr:`fields` is not set.
"""
include = ('url',)
"""
The list of extra fields to include. This is only used if :attr:`fields` is not set.
"""
def __init__(self, view=None, depth=None, stack=[], **kwargs):
"""
Allow :attr:`form` and :attr:`model` attributes set on the
:class:`View` to override the :attr:`form` and :attr:`model`
attributes set on the :class:`Resource`.
"""
super(ModelResource, self).__init__(view, depth, stack, **kwargs)
self.model = getattr(view, 'model', None) or self.model
def validate_request(self, data, files=None):
"""
Given some content as input return some cleaned, validated content.
Raises a :exc:`response.ErrorResponse` with status code 400 (Bad Request) on failure.
Validation is standard form or model form validation,
with an additional constraint that no extra unknown fields may be supplied,
and that all fields specified by the fields class attribute must be supplied,
even if they are not validated by the form/model form.
On failure the ErrorResponse content is a dict which may contain :obj:`'errors'` and :obj:`'field-errors'` keys.
If the :obj:`'errors'` key exists it is a list of strings of non-field errors.
        If the :obj:`'field-errors'` key exists it is a dict of ``{'field name as string': ['errors as strings', ...]}``.
"""
return self._validate(data, files, allowed_extra_fields=self._property_fields_set)
def get_bound_form(self, data=None, files=None, method=None):
"""
Given some content return a ``Form`` instance bound to that content.
If the :attr:`form` class attribute has been explicitly set then that class will be used
to create the Form, otherwise the model will be used to create a ModelForm.
"""
form = self.get_form_class(method)
if not form and self.model:
# Fall back to ModelForm which we create on the fly
class OnTheFlyModelForm(forms.ModelForm):
class Meta:
model = self.model
#fields = tuple(self._model_fields_set)
form = OnTheFlyModelForm
# Both form and model not set? Okay bruv, whatevs...
if not form:
return None
# Instantiate the ModelForm as appropriate
if data is not None or files is not None:
if issubclass(form, forms.ModelForm) and hasattr(self.view, 'model_instance'):
# Bound to an existing model instance
return form(data, files, instance=self.view.model_instance)
else:
return form(data, files)
return form()
def url(self, instance):
"""
Attempts to reverse resolve the url of the given model *instance* for this resource.
Requires a ``View`` with :class:`mixins.InstanceMixin` to have been created for this resource.
This method can be overridden if you need to set the resource url reversing explicitly.
"""
if not hasattr(self, 'view_callable'):
raise _SkipField
# dis does teh magicks...
urlconf = get_urlconf()
resolver = get_resolver(urlconf)
possibilities = resolver.reverse_dict.getlist(self.view_callable[0])
for tuple_item in possibilities:
possibility = tuple_item[0]
# pattern = tuple_item[1]
# Note: defaults = tuple_item[2] for django >= 1.3
for result, params in possibility:
#instance_attrs = dict([ (param, getattr(instance, param)) for param in params if hasattr(instance, param) ])
instance_attrs = {}
for param in params:
if not hasattr(instance, param):
continue
attr = getattr(instance, param)
if isinstance(attr, models.Model):
instance_attrs[param] = attr.pk
else:
instance_attrs[param] = attr
try:
return reverse(self.view_callable[0], kwargs=instance_attrs)
except NoReverseMatch:
pass
raise _SkipField
@property
def _model_fields_set(self):
"""
Return a set containing the names of validated fields on the model.
"""
model_fields = set(field.name for field in self.model._meta.fields)
        if self.fields:
return model_fields & set(as_tuple(self.fields))
return model_fields - set(as_tuple(self.exclude))
@property
def _property_fields_set(self):
"""
Returns a set containing the names of validated properties on the model.
"""
property_fields = set(attr for attr in dir(self.model) if
isinstance(getattr(self.model, attr, None), property)
and not attr.startswith('_'))
if self.fields:
return property_fields & set(as_tuple(self.fields))
return property_fields.union(set(as_tuple(self.include))) - set(as_tuple(self.exclude))
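# Illustrative sketch (not part of the original module): a hypothetical
# ModelResource subclass using the `fields`, `exclude` and `include` attributes
# documented above. `BlogPost` is an assumed Django model, not defined here.
#
#class BlogPostResource(ModelResource):
#    model = BlogPost
#    fields = ('title', 'published', 'get_absolute_url')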
| gpl-3.0 | -5,236,262,275,512,934,000 | 36.841076 | 277 | 0.616528 | false | 4.368332 | false | false | false |
| asttra/pysces | pysces/PyscesLink.py | 1 | 52634 |
"""
PySCeS - Python Simulator for Cellular Systems (http://pysces.sourceforge.net)
Copyright (C) 2004-2015 B.G. Olivier, J.M. Rohwer, J.-H.S Hofmeyr all rights reserved,
Brett G. Olivier (bgoli@users.sourceforge.net)
Triple-J Group for Molecular Cell Physiology
Stellenbosch University, South Africa.
Permission to use, modify, and distribute this software is given under the
terms of the PySceS (BSD style) license. See LICENSE.txt that came with
this distribution for specifics.
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
Brett G. Olivier
"""
from pysces.version import __version__
__doc__ = '''
PyscesLink
----------
Interfaces to external software and API's, has replaced the PySCeS contrib classes.
'''
# for METATOOLlink
import os, re, cStringIO
# for SBWWebLink
import urllib, urllib2, getpass
class SBWlink(object):
"""Generic access for local SBW services using SBWPython """
sbw = None
psbw = None
sbwModuleProxy = None
moduleDict = None
modules = None
def __init__(self):
try:
import SBW as SBW
import SBW.psbw as psbw
## reload(SBW)
## reload(psbw)
self.sbw = SBW
self.psbw = SBW.psbw
self.sbwModuleProxy = SBW.sbwModuleProxy
self.moduleDict = SBW.sbwModuleProxy.moduleDict
self.modules = []
for m in self.moduleDict:
if self.moduleDict[m].pythonName not in ['python']:
self.SBW_exposeAll(self.moduleDict[m])
self.modules.append(self.moduleDict[m].pythonName)
setattr(self, self.moduleDict[m].pythonName, self.moduleDict[m])
print '\nSBWlink established.'
except Exception, ex:
print ex
print '\nSBWlink not established.'
def SBW_exposeAll(self, module):
for s in module.services:
s = getattr(module, s)
for m in s.methods:
getattr(s, m)
def SBW_getActiveModules(self):
idlist = []
namelst = []
for id in self.psbw.getModuleIdList():
idlist.append(id)
namelst.append(self.psbw.getModuleName(id))
for id in self.moduleDict.keys():
if id not in idlist:
self.moduleDict.pop(id)
for name in range(len(self.modules)-1,-1,-1):
if self.modules[name] not in namelst:
delattr(self, self.modules[name])
self.modules.pop(name)
for name in namelst:
if name not in self.modules:
self.SBW_loadModule(name)
return namelst
def SBW_loadModule(self, module_name):
ans = 'Y'
if module_name[-3:] == 'GUI':
            ans = raw_input('Warning! This may hang the console\nPress \'Y\' to continue: ')
if ans == 'Y':
module_id = self.psbw.SBWGetModuleInstance(module_name)
            assert module_id != None, '\nUnknown module, %s' % module_name
module = self.sbwModuleProxy.ModuleProxy(module_id)
self.SBW_exposeAll(module)
if not self.moduleDict.has_key(module_id):
print '<PySCeS_SBW> Adding ' + module.pythonName + ' to ModuleProxy (id=' + str(module_id) + ')'
self.moduleDict.update({module_id : module})
if module.pythonName not in self.modules:
print '<PySCeS_SBW> Adding ' + module.pythonName + ' to SBWlink'
self.modules.append(module.pythonName)
setattr(self, module.pythonName, module)
else:
print '\nModule %s not loaded' % module_name
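# Usage sketch for SBWlink (illustrative only; assumes a local SBW installation
# with the SBWPython bindings available, which are not distributed with PySCeS):
#
#   sbwlink = SBWlink()                    # connect and expose the local SBW broker's modules
#   print sbwlink.SBW_getActiveModules()   # refresh and list the active module names
#   sbwlink.SBW_loadModule('someModule')   # 'someModule' is a hypothetical module name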
class SBWLayoutWebLink(object):
"""Enables access to DrawNetwork and SBMLLayout web services at www.sys-bio.org"""
sbwhost = '128.208.17.26'
sbml = None
sbmllayout = None
svg = None
DEBUGMODE = False
DEBUGLEVEL = 1
DRAWNETWORKLOADED = False
LAYOUTMODULELOADED = False
def setProxy(self, **kwargs):
"""Set as many proxy settings as you need. You may supply a user name without
a password in which case you will be prompted to enter one (once) when required
(NO guarantees, implied or otherwise, on password security AT ALL). Arguments can be:
user = 'daUser',
pwd = 'daPassword',
host = 'proxy.paranoid.net',
port = 3128
"""
proxy_info = {}
for k in kwargs.keys():
proxy_info.update({k : kwargs[k]})
if proxy_info.has_key('user') and not proxy_info.has_key('pwd'):
proxy_info.update({'pwd' : getpass.getpass()})
proxy_support = urllib2.ProxyHandler({"http" :
"http://%(user)s:%(pwd)s@%(host)s:%(port)d" % proxy_info})
opener = urllib2.build_opener(proxy_support, urllib2.HTTPHandler)
urllib2.install_opener(opener)
del proxy_info, proxy_support
def loadSBMLFileFromDisk(self, File, Dir=None):
if Dir != None:
path = os.path.join(Dir, File)
else:
path = File
if os.path.exists(path):
self.sbmllayout = None
self.svg = None
self.DRAWNETWORKLOADED = False
self.LAYOUTMODULELOADED = False
sbmlF = file(path, 'r')
self.sbml = sbmlF.read()
sbmlF.close()
return True
else:
print "%s is an invalid path" % path
return False
def loadSBMLFromString(self, str):
self.sbmllayout = None
self.svg = None
self.DRAWNETWORKLOADED = False
self.LAYOUTMODULELOADED = False
self.sbml = str
return True
def urlGET(self, host, urlpath):
url = 'http://%s%s' % (host,urlpath)
con = urllib2.urlopen(url)
resp = con.read()
if self.DEBUGMODE:
print con.headers
if self.DEBUGMODE and self.DEBUGLEVEL == 2:
print resp
con.close()
return resp
def urlPOST(self, host, urlpath, data):
assert type(data) == dict, '\nData must be a dictionary'
url = 'http://%s%s' % (host, urlpath)
con = urllib2.urlopen(url, urllib.urlencode(data))
resp = con.read()
if self.DEBUGMODE:
print con.headers
if self.DEBUGMODE and self.DEBUGLEVEL == 2:
print resp
con.close()
return resp
def getVersion(self):
print 'Inspector.getVersion()'
ver = self.urlGET(self.sbwhost, '/generate/Inspector.asmx/getVersion')
ver = ver.replace('<?xml version="1.0" encoding="utf-8"?>','')
ver = ver.replace('<string xmlns="http://www.sys-bio.org/">','')
ver = ver.replace('</string>','')
return ver
def drawNetworkLoadSBML(self):
print 'DrawNetwork.loadSBML()'
assert self.sbml != None, '\nNo SBML file loaded'
data = {'var0' : self.sbml}
self.DRAWNETWORKLOADED = True
return self.urlPOST(self.sbwhost, '/generate/DrawNetwork.asmx/loadSBML', data)
def drawNetworkGetSBMLwithLayout(self):
print 'DrawNetwork.getSBML()'
assert self.DRAWNETWORKLOADED, '\nSBML not loaded into DrawNetwork module'
sbml = self.urlGET(self.sbwhost, '/generate/DrawNetwork.asmx/getSBML')
        sbml = sbml.replace('&gt;','>')
        sbml = sbml.replace('&lt;','<')
sbml = sbml.replace('''<string xmlns="http://www.sys-bio.org/"><?xml version="1.0" encoding="utf-8"?>''','')
sbml = sbml.replace('</string>','')
self.sbmllayout = sbml
return True
def layoutModuleLoadSBML(self):
print 'SBMLLayoutModule.loadSBML()'
assert self.sbmllayout != None, '\nNo SBML Layout loaded'
data = {'var0' : self.sbmllayout}
self.LAYOUTMODULELOADED = True
return self.urlPOST(self.sbwhost, '/generate/SBMLLayoutModule.asmx/loadSBML', data)
def layoutModuleGetSVG(self):
assert self.LAYOUTMODULELOADED, '\nSBML not loaded into SBMLLayout module'
svg = self.urlGET(self.sbwhost, '/generate/SBMLLayoutModule.asmx/getSVG')
        svg = svg.replace('&gt;','>')
        svg = svg.replace('&lt;','<')
svg = svg.replace('''<string xmlns="http://www.sys-bio.org/">''','')
svg = svg.replace('''<?xml version="1.0" encoding="utf-8"?>''','')
svg = svg.replace('</string>','')
self.svg = svg
return True
def getSBML(self):
return self.sbml
def getSBMLlayout(self):
return self.sbmllayout
def getSVG(self):
return self.svg
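# Minimal end-to-end sketch for SBWLayoutWebLink (illustrative only; assumes the
# sys-bio.org web services are reachable and 'model.xml' is a hypothetical SBML file):
#
#   weblink = SBWLayoutWebLink()
#   if weblink.loadSBMLFileFromDisk('model.xml'):
#       weblink.drawNetworkLoadSBML()
#       weblink.drawNetworkGetSBMLwithLayout()
#       weblink.layoutModuleLoadSBML()
#       weblink.layoutModuleGetSVG()
#       svg_text = weblink.getSVG()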
class METATOOLlink(object):
"""New interface to METATOOL binaries"""
__metatool_path__ = None
__mod__ = None
__emode_exe_int__ = None
__emode_exe_dbl__ = None
__emode_intmode__ = 0
__emode_userout__ = 0
__emode_file__ = None
__metatool_file__ = None
#EModes = ''
def __init__(self, mod, __metatool_path__=None):
# Initialise elementary modes
self.__mod__ = mod
if __metatool_path__ == None:
self.__metatool_path__ = os.path.join(mod.__pysces_directory__, 'metatool')
else:
self.__metatool_path__ = os.path.join(__metatool_path__, 'metatool')
assert self.__metatool_path__ != None, '\nPySCeS not found'
self.__emode_file__ = self.__mod__.ModelFile[:-4] + '_emodes'
self.__metatool_file__ = self.__mod__.ModelFile[:-4] + '_metatool'
if os.sys.platform == 'win32':
self.__emode_exe_int__ = os.path.join(self.__metatool_path__,'meta43_int.exe')
self.__emode_exe_dbl__ = os.path.join(self.__metatool_path__,'meta43_double.exe')
else:
self.__emode_exe_int__ = os.path.join(self.__metatool_path__,'meta43_int')
self.__emode_exe_dbl__ = os.path.join(self.__metatool_path__,'meta43_double')
if os.path.exists(self.__emode_exe_int__):
print 'Using METATOOL int',
self.__emode_intmode__ = True
else:
self.__emode_exe_int__ = None
if os.path.exists(self.__emode_exe_dbl__):
print '\b\b\b\bdbl'
self.__emode_intmode__ = False
else:
self.__emode_exe_dbl__ = None
assert self.__emode_exe_dbl__ != None or self.__emode_exe_int__ != None, "\nMETATOOL binaries not available"
def doEModes(self):
"""
doEModes()
Calculate the elementary modes by way of an interface to MetaTool.
METATOOL is a C program developed from 1998 to 2000 by Thomas Pfeiffer (Berlin)
in cooperation with Stefan Schuster and Ferdinand Moldenhauer (Berlin) and Juan Carlos Nuno (Madrid).
http://www.biologie.hu-berlin.de/biophysics/Theory/tpfeiffer/metatool.html
Arguments:
None
"""
print 'METATOOL is a C program developed from 1998 to 2000 by Thomas Pfeiffer (Berlin)'
print 'in cooperation with Stefan Schuster and Ferdinand Moldenhauer (Berlin) and Juan Carlos Nuno (Madrid).'
print 'http://www.biologie.hu-berlin.de/biophysics/Theory/tpfeiffer/metatool.html'
goMode = 0
fileIn = 'pysces_metatool.dat'
fileOut = 'pysces_metatool.out'
goMode = 1
if goMode == 1:
# Build MetaTool input file
File = open(os.path.join(self.__mod__.ModelOutput,fileIn),'w')
# Determine type of reaction
out1 = []
for key in self.__mod__.__nDict__:
#print key
#print self.__mod__.__nDict__[key]['Type']
out1.append((key,self.__mod__.__nDict__[key]['Type']))
#print '\nExtracting metatool information from network dictionary ...\n'
File.write('-ENZREV\n')
for x in out1:
if x[1] == 'Rever':
File.write(x[0] + ' ')
File.write('\n\n')
File.write('-ENZIRREV\n')
for x in out1:
if x[1] == 'Irrev':
File.write(x[0] + ' ')
File.write('\n\n')
File.write('-METINT\n')
for x in self.__mod__.__species__:
File.write(x + ' ')
File.write('\n\n')
File.write('-METEXT\n')
for x in self.__mod__.__fixed_species__:
File.write(x + ' ')
File.write('\n\n')
output = []
allInt = 1
for x in self.__mod__.__nDict__:
reList = self.__mod__.__nDict__[x]['Reagents']
subs = ''
prods = ''
#print 'Reaction: ' + x
for y in reList:
if self.__emode_intmode__ == 1: # use int elementary modes
if abs(int(reList[y]))/abs(float(reList[y])) != 1.0:
print 'INFO: Coefficient not integer = ' + `reList[y]`
allInt = 0
if reList[y] < 0:
#print y.replace('self.','') + ' : substrate'
if abs(int(reList[y])) != 1:
subs += `abs(int(reList[y]))` + ' '
subs += y.replace('self.','')
subs += ' + '
else:
#print y.replace('self.','') + ' : product '
if abs(int(reList[y])) != 1:
prods += `abs(int(reList[y]))` + ' '
prods += y.replace('self.','')
prods += ' + '
#output.append(x + ' : ' + subs[:-3] + ' = ' + prods[:-3] + ' .')
else: # use float/double elementary mode
if reList[y] < 0.0:
#print y.replace('self.','') + ' : substrate'
if abs(float(reList[y])) != 1.0:
subs += `abs(float(reList[y]))` + ' '
subs += y.replace('self.','')
subs += ' + '
else:
#print y.replace('self.','') + ' : product '
if abs(float(reList[y])) != 1.0:
prods += `abs(float(reList[y]))` + ' '
prods += y.replace('self.','')
prods += ' + '
output.append(x + ' : ' + subs[:-3] + ' = ' + prods[:-3] + ' .')
File.write('-CAT\n')
for x in output:
File.write(x + '\n')
File.write('\n')
File.flush()
File.close()
if allInt == 1:
if self.__emode_intmode__ == 1:
eModeExe = self.__emode_exe_int__
else:
eModeExe = self.__emode_exe_dbl__
print '\nMetatool running ...\n'
######### UPDATE:
# Actually works fine on windows and posix - johann 20081128
print 'Generic run'
os.spawnl(os.P_WAIT, eModeExe, eModeExe, os.path.join(self.__mod__.ModelOutput,fileIn), os.path.join(self.__mod__.ModelOutput,fileOut))
print '\nMetatool analysis complete\n'
# Parse MetaTool output file and store the result in a string
go = 0
go2 = 0
result = ''
end = ''
try:
file2 = open(os.path.join(self.__mod__.ModelOutput,fileOut), 'r')
for line in file2:
c = re.match('ELEMENTARY MODES',line)
d = re.match(' enzymes',line)
e = re.match('The elementary mode',line)
f = re.match('\n',line)
g = re.match('The elementary',line)
if c != None:
go = 1
go2 = 0
if d != None:
go2 = 1
if e != None:
go2 = 0
if go == 1 and go2 == 1 and f == None:
line = line.replace('reversible','\n reversible\n')
line = line.replace('ir\n ','\n ir')
if self.__emode_intmode__ == 1:
line = line.replace('] ',']\n ')
else:
line = line.replace(') ',')\n ',1)
result += line
if go == 1 and g != None:
end += line
result += end
result += '\n'
file2.close()
if self.__emode_userout__ == 1:
fileo = open(os.path.join(self.__mod__.ModelOutput,self.__metatool_file__) + '.in','w')
filer = open(os.path.join(self.__mod__.ModelOutput,fileIn),'r')
for line in filer:
fileo.write(line)
fileo.write('\n\n')
filer.close()
fileo.close()
filer = open(os.path.join(self.__mod__.ModelOutput,fileOut),'r')
fileo = open(os.path.join(self.__mod__.ModelOutput,self.__metatool_file__) + '.out','w')
for line in filer:
fileo.write(line)
filer.close()
fileo.close()
os.remove(os.path.join(self.__mod__.ModelOutput,fileIn))
os.remove(os.path.join(self.__mod__.ModelOutput,fileOut))
except Exception, EX:
print 'doEmode:', EX
print 'WARNING: Unable to open MetaTool output file\nPlease check the MetaTool executables: '
if os.name == 'posix':
print '/MetaTool/meta43_double /MetaTool/meta43_int\nand their permissions'
else:
print '/MetaTool/meta43_double.exe /MetaTool/meta43_int.exe'
else:
print '\nINFO: non-integer coefficients\
\nTry using the double eMode function: self.__emode_intmode__=0'
result = 'Elementary modes not calculated\n'
else:
print '\nNo elementary mode calculation possible - no meta43_xxx.exe'
result = 'Elementary modes not calculated\n'
self.EModes = result
def getEModes(self):
"""
getEModes()
        Returns the elementary modes as a nested list of fluxes
"""
try:
a = self.EModes
FF = cStringIO.StringIO()
FF.write(self.EModes)
FF.reset()
output = []
for line in FF:
if re.match(' ',line) and not re.match(' reversible',line) and not re.match(' irreversible',line):
tmp = [el for el in line.replace('\n','').split(' ') if el != '']
tmpOut = []
skip = False
for el in range(len(tmp)):
if skip:
skip = False
elif tmp[el][0] != '(':
tmpOut.append(tmp[el])
elif tmp[el][0] == '(':
tmpOut.append(tmp[el]+')'+tmp[el+1][:-1])
skip = True
output.append(tmpOut)
return output
except AttributeError, atx:
print atx
print '\nINFO: Please run doEModes() first\n'
def showEModes(self,File=None):
"""
showEModes(File=None)
Print the results of an elementary mode analysis, generated with doEModes(),
to screen or file.
Arguments:
File [default=None]: Boolean, if True write parsed elementary modes to file
"""
try:
if File != None:
#assert type(File) == file, 'showEmodes() needs an open file object'
print '\nElementary modes written to file\n'
f = open(os.path.join(self.__mod__.ModelOutput,self.__emode_file__ + '.out'),'w')
f.write('\n## Elementary modes\n')
f.write(self.EModes)
f.close()
else:
print '\nElementary modes\n'
print self.EModes
except AttributeError, atx:
print atx
print '\nINFO: Please run doEModes() first\n'
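# Usage sketch for METATOOLlink (illustrative only; assumes 'mod' is an already
# loaded PySCeS model instance and the METATOOL binaries are installed):
#
#   mt = METATOOLlink(mod)
#   mt.doEModes()              # run METATOOL and parse its output
#   modes = mt.getEModes()     # elementary modes as lists of flux names
#   mt.showEModes(File=True)   # or write the parsed modes to a file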
#stochsim link
'''
_HAVE_STOMPY = False
_STOMPY_LOAD_ERROR = ''
try:
## import stompy
import stochpy as stompy
_HAVE_STOMPY = True
except Exception, ex:
_STOMPY_LOAD_ERROR = '%s' % ex
_HAVE_STOMPY = False
class StomPyInterface(object):
"""
    StomPy interface to PySCeS; this may move to pysces.link in the future
"""
SSA = None
SSA_REACTIONS = None
SSA_SPECIES = None
stompy = None
_MOD2PSC = None
TMP_FNAME = None
TMP_PATH = None
MODEL_PATH = None
OUTPUT_PATH = None
STP_IS_TIME_SIM = False
STP_METHOD = 'Direct'
STP_TIMEEND = 1
STP_TRAJ = 1
STP_INTERACTIVE = True
STP_TRACK_PROPENSITIES = True
STP_WAITING_TIMES = True
STP_STEPS = 10
STP_INITIAL_SPECIES = True
STP_KEEP_PSC_FILES = False
def __init__(self, model_path, output_path):
"""
An interface class to the StomPy stochastic simulator
- *model_path* the default PySCeS model directory
- *output_path* the default PySCeS output directory
"""
self.stompy = stompy
self.OUTPUT_PATH = output_path
self.MODEL_PATH = model_path
self.TMP_PATH = os.path.join(model_path, 'orca')
self._MOD2PSC = interface.writeMod2PSC
def setProperty(self, **kwargs):
"""
Sets a StomPy simulation parameter
- *method* [default='Direct'] select simulation algorithm
- *trajectories* [default=1]
- *interactive* [default=True]
- *track_propensities* [default=True]
- *steps* [default=10]
"""
## print kwargs
if kwargs.has_key('method'):
self.STP_METHOD = kwargs['method']
## print '%s = %s' % ('method', kwargs['method'])
if kwargs.has_key('trajectories'):
self.STP_TRAJ = kwargs['trajectories']
self.STP_TRAJ = 1 # TODO I need to look into this
## print 'Currently only single trajectories are supported via the PySCeS interface'
## print '%s = %s' % ('trajectories', self.STP_TRAJ)
if kwargs.has_key('interactive'):
self.STP_INTERACTIVE = kwargs['interactive']
## print '%s = %s' % ('interactive', kwargs['interactive'])
if kwargs.has_key('track_propensities'):
self.STP_TRACK_PROPENSITIES = kwargs['track_propensities']
## print '%s = %s' % ('track_propensities', kwargs['track_propensities'])
if kwargs.has_key('steps'):
self.STP_STEPS = kwargs['steps']
## print '%s = %s' % ('steps', kwargs['steps'])
if kwargs.has_key('species_initial'):
            self.STP_INITIAL_SPECIES = kwargs['species_initial']
## print '%s = %s' % ('initial_species', kwargs['initial_species'])
if kwargs.has_key('keep_psc_files'):
self.STP_KEEP_PSC_FILES = kwargs['keep_psc_files']
## print '%s = %s' % ('keep_psc_files', kwargs['keep_psc_files'])
def initModelFromMod(self, pscmod, iValues=False):
"""
Initialise a StomPy SSA instance from a PySCeS model.
- *pscmod* an initialised PySCeS model
- *iValues* [default=False] use initial values (not current)
"""
self.TMP_FNAME = str(time.time()).replace('.','')+'.psc'
if self.STP_INITIAL_SPECIES:
for s in pscmod.species:
setattr(pscmod, s, pscmod.__sDict__[s]['initial'])
self._MOD2PSC(pscmod, self.TMP_FNAME, self.TMP_PATH, iValues=iValues)
self.SSA = self.stompy.SSA(Method=self.STP_METHOD, File=self.TMP_FNAME, dir=self.TMP_PATH, IsInteractive=self.STP_INTERACTIVE)
self.SSA.Trajectories(self.STP_TRAJ)
self.SSA_REACTIONS = self.SSA.SSA.rate_names
self.SSA_SPECIES = self.SSA.SSA.species
if self.STP_TRACK_PROPENSITIES:
self.SSA.TrackPropensities()
try:
print os.path.join(self.TMP_PATH, self.TMP_FNAME)
if not self.STP_KEEP_PSC_FILES and self.TMP_PATH != None and self.TMP_FNAME != None:
os.remove(os.path.join(self.TMP_PATH, self.TMP_FNAME))
except:
print 'Could not delete intermediatery StomPy PSC file: %s' % os.path.join(self.TMP_PATH, self.TMP_FNAME)
self.TMP_FNAME = None
print 'StomPy model ... initialised.'
def runTimeSimulation(self, pscmod, endtime=None, method='Direct', iValues=False):
"""
Run a stochastic simulation
        - *pscmod* an instantiated PySCeS model
        - *endtime* [default=1] the end time **Note: this could take a long time i.e. generate a huge amount of steps**
- *method* [default='Direct'] select the simulation method, one of:
- *Direct*
- *FirstReactionMethod*
- *NextReactionMethod*
- *TauLeaping*
- *iValues* [default=False] use initial values (not current)
"""
if method not in ['Direct','FirstReactionMethod','NextReactionMethod','TauLeaping']:
print 'Method: %s does not exist using - Direct' % method
self.STP_METHOD = 'Direct'
else:
self.STP_METHOD = method
if endtime != None:
self.STP_TIMEEND = endtime
self.initModelFromMod(pscmod, iValues=iValues)
## self.SSA.Timesteps(self.STP_STEPS)
self.SSA.Endtime(self.STP_TIMEEND)
self.SSA.Run()
self.STP_IS_TIME_SIM = True
## self.SSA.PlotTimeSim()
print 'StomPy time simulation ... done.'
# TODO STOCHPY
## if self.SSA.SSA.output[0][-1] == '':
## self.SSA.SSA.output[0][-1] = 0.0
## sim_dat = numpy.array(self.SSA.SSA.output, 'd')
## pscmod.data_stochsim = IntegrationStochasticDataObj()
## pscmod.data_stochsim.setTime(sim_dat[:,0])
## pscmod.data_stochsim.setSpecies(sim_dat[:,1:-1], self.SSA_SPECIES)
pscmod.data_stochsim = self.SSA.data_stochsim
if self.STP_WAITING_TIMES:
wtimes, wt_lbls = self.getWaitingtimesData(reactions=None,lbls=True)
pscmod.data_stochsim.setWaitingtimes(wtimes, wt_lbls)
if self.STP_TRACK_PROPENSITIES:
pscmod.data_stochsim.setPropensities(self.SSA.SSA.propensities_output)
pscmod.data_stochsim.TYPE_INFO = 'Stochastic'
def runStepSimulation(self, pscmod, steps=None, method='Direct', iValues=False):
"""
Run a stochastic simulation
        - *pscmod* an instantiated PySCeS model
- *steps* [default=10] the number of steps to simulate
- *method* [default='Direct'] select the simulation method, one of:
- *Direct*
- *FirstReactionMethod*
- *NextReactionMethod*
- *TauLeaping*
- *iValues* [default=False] use initial values (not current)
"""
if method not in ['Direct','FirstReactionMethod','NextReactionMethod','TauLeaping']:
print 'Method: %s does not exist using - Direct' % method
self.STP_METHOD = 'Direct'
else:
self.STP_METHOD = method
if steps != None:
self.STP_STEPS = steps
self.initModelFromMod(pscmod, iValues=iValues)
self.SSA.Timesteps(self.STP_STEPS)
## self.SSA.Endtime(self.STP_TIMEEND)
self.SSA.Run()
self.STP_IS_TIME_SIM = False
print 'StomPy step simulation ... done.'
## print self.SSA.SSA.output[0]
## print self.SSA.SSA.output[1]
## print self.SSA.SSA.output[-1]
## header_line = self.SSA.SSA.output.pop(0)
## if self.SSA.SSA.output[0][-1] == '':
## self.SSA.SSA.output[0][-1] = 0.0
## sim_dat = numpy.array(self.SSA.SSA.output, 'd')
## pscmod.data_stochsim = IntegrationStochasticDataObj()
## pscmod.data_stochsim.setTime(sim_dat[:,0])
## pscmod.data_stochsim.setSpecies(sim_dat[:,1:-1], self.SSA_SPECIES)
pscmod.data_stochsim = self.SSA.data_stochsim
if self.STP_WAITING_TIMES:
wtimes, wt_lbls = self.getWaitingtimesData(reactions=None,lbls=True)
pscmod.data_stochsim.setWaitingtimes(wtimes, wt_lbls)
if self.STP_TRACK_PROPENSITIES:
pscmod.data_stochsim.setPropensities(self.SSA.SSA.propensities_output)
pscmod.data_stochsim.TYPE_INFO = 'Stochastic'
def getWaitingtimesData(self,reactions=None,lbls=False):
"""
        Returns the waiting-time data for each reaction in the model, derived from the SSA output.
        Input:
        - *reactions* [default=None] a list of reactions to include; defaults to all reactions
- *lbls* [default=False] if True return (data_array, column_labels) otherwise just data_array
This method is derived from StomPy 0.9 (http://stompy.sf.net) Analysis.py
"""
if self.SSA.IsTauLeaping:
print 'INFO: waiting times not available when method is Tau Leaping'
if not lbls:
return None
else:
return None, None
self.SSA.GetWaitingtimes()
if reactions == None:
reactions = self.SSA_REACTIONS
vect = []
vect_lbls = []
for r in reactions:
if r in self.SSA_REACTIONS:
vect.append(self.SSA_REACTIONS.index(r)+1)
vect_lbls.append('wt'+str(r))
else:
print "INFO: '%s' is not a valid reaction name" % r
OUTPUT = []
## for t in range(len(self.SSA.data_stochsim.waiting_times)):
T_OUTPUT = []
for i in vect:
if self.SSA.data_stochsim.waiting_times.has_key(i):
waiting_time = self.SSA.data_stochsim.waiting_times[i]
if len(waiting_time) > 1: # At least 2 waiting times are necessary per reaction
T_OUTPUT.append(self.stompy.modules.Analysis.LogBin(waiting_time, 1.5)) # Create logarithmic bins
else:
T_OUTPUT.append(None)
else:
T_OUTPUT.append(None)
OUTPUT.append(T_OUTPUT)
if not lbls:
return OUTPUT
else:
return OUTPUT, vect_lbls
class IntegrationStochasticDataObj(object):
"""
This class is specifically designed to store the
results of a stochastic time simulation
It has methods for setting the Time, Labels, Species and Propensity data and
getting Time, Species and Rate (including time) arrays. However, of more use:
- getOutput(\*args) feed this method species/rate labels and it will return
an array of [time, sp1, r1, ....]
- getDataAtTime(time) the data generated at time point "time".
- getDataInTimeInterval(time, bounds=None) more intelligent version of the above
returns an array of all data points where: time-bounds <= time <= time+bounds
"""
time = None
waiting_times = None
species = None
propensities = None
xdata = None
time_label = 'Time'
waiting_time_labels = None
species_labels = None
propensities_labels = None
xdata_labels = None
HAS_SPECIES = False
HAS_WAITING_TIMES = False
HAS_PROPENSITIES = False
HAS_TIME = False
HAS_XDATA = False
IS_VALID = True
TYPE_INFO = 'Stochastic'
def setLabels(self, species):
"""
Set the species
- *species* a list of species labels
"""
self.species_labels = species
def setTime(self, time, lbl=None):
"""
Set the time vector
- *time* a 1d array of time points
- *lbl* [default=None] is "Time" set as required
"""
self.time = time.reshape(len(time), 1)
self.HAS_TIME = True
if lbl != None:
self.time_label = lbl
def setSpecies(self, species, lbls=None):
"""
Set the species array
- *species* an array of species vs time data
- *lbls* [default=None] a list of species labels
"""
self.species = species
self.HAS_SPECIES = True
if lbls != None:
self.species_labels = lbls
def setWaitingtimes(self, waiting_times, lbls=None):
"""
Set the `waiting_times` this data structure is not an array but a nested list of: waiting time log bins per reaction per trajectory::
waiting_times = [traj_1, ..., traj_n]
traj_1 = [wt_J1, ..., wt_Jn] # in order of SSA_REACTIONS
wt_J1 = (xval, yval, nbin)
xval =[x_1, ..., x_n]
yval =[y_1, ..., y_n]
nbin = n
- *waiting_times* a list of waiting times
- *lbls* [default=None] a list of matching reaction names
"""
self.waiting_times = waiting_times
self.HAS_WAITING_TIMES = True
if lbls != None:
self.waiting_time_labels = lbls
def setPropensities(self, propensities, lbls=None):
"""
Sets an array of propensities.
- *propensities* a list of propensities
- *lbls* [default=None] a list of matching reaction names
"""
if lbls == None:
LB = copy.copy(propensities[0])
lbls = LB[1:]
lbls = ['p'+str(r) for r in lbls]
P_ARR = numpy.zeros((len(propensities), len(propensities[0])-1), 'd')
P_ARR[-1,:] = numpy.NaN
for r in range(1, P_ARR.shape[0]):
P_ARR[r, :] = propensities[r][1:]
self.propensities = P_ARR
self.HAS_PROPENSITIES = True
if lbls != None:
self.propensities_labels = lbls
## print self.propensities_labels
## print self.propensities
def setXData(self, xdata, lbls=None):
"""
Sets an array of extra simulation data
- *xdata* an array of xdata vs time
- *lbls* [default=None] a list of xdata labels
"""
self.xdata = xdata
self.HAS_XDATA = True
if lbls != None:
self.xdata_labels = lbls
def getTime(self, lbls=False):
"""
Return the time vector
- *lbls* [default=False] return only the time array or optionally both the time array and time label
"""
output = None
if self.HAS_TIME:
output = self.time.reshape(len(self.time),)
if not lbls:
return output
else:
return output, [self.time_label]
def getSpecies(self, lbls=False):
"""
        Return an array of time+species
        - *lbls* [default=False] return only the time+species array or optionally both the data array and a list of column labels
"""
output = None
if self.HAS_SPECIES:
output = numpy.hstack((self.time, self.species))
labels = [self.time_label]+self.species_labels
else:
output = self.time
labels = [self.time_label]
if not lbls:
return output
else:
return output, labels
def getWaitingtimes(self, lbls=False, traj=[]):
"""
Return waiting times, time+waiting_time array
        - *lbls* [default=False] return only the time+waiting_time array or optionally both the data array and a list of column labels
        - *traj* [default=[]] trajectories to return; an empty list returns all trajectories
"""
output = None
labels = None
if self.HAS_WAITING_TIMES:
output = []
if len(traj) == 0:
traj = range(len(self.waiting_times))
## if len(traj) == 1:
## output = self.waiting_times[0]
## else:
for t in traj:
output.append(self.waiting_times[t])
labels = self.waiting_time_labels
else:
output = []
labels = []
if not lbls:
return output
else:
return output, labels
def getPropensities(self, lbls=False):
"""
Return time+propensity array
        - *lbls* [default=False] return only the time+propensity array or optionally both the data array and a list of column labels
"""
#assert self.propensities != None, "\nNo propensities"
output = None
if self.HAS_PROPENSITIES:
print self.time.shape
print self.propensities.shape
output = numpy.hstack((self.time, self.propensities))
labels = [self.time_label]+self.propensities_labels
else:
output = self.time
labels = [self.time_label]
if not lbls:
return output
else:
return output, labels
def getXData(self, lbls=False):
"""
Return time+xdata array
        - *lbls* [default=False] return only the time+xdata array or optionally both the data array and a list of column labels
"""
output = None
if self.HAS_XDATA:
output = numpy.hstack((self.time, self.xdata))
labels = [self.time_label]+self.xdata_labels
else:
output = self.time
labels = [self.time_label]
if not lbls:
return output
else:
return output, labels
def getDataAtTime(self, time):
"""
Return all data generated at "time"
- *time* the required exact time point
"""
#TODO add rate rule data
t = None
sp = None
ra = None
ru = None
xd = None
temp_t = self.time.reshape(len(self.time),)
for tt in range(len(temp_t)):
if temp_t[tt] == time:
t = tt
if self.HAS_SPECIES:
sp = self.species.take([tt], axis=0)
if self.HAS_PROPENSITIES:
ru = self.propensities.take([tt], axis=0)
if self.HAS_XDATA:
xd = self.xdata.take([tt], axis=0)
break
output = None
if t is not None:
output = numpy.array([[temp_t[t]]])
if sp is not None:
output = numpy.hstack((output,sp))
if ra is not None:
output = numpy.hstack((output,ra))
if ru is not None:
output = numpy.hstack((output,ru))
if xd is not None:
output = numpy.hstack((output,xd))
return output
def getDataInTimeInterval(self, time, bounds=None):
"""
Returns an array of all data in interval: time-bounds <= time <= time+bounds
        where *bounds* defaults to the stepsize
- *time* the interval midpoint
- *bounds* [default=None] interval halfspan defaults to stepsize
"""
temp_t = self.time.reshape(len(self.time),)
if bounds == None:
bounds = temp_t[1] - temp_t[0]
c1 = (temp_t >= time-bounds)
c2 = (temp_t <= time+bounds)
print 'Searching (%s:%s:%s)' % (time-bounds, time, time+bounds)
t = []
sp = None
ra = None
for tt in range(len(c1)):
if c1[tt] and c2[tt]:
t.append(tt)
output = None
if len(t) > 0:
output = self.time.take(t)
output = output.reshape(len(output),1)
if self.HAS_SPECIES and self.HAS_TIME:
output = numpy.hstack((output, self.species.take(t, axis=0)))
if self.HAS_PROPENSITIES:
output = numpy.hstack((output, self.propensities.take(t, axis=0)))
if self.HAS_XDATA:
output = numpy.hstack((output, self.xdata.take(t, axis=0)))
return output
def getAllSimData(self,lbls=False):
"""
Return an array of time + all available simulation data
- *lbls* [default=False] return only the data array or (data array, list of labels)
"""
labels = [self.time_label]
if self.HAS_SPECIES and self.HAS_TIME:
output = numpy.hstack((self.time, self.species))
labels += self.species_labels
if self.HAS_PROPENSITIES:
output = numpy.hstack((output, self.propensities))
labels += self.propensities_labels
if self.HAS_XDATA:
output = numpy.hstack((output, self.xdata))
labels += self.xdata_labels
if not lbls:
return output
else:
return output, labels
def getSimData(self, *args, **kwargs):
"""
Feed this method species/xdata labels and it will return an array of [time, sp1, ....]
        - e.g. 'species1', 'xdata1', ...
- *lbls* [default=False] return only the data array or (data array, list of labels)
"""
output = self.time
if kwargs.has_key('lbls'):
lbls = kwargs['lbls']
else:
lbls = False
lout = [self.time_label]
for roc in args:
if self.HAS_SPECIES and roc in self.species_labels:
lout.append(roc)
output = numpy.hstack((output, self.species.take([self.species_labels.index(roc)], axis=-1)))
if self.HAS_PROPENSITIES and roc in self.propensities_labels:
lout.append(roc)
output = numpy.hstack((output, self.propensities.take([self.propensities_labels.index(roc)], axis=-1)))
if self.HAS_XDATA and roc in self.xdata_labels:
lout.append(roc)
output = numpy.hstack((output, self.xdata.take([self.xdata_labels.index(roc)], axis=-1)))
if not lbls:
return output
else:
return output, lout
class PysMod:
#STOMPY INSERT START
def StochSimPlot(self, plot='species', filename=None, title=None, log=None, format='points'):
"""
Plot the Stochastic simulation results, uses the new UPI pysces.plt interface:
- *plot* [default='species'] output to plot, can be one of:
- 'all' species and propensities
- 'species' species
- 'waiting_times' waiting_times
- 'propensities' propensities
- `['S1', 'R1', ]` a list of model attributes ('species')
- *filename* [default=None] if defined file is exported to filename
- *title* [default=None] the plot title
- *log* [default=None] use log axis for axis 'x', 'y', 'xy'
        - *format* [default='points'] use UPI or backend specific keys
"""
data = None
labels = None
allowedplots = ['all', 'species', 'propensities','waiting_times']
## allowedplots = ['all', 'species', 'waiting_times']
if type(plot) != list and plot not in allowedplots:
raise RuntimeError, '\nPlot must be one of %s not \"%s\"' % (str(allowedplots), plot)
if plot == 'all':
## data, labels = self.data_stochsim.getSpecies(lbls=True)
data, labels = self.data_stochsim.getAllSimData(lbls=True)
elif plot == 'species':
data, labels = self.data_stochsim.getSpecies(lbls=True)
elif plot == 'propensities':
data, labels = self.data_stochsim.getPropensities(lbls=True)
## data, labels = self.data_stochsim.getRates(lbls=True)
elif plot == 'waiting_times':
dataLst, labels = self.data_stochsim.getWaitingtimes(lbls=True)
format='points'
## data, labels = self.data_stochsim.getRates(lbls=True)
else:
plot = [at for at in plot if at in self.__species__+[self.data_stochsim.time_label]+self.data_stochsim.propensities_labels]
kwargs = {'lbls' : True}
print plot
if len(plot) > 0:
data, labels = self.data_stochsim.getSimData(*plot, **kwargs)
del allowedplots
xu = 'Time (%(multiplier)s x %(kind)s x 10**%(scale)s)**%(exponent)s' % self.__uDict__['time']
if plot == 'waiting_times':
xu = 'Inter-arrival time (%s)' % xu
xrng_start = 0.1
xrng_end = 0.1
yrng_start = 0.1
yrng_end = 0.1
for wt in range(len(dataLst)):
for d in range(len(dataLst[wt])):
D = dataLst[wt][d]
if plt.__USE_MATPLOTLIB__ and d > 0:
plt.m.hold(True)
if D != None and len(D[0]) > 0 and len(D[1]) > 0:
data = numpy.vstack([D[0], D[1]]).transpose()
if min(D[0]) < xrng_start and min(D[0]) > 0.0:
xrng_start = min(D[0])
if max(D[0]) > xrng_end:
xrng_end = max(D[0])
if min(D[1]) < yrng_start and min(D[1]) > 0.0:
yrng_start = min(D[1])
if max(D[1]) > yrng_end:
yrng_end = max(D[1])
plt.plotLines(data, 0, [1], titles=['Time']+[labels[d]], formats=[format])
plt.setRange('x', xrng_start*0.8, xrng_end*1.2)
plt.setRange('y', yrng_start*0.8, yrng_end*1.2)
if plt.__USE_MATPLOTLIB__:
plt.m.hold(False)
else:
plt.plotLines(data, 0, range(1, data.shape[1]), titles=labels, formats=[format])
# set the x-axis range so that it is original range + 0.2*sim_end
        # this is a scientifically determined amount of space that is needed for the title at the
# end of the line :-) - brett 20040209
RngTime = self.data_stochsim.getTime()
end = RngTime[-1] + 0.2*RngTime[-1]
plt.setRange('x', RngTime[0], end)
del RngTime
# For now StochPy results are plotted as Amounts directly from StochPy
M = 'Amount'
## if self.__KeyWords__['Output_In_Conc']:
## M = 'Concentration'
## else:
## M = 'Amount (%(multiplier)s x %(kind)s x 10**%(scale)s)**%(exponent)s' % self.__uDict__['substance']
if plot == 'all':
yl = 'Amount, propensities'
elif plot == 'propensities':
yl = 'Propensities'
elif plot == 'waiting_times':
yl = 'Frequency'
if log == None:
log = 'xy'
elif plot == 'species':
yl = '%s' % M
else:
yl = 'User defined'
plt.setAxisLabel('x', xu)
plt.setAxisLabel('y', yl)
if log != None:
plt.setLogScale(log)
if title == None:
plt.setGraphTitle('PySCeS/StochPy simulation (' + self.ModelFile + ') ' + time.strftime("%a, %d %b %Y %H:%M:%S"))
else:
plt.setGraphTitle(title)
plt.replot()
if filename != None:
plt.export(filename, directory=self.ModelOutput, type='png')
def doStochSim(self,end=10,mode='steps',method='Direct',trajectories=1):
"""
doStochSim(end=10, mode='steps', method='Direct')
        Run a stochastic simulation until `end` is reached. This can be either steps or end time (which could be a *HUGE* number of steps).
Arguments:
- *end* [default=10] simulation end (steps or time)
- *mode* [default='steps'] simulation mode, can be one of:
- *steps* total number of steps to simulate
- *time* simulate until time is reached
- *method* [default='Direct'] stochastic algorithm, can be one of:
- Direct
- FirstReactionMethod
- NextReactionMethod
- TauLeaping
"""
if method not in ['Direct', 'FirstReactionMethod','NextReactionMethod','TauLeaping']:
print 'Method "%s" not recognised using: "Direct"' % method
method = 'Direct'
if mode not in ['steps','time']:
print 'Mode "%s" not recognised using: "steps"' % mode
mode = 'steps'
stompy_track_propensities = True
stompy_keep_psc_files = False
self.__STOMPY__.setProperty(method=method, trajectories=trajectories, interactive=True, track_propensities=stompy_track_propensities, keep_psc_files=stompy_keep_psc_files)
if mode == 'time':
self.__STOMPY__.runTimeSimulation(self, endtime=end, method=method)
else:
self.__STOMPY__.runStepSimulation(self, steps=end, method=method)
def doStochSimPlot(self, end=10.0, mode='steps', method='Direct', plot='species', fmt='points', log=None, filename=None):
"""
doStochSimPlot(end=10.0, mode='steps', method='Direct', plot='species', fmt='points', log=None, filename=None)
        Run a stochastic simulation until `end` is reached and plot the results. This can be either steps or end time (which could be a *HUGE* number of steps).
Arguments:
- *end* [default=10] simulation end (steps or time)
- *mode* [default='steps'] simulation mode, can be one of:
- *steps* total number of 'steps' to simulate
- *time* simulate until 'time' is reached
- *method* [default='Direct'] stochastic algorithm, can be one of:
- Direct
- FirstReactionMethod
- NextReactionMethod
- TauLeaping
- *plot* [default='species'] output to plot, can be one of:
- 'all' species and propensities
- 'species' species
- 'waiting_times' waiting_times
- 'propensities' propensities
- `['S1', 'R1', ]` a list of model attributes ('species')
- *filename* [default=None] if defined file is exported to filename
- *title* [default=None] the plot title
- *log* [default=None] use log axis for axis 'x', 'y', 'xy'
        - *fmt* [default='points'] use UPI or backend specific keys
"""
self.doStochSim(end=end, mode=mode, method=method,trajectories=1)
self.StochSimPlot(plot='species', filename=filename, log=log, format=fmt)
#STOMPY INSERT END
if not _HAVE_STOMPY:
def nofunc(self, *args, **kwargs):
print '\nStochastic simulation not available, please download/install *StomPy* from: http://stompy.sf.net\n'
PysMod.doStochSim = nofunc
PysMod.doStochSimPlot = nofunc
PysMod.StochSimPlot = nofunc
'''
|
bsd-3-clause
| 621,127,197,163,312,800
| 36.223675
| 179
| 0.522609
| false
| 3.787709
| false
| false
| false
|
6/jcrawler
|
mbga_parser.py
|
1
|
5359
|
#-*- encoding:utf-8 -*-
"""
Parse MBGA data to generate statistics.
"""
import glob
import os
import re
import csv
import numpy
from PIL import Image
from datetime import datetime
DATA_PATH = "data/mbga/{0}/"
PERMISSIONS = {
"メンバー全員": 1 # all members
,"主催者+副管理": 2 # sponsors and moderators
,"主催者のみ": 3 # sponsors
}
EMOTIONS = {
"normal": 1
,"shy": 2
,"smile": 3
,"angry": 4
,"cry": 5
}
def analyze_groups():
group_files = files('group', '*.data')
groups = []
min_dist, max_dist = None, 0
for i in range(1, len(group_files), 2):
n_members, permissions = parse(group_files[i-1], meta_parser)
dist = parse(group_files[i], time_dist_parser)
if dist and dist > max_dist:
max_dist = dist
if dist and (dist < min_dist or min_dist is None):
min_dist = dist
if not dist: dist = 0
groups.append([n_members, permissions, dist])
min_members_dist, max_members_dist = None, 0
for i,g in enumerate(groups):
        if g[2] == 0:
groups[i].append(0)
continue
n_members, dist = g[0], float(g[2])
# scale from 0.01 (least activity) to 1.0 (most activity)
scaled_dist = 1 - ((dist - min_dist) / (max_dist - min_dist) * 0.99)
groups[i][2] = scaled_dist
members_dist = scaled_dist / n_members
groups[i].append(members_dist)
if members_dist < min_members_dist or min_members_dist is None:
min_members_dist = members_dist
if members_dist > max_members_dist:
max_members_dist = members_dist
for i,g in enumerate(groups):
        if g[3] == 0: continue
members_dist = g[3]
scaled_members_dist = (members_dist - min_members_dist) / (max_members_dist - min_members_dist) * 0.99 + 0.01
groups[i][3] = scaled_members_dist
print "n groups: {0}".format(len(groups))
headers = ('n_members','permissions','distance','member_distance')
write_csv('mbga_groups.csv', headers, groups)
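# Worked example of the activity scaling above (illustrative numbers): with
# min_dist = 1000 s and max_dist = 101000 s, a group whose oldest visible post is
# 1000 s before extraction gets scaled_dist = 1 - (0/100000)*0.99 = 1.0 (most
# active), while one at 101000 s gets 1 - (100000/100000)*0.99 = 0.01 (least active).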
def meta_parser(path, data):
meta = re.findall("<li>([^<]+)</li>", data)
meta = map(lambda x: x.split(":")[1], meta)
# return [number of members, permissions]
return int(meta[0].split("人")[0]), PERMISSIONS[meta[2]]
def analyze_people():
ids = people_ids()
mins = {'diary':None, 'greet':None, 'disc':None, 'test':None}
maxs = {'diary':0, 'greet':0, 'disc':0, 'test':0}
people = []
for i,id in enumerate(ids):
# gather all data files associated with a specific person ID
p_files = files('person', '*_{0}_*.data'.format(id))
data = {}
for f in p_files:
ftype = f.split("_")[-1].split(".")[0]
if ftype == "demo":
data['age'] = parse(f, demographics_parser)
elif ftype in ["diary","greet","disc","test"]:
dist = parse(f, time_dist_parser)
data[ftype] = dist
if dist and (mins[ftype] is None or dist < mins[ftype]):
mins[ftype] = dist
if dist and dist > maxs[ftype]:
maxs[ftype] = dist
people.append(data)
people_csv = []
for i,person in enumerate(people):
person_csv = []
for dtype,value in person.items():
if dtype == "age" or not value:
if not value: value = 0
person_csv.append((dtype, value))
continue
dist = float(value)
scaled_dist = 1 - ((dist - mins[dtype])/(maxs[dtype] - mins[dtype])*0.99)
person_csv.append((dtype, scaled_dist))
person_csv.sort()
people_csv.append(map(lambda x: x[-1], person_csv))
headers = ('age', 'diary', 'disc', 'greet', 'intro')
write_csv('mbga_people.csv', headers, people_csv)
def people_ids():
people_files = files('person', '*.data')
n_people = len(people_files)/7
people_ids = []
id_regex = re.compile("[0-9]+_([0-9]+)_[0-9]+")
for f in people_files:
m = id_regex.search(f)
people_ids.append(m.group(1))
return set(people_ids)
def demographics_parser(path, data):
data = data.split("<dt>")
age = -1
for d in data:
if d.startswith ("誕生日(年齢)"): # birthdate (age)
age = re.findall("[0-9]+", re.findall("<dd>([^<]+)</dd>", d)[0])[-1]
return age
def time_dist_parser(path, data):
dist = False
extracted = path.split("/")[-1].split("_")[0]
time_extracted = datetime.strptime(extracted, "%Y%m%d%H%M%S")
dates = re.findall("[0-9]{4}/[0-9]+/[0-9]+ [0-9]+:[0-9]+", data)
if dates:
oldest = datetime.strptime(dates[-1], "%Y/%m/%d %H:%M")
dist = time_extracted - oldest
dist = (dist.days * 86400) + dist.seconds
return dist
def analyze_avatars():
avatars = files('avatar', '*.png')
data = []
for i,a in enumerate(avatars):
pic = numpy.array(Image.open(a))
num_black_pixels = len(numpy.where(pic[0:1][0:1] == 0)[0])
bg_mod = 0 if num_black_pixels == 150 else 1
emotion = a.split("/")[-1].split("_")[-1].split(".")[0]
data.append([EMOTIONS[emotion], bg_mod])
headers = ("emotion", "bg_mod")
write_csv('mbga_avatars.csv', headers, data)
def parse(data_path, parser):
f = open(data_path, 'r').read()
return parser(data_path, f)
def files(folder, pattern):
return glob.glob(os.path.join(DATA_PATH.format(folder), pattern))
def write_csv(fname, headers, list_of_lists):
f = open(fname, 'wb')
writer = csv.writer(f)
writer.writerow(headers)
for l in list_of_lists:
writer.writerow(l)
f.close()
if __name__=="__main__":
#analyze_groups()
#analyze_people()
analyze_avatars()
|
mit
| 6,223,290,673,111,375,000
| 30.426036
| 113
| 0.604029
| false
| 2.939126
| false
| false
| false
|
Jajcus/pyxmpp
|
pyxmpp/expdict.py
|
1
|
4727
|
#
# (C) Copyright 2003-2010 Jacek Konieczny <jajcus@jajcus.net>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License Version
# 2.1 as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
#
"""Dictionary with item expiration."""
__docformat__="restructuredtext en"
import time
import threading
__all__ = ['ExpiringDictionary']
sentinel = object()
class ExpiringDictionary(dict):
"""An extension to standard Python dictionary objects which implements item
expiration.
Each item in ExpiringDictionary has its expiration time assigned, after
which the item is removed from the mapping.
:Ivariables:
- `_timeouts`: a dictionary with timeout values and timeout callback for
stored objects.
- `_default_timeout`: the default timeout value (in seconds from now).
- `_lock`: access synchronization lock.
:Types:
- `_timeouts`: `dict`
- `_default_timeout`: `int`
- `_lock`: `threading.RLock`"""
__slots__=['_timeouts','_default_timeout','_lock']
def __init__(self,default_timeout=300):
"""Initialize an `ExpiringDictionary` object.
:Parameters:
- `default_timeout`: default timeout value for stored objects.
:Types:
- `default_timeout`: `int`"""
dict.__init__(self)
self._timeouts={}
self._default_timeout=default_timeout
self._lock=threading.RLock()
def __delitem__(self,key):
self._lock.acquire()
try:
del self._timeouts[key]
return dict.__delitem__(self,key)
finally:
self._lock.release()
def __getitem__(self,key):
self._lock.acquire()
try:
self._expire_item(key)
return dict.__getitem__(self,key)
finally:
self._lock.release()
    def pop(self,key,default=sentinel):
        self._lock.acquire()
        try:
            # expire the item first so a stale value is never returned, then
            # fall back to *default* (when given) if the key is absent
            if key in self._timeouts:
                self._expire_item(key)
                self._timeouts.pop(key,None)
            if default is not sentinel:
                return dict.pop(self,key,default)
            else:
                return dict.pop(self,key)
        finally:
            self._lock.release()
def __setitem__(self,key,value):
return self.set_item(key,value)
def set_item(self,key,value,timeout=None,timeout_callback=None):
"""Set item of the dictionary.
:Parameters:
- `key`: the key.
- `value`: the object to store.
- `timeout`: timeout value for the object (in seconds from now).
- `timeout_callback`: function to be called when the item expires.
The callback should accept none, one (the key) or two (the key
and the value) arguments.
:Types:
- `key`: any hashable value
- `value`: any python object
- `timeout`: `int`
- `timeout_callback`: callable"""
self._lock.acquire()
try:
if not timeout:
timeout=self._default_timeout
self._timeouts[key]=(time.time()+timeout,timeout_callback)
return dict.__setitem__(self,key,value)
finally:
self._lock.release()
def expire(self):
"""Do the expiration of dictionary items.
Remove items that expired by now from the dictionary."""
self._lock.acquire()
try:
for k in self._timeouts.keys():
self._expire_item(k)
finally:
self._lock.release()
def _expire_item(self,key):
"""Do the expiration of a dictionary item.
Remove the item if it has expired by now.
:Parameters:
- `key`: key to the object.
:Types:
- `key`: any hashable value"""
(timeout,callback)=self._timeouts[key]
if timeout<=time.time():
item = dict.pop(self, key)
del self._timeouts[key]
if callback:
try:
callback(key,item)
except TypeError:
try:
callback(key)
except TypeError:
callback()
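# Usage sketch (illustrative only; some_object and cleanup() are hypothetical):
#
#   d = ExpiringDictionary(default_timeout=60)
#   d.set_item("session", some_object, timeout=10,
#              timeout_callback=lambda key, value: cleanup(key))
#   d.expire()   # call periodically to drop items whose timeout has passed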
# vi: sts=4 et sw=4
|
lgpl-2.1
| 7,930,685,050,958,011,000
| 30.939189
| 80
| 0.576476
| false
| 4.389044
| false
| false
| false
|
ucsd-ccbb/Oncolist
|
src/restLayer/app/TermIdentifier.py
|
1
|
36204
|
__author__ = 'aarongary'
import sys
import pymongo
import requests
import MyGeneInfo
from itertools import islice
from app.util import set_status, create_edges_index
from app.status import Status
from bson.json_util import dumps
from models.TermResolver import TermAnalyzer
import ElasticSearch
import os
from sklearn.linear_model import LinearRegression
import numpy as np
import app
import ESearch
def bulk_identify_terms(terms):
tr = TermAnalyzer()
termsClassified = tr.process_terms_bulk(terms)
return_value = {
'termClassification': termsClassified
}
return return_value
def search_term_description(term):
tr = TermAnalyzer()
termsClassified = tr.process_terms_bulk(term)
entrez_summary = ESearch.get_gene_summary_from_entrez(term)
return_value = {
'termClassification': termsClassified,
'entrez_summary': entrez_summary
}
return return_value
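# Illustrative call (assumes the local MongoDB term collections and the Entrez
# service used by ESearch are reachable; 'BRCA1' is just an example symbol):
#
#   info = search_term_description('BRCA1')
#   # info['termClassification'] -> classification produced by TermAnalyzer
#   # info['entrez_summary']     -> gene summary text fetched via ESearch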
def bulk_identify_terms2(terms):
term_with_id = []
#========================
# Process GENOME terms
#========================
analyzed_terms = process_genome_terms(terms)
for genome_term in analyzed_terms['special_terms']:
a = {
'probabilitiesMap': {
'gene': '0.0',
'icd10': '0.0',
'drug': '0.0',
'disease': '0.0',
'genome': '1.0'
},
'status': 'success',
'termId': genome_term['familiar_term'],
'desc': 'Genome',
'geneSymbol': genome_term['familiar_term'],
'termTitle': genome_term['familiar_term'] + ' (' + genome_term['latin'] + ')'
}
term_with_id.append(a)
terms = analyzed_terms['terms']
#========================
# Process DISEASE terms
#========================
analyzed_terms = process_disease_terms(terms)
for disease_term in analyzed_terms['special_terms']:
a = {
'probabilitiesMap': {
'gene': '0.0',
'icd10': '0.0',
'drug': '0.0',
'disease': '1.0',
'genome': '0.0'
},
'status': 'success',
'termId': disease_term['familiar_term'],
'desc': 'Disease',
'geneSymbol': disease_term['familiar_term'],
'termTitle': disease_term['familiar_term'] + ' (' + disease_term['latin'] + ')'
}
term_with_id.append(a)
terms = analyzed_terms['terms']
if(len(terms) > 0):
queryTermArray = terms.split(',')
types = ['gene','icd10','drug','disease','genome']
for queryTerm in queryTermArray:
termTitle = queryTerm
print queryTerm
a = {
'probabilitiesMap': {},
'status': 'success',
'termId': queryTerm.upper(),
'desc': '',
'geneSymbol': '',
'termTitle': queryTerm
}
term_result = identify_term(queryTerm)
#tt = dumps(term_result)
if(term_result is None or term_result.count() < 1):
term_alt_result = identify_alt_term(queryTerm) #MyGeneInfo.get_gene_info_by_id(queryTerm)
cc = dumps(term_alt_result)
if(term_alt_result['term'] == 'UNKNOWN'):
a['probabilitiesMap'] = {
'gene': '0.0',
'icd10': '0.0',
'drug': '0.0',
'disease': '0.0',
'genome': '0.0'
}
a['status'] = 'unknown'
term_with_id.append(a)
else:
termDesc = ''
termGeneSymbol = ''
term_result_types_array = []
if(term_alt_result['type'] == 'GENE'):
termDesc = term_alt_result['desc']
termGeneSymbol = term_alt_result['geneSymbol']
termTitle = queryTerm.upper() + ' (' + termGeneSymbol.upper() + ')'
a['termId'] = termGeneSymbol.upper()
if(term_alt_result['type'] not in term_result_types_array):
term_result_types_array.append(term_alt_result['type'])
total_found_terms = float(len(term_result_types_array))
for k in types:
if(k.upper() in term_result_types_array):
a['probabilitiesMap'][k] = str(1.0/total_found_terms)
else:
a['probabilitiesMap'][k] = str(0.0)
a['desc'] = termDesc
a['geneSymbol'] = termGeneSymbol
a['termTitle'] = termTitle
term_with_id.append(a)
else:
termDesc = ''
termGeneSymbol = ''
term_result_types_array = []
#tr = dumps(term_result)
for item_type in term_result:
if(item_type['type'] == 'GENE'):
termDesc = item_type['desc']
termGeneSymbol = item_type['geneSymbol']
if(len(queryTerm) > 12 and queryTerm[:3] == 'ENS'):
termTitle = termGeneSymbol.upper() + ' (' + queryTerm.upper() + ')'
a['termId'] = termGeneSymbol.upper()
if(item_type['type'] not in term_result_types_array):
term_result_types_array.append(item_type['type'])
total_found_terms = float(len(term_result_types_array))
for k in types:
if(k.upper() in term_result_types_array):
a['probabilitiesMap'][k] = str(1.0/total_found_terms)
else:
a['probabilitiesMap'][k] = str(0.0)
a['desc'] = termDesc
a['geneSymbol'] = termGeneSymbol
a['termTitle'] = termTitle
term_with_id.append(a)
#print dumps(a)
#term_with_id.append(term_result)
return_value = {
'termClassification': term_with_id
}
#print dumps(return_value)
return dumps(return_value)
def identify_term(name):
client = pymongo.MongoClient()
db = client.identifiers
allterms = db.allterms2
results = allterms.find({'term': name.upper(),'genomeType': 'human'})
return None if results is None else results
def identify_alt_term(name):
client = pymongo.MongoClient()
db = client.identifiers
allterms = db.allterms2
gene_alt_id = MyGeneInfo.get_gene_info_by_id(name)
results = allterms.find_one({'term': gene_alt_id.upper(),'genomeType': 'human'})
if(results is None):
results = {
'term': 'UNKNOWN',
'desc': 'UNKNOWN'
}
return results
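# Illustrative lookups (assume the 'identifiers.allterms2' collection has been
# populated, e.g. by add_biomart_terms_from_file(); 'TP53' is an example symbol):
#
#   cursor = identify_term('TP53')       # cursor over matching human records
#   record = identify_alt_term('tp53')   # falls back to MyGeneInfo resolution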
#def identify_term(name):
# client = pymongo.MongoClient()
# db = client.identifiers
# allterms = db.allterms
# result = allterms.find_one({'term': name.upper()})
# return None if result is None else result
def add_terms_from_file():
client = pymongo.MongoClient()
db = client.identifiers
allterms = db.allterms2
#url = 'http://geneli.st:8181/add-terms1.tsv'
#url = 'http://geneli.st:8181/mirna-terms.txt'
url = 'http://geneli.st:8181/mirna_label.txt'
r = requests.get(url)
lines = list(r.iter_lines())
count=0
for idx, line in enumerate(lines):
term, term_type = line.split('\t')
term_to_add = {
'term': term.upper(),
'type': term_type
}
allterms.save(term_to_add)
count = count + 1
print 'Done'
print str(count)
def load_variant_to_gene_from_file():
client = pymongo.MongoClient()
db = client.identifiers
variants = db.variants
variants.drop()
f_path = os.path.abspath('./variant_vs_gene.txt')
f = open(f_path, 'r')
count = 0
for line in f:
count += 1
if(count % 5000 == 0):
print str(count) + ' (' + "{0:.2f}%".format(float(count)/89000000 * 100) + ')'
#print str(count) + ' (' + str(count/89000000) + ')c'
#if(count > 10000):
# break
variant, gene = line.split('\t')
#print variant + ' - ' + gene
insertThisRecord = {
'geneSymbol': gene.rstrip().upper(),
'genomeType': 'human',
'term': variant.upper(),
'type': 'GENE'
}
variants.save(insertThisRecord)
variants.create_index([
("term", pymongo.ASCENDING)
])
def get_mirna_from_cluster_file():
f = open('/Users/aarongary/Development/DataSets/Terms/BRCA.json', 'r')
count = 0
for line in f:
if('hsa-' in line):
print count
count += 1
hsa_items = line.split('hsa-')
for hsa_item in hsa_items:
print hsa_item
def add_biomart_terms_from_file():
client = pymongo.MongoClient()
db = client.identifiers
allterms = db.allterms2
allterms.drop()
#filesToParse = [{'genomeType': 'human', 'url': 'http://geneli.st:8181/biomart/human Homo sapiens protein coding genes.txt','termType': 'GENE'},
# {'genomeType': 'human', 'url': 'http://geneli.st:8181/biomart/add-terms-non-GENE.tsv','termType': 'NONGENE'}]
terms_host = 'http://ec2-52-40-169-254.us-west-2.compute.amazonaws.com:3000/Biomart'
filesToParse = [
#{'genomeType': 'dog', 'url': terms_host + '/dog Canis familiaris protein coding genes.txt','termType': 'GENE'},
#{'genomeType': 'fruitfly', 'url': terms_host + '/fruitfly Drosophila melanogaster protein coding genes.txt','termType': 'GENE'},
#{'genomeType': 'monkey', 'url': terms_host + '/monkey Macaca mulatta protein coding genes.txt','termType': 'GENE'},
#{'genomeType': 'mouse', 'url': terms_host + '/mouse Mus musculus protein coding genes.txt','termType': 'GENE'},
#{'genomeType': 'rat', 'url': terms_host + '/rat Rattus norvegicus protein coding genes.txt','termType': 'GENE'},
#{'genomeType': 'worm', 'url': terms_host + '/worm Caenorhabditis elegans protein coding genes.txt','termType': 'GENE'},
#{'genomeType': 'zebrafish', 'url': terms_host + '/zebrafish Danio rerio protein coding genes.txt','termType': 'GENE'},
#{'genomeType': 'dog', 'url': terms_host + '/dog Canis familiaris mirna genes.txt','termType': 'GENE'},
#{'genomeType': 'fruitfly', 'url': terms_host + '/fruitfly Drosophila melanogaster pre-mirna genes.txt','termType': 'GENE'},
#{'genomeType': 'monkey', 'url': terms_host + '/monkey Macaca mulatta mirna genes.txt','termType': 'GENE'},
#{'genomeType': 'mouse', 'url': terms_host + '/mouse Mus musculus mirna genes.txt','termType': 'GENE'},
#{'genomeType': 'rat', 'url': terms_host + '/rat Rattus norvegicus mirna genes.txt','termType': 'GENE'},
#{'genomeType': 'worm', 'url': terms_host + '/worm Caenorhabditis elegans mirna genes.txt','termType': 'GENE'},
#{'genomeType': 'zebrafish', 'url': terms_host + '/zebrafish Danio rerio mirna genes.txt','termType': 'GENE'},
{'genomeType': 'human', 'url': terms_host + '/add-terms-DISEASE.tsv','termType': 'NONGENE'},
{'genomeType': 'human', 'url': terms_host + '/human Homo sapiens protein coding genes.txt','termType': 'GENE'},
{'genomeType': 'human', 'url': terms_host + '/human Homo sapiens miRNA genes.txt','termType': 'GENE'}
]
for f in filesToParse:
r = requests.get(f['url'], stream=True)
lines = r.iter_lines()
lines.next() # ignore header
count = 0
for line in lines:
count += 1
if(count % 1000 == 0):
print count
try:
if(f['termType'] == 'GENE'):
ensGID, desc, geneType, geneStatus, geneSymbol = line.split('\t')
insertThisRecord = {
'ensGID': ensGID,
'desc': desc,
'geneType': geneType,
'geneStatus': geneStatus,
'geneSymbol': geneSymbol,
'genomeType': f['genomeType'],
'term': ensGID.upper(),
'type': 'GENE'
}
allterms.save(insertThisRecord)
insertThisInvertedRecord = {
'ensGID': ensGID,
'desc': desc,
'geneType': geneType,
'geneStatus': geneStatus,
'geneSymbol': geneSymbol,
'genomeType': f['genomeType'],
'term': geneSymbol.upper(),
'type': 'GENE'
}
allterms.save(insertThisInvertedRecord)
else:
fTerm, fType = line.split('\t')
allterms.save({'genomeType': 'human','term': fTerm.upper(),'type': fType})
#allterms.save({'genomeType': 'dog','term': fTerm.upper(),'type': fType})
#allterms.save({'genomeType': 'fruitfly','term': fTerm.upper(),'type': fType})
#allterms.save({'genomeType': 'monkey','term': fTerm.upper(),'type': fType})
#allterms.save({'genomeType': 'mouse','term': fTerm.upper(),'type': fType})
#allterms.save({'genomeType': 'rat','term': fTerm.upper(),'type': fType})
#allterms.save({'genomeType': 'worm','term': fTerm.upper(),'type': fType})
#allterms.save({'genomeType': 'zebrafish','term': fTerm.upper(),'type': fType})
except Exception as e:
            print "Didn't work: " + e.message
print 'Done with file'
allterms.ensure_index([("ensGID" , pymongo.ASCENDING)])
allterms.ensure_index([("term" , pymongo.ASCENDING)])
allterms.ensure_index([("type" , pymongo.ASCENDING)])
allterms.ensure_index([("geneType" , pymongo.ASCENDING)])
# allterms.create_indexes([
# pymongo.IndexModel([('ensGID', pymongo.ASCENDING)]),
# pymongo.IndexModel([('term', pymongo.ASCENDING)]),
# pymongo.IndexModel([('type', pymongo.ASCENDING)]),
# pymongo.IndexModel([('geneType', pymongo.ASCENDING)])
# ])
print 'Done'
return ""
def add_terms_from_file_autocomplete():
client = pymongo.MongoClient()
db = client.identifiers
allterms = db.allterms
#url = 'http://geneli.st:8181/add-terms3a.tsv'
url = 'http://geneli.st:8181/add-terms3.tsv'
r = requests.get(url)
lines = list(r.iter_lines())
count=0
for idx, line in enumerate(lines):
term, term_type = line.split('\t')
#print term
term_to_add = {
'term': term.upper(),
'type': term_type
}
allterms.save(term_to_add)
count = count + 1
if(count % 200 == 0):
print count #dumps(term_to_add)
#allterms.create_indexes([pymongo.IndexModel([('term', pymongo.ASCENDING)])])
print 'Done'
def add_terms_from_elasticsearch_autocomplete():
client = pymongo.MongoClient()
db = client.identifiers
allterms = db.allterms3
count=0
phenotypes = ElasticSearch.get_clinvar_phenotypes()
for term in phenotypes:
term_to_add = {
'term': term.upper(),
'type': 'ICD10'
}
allterms.save(term_to_add)
count = count + 1
if(count % 200 == 0):
print count #dumps(term_to_add)
#allterms.create_indexes([pymongo.IndexModel([('term', pymongo.ASCENDING)])])
print 'Done'
def load_terms_from_file():
client = pymongo.MongoClient()
db = client.identifiers
allterms = db.allterms
allterms.drop()
url = 'http://ec2-52-26-19-122.us-west-2.compute.amazonaws.com:8080/all-terms3.tsv'
r = requests.get(url)
lines = list(r.iter_lines())
count=0
for idx, line in enumerate(lines):
term, term_type = line.split('\t')
#print term
term_to_add = {
'term': term.upper(),
'type': term_type
}
allterms.save(term_to_add)
count = count + 1
if(count % 200 == 0):
print count #dumps(term_to_add)
allterms.create_indexes([pymongo.IndexModel([('term', pymongo.ASCENDING)])])
print 'Done'
def process_genome_terms(terms):
terms_uppercase = terms.upper()
return_value = []
genome_id_kv = [
{'k': 'CANIS,FAMILIARIS', 'v': 'DOG'},
{'k': 'DROSOPHILA,MELANOGASTER', 'v': 'FRUITFLY'},
{'k': 'HOMO,SAPIEN', 'v': 'HUMAN'},
{'k': 'MACACA,MULATTA', 'v': 'MONKEY'},
{'k': 'MUS,MUSCULUS', 'v': 'MOUSE'},
{'k': 'RATTUS,NORVEGICUS', 'v': 'RAT'},
{'k': 'CAENORHABDITIS,ELEGANS', 'v': 'WORM'},
{'k': 'DANIO,RERIO', 'v': 'ZEBRAFISH'}
]
for kv in genome_id_kv:
if(kv['k'] in terms_uppercase):
terms_uppercase = terms_uppercase.replace(kv['k'], '').replace(',,',',')
return_value.append({'latin': kv['k'].replace(',',' '), 'familiar_term': kv['v']})
if(terms_uppercase[0:1] == ','):
        terms_uppercase = terms_uppercase[1:]  # strip only the leading comma
if(terms_uppercase == ','):
terms_uppercase = ''
print terms_uppercase
return {'terms': terms_uppercase, 'special_terms': return_value}
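# Hedged illustration (not part of the original code): process_genome_terms is
# meant to pull recognised species names out of a comma-separated query and
# report them separately as familiar terms, e.g. a call like
#   process_genome_terms('Homo,sapien,TP53')
# should return roughly
#   {'terms': 'TP53',
#    'special_terms': [{'latin': 'HOMO SAPIEN', 'familiar_term': 'HUMAN'}]}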
def process_disease_terms(terms):
terms_uppercase = terms.upper()
return_value = []
genome_id_kv = [
{'k': 'BLADDER,CANCER', 'v': 'BLCA'},
{'k': 'BRAIN,CANCER', 'v': 'LGG'},
{'k': 'BREAST,CANCER', 'v': 'BRCA'},
{'k': 'CERVICAL,CANCER', 'v': 'CESC'},
{'k': 'ENDOCERVICAL,CANCER', 'v': 'CESC'},
{'k': 'CERVICAL,CANCER', 'v': 'CESC'},
{'k': 'CHOLANGIOCARCINOMA', 'v': 'CHOL'},
{'k': 'BILE,DUCT,CANCER', 'v': 'CHOL'},
{'k': 'COLON,CANCER', 'v': 'COAD'},
{'k': 'ESOPHAGEAL,CANCER', 'v': 'ESCA'},
{'k': 'GLIOBLASTOMA,CANCER', 'v': 'GBM'}, #Wikify
{'k': 'HEAD,AND,NECK,CANCER', 'v': 'HNSC'},
{'k': 'NECK,CANCER', 'v': 'HNSC'},
{'k': 'HEAD,CANCER', 'v': 'HNSC'},
{'k': 'KIDNEY,CHROMOPHOBE', 'v': 'KICH'},
{'k': 'KIDNEY,RENAL,CLEAR,CELL,CARCINOMA', 'v': 'KIRC'}, #Wikify
{'k': 'KIDNEY,RENAL,PAPILLARY,CELL,CARCINOMA', 'v': 'KIRP'},
{'k': 'LIVER,CANCER', 'v': 'LIHC'},
{'k': 'LUNG,CANCER', 'v': 'LUAD'},
{'k': 'LUNG,SQUAMOUS,CELL,CARCINOMA', 'v': 'LUSC'}, #Wikify
{'k': 'LYMPHOID,CANCER', 'v': 'DLBC'},
{'k': 'LYMPHOMA,CANCER', 'v': 'DLBC'},
{'k': 'MESOTHELIOMA,CANCER', 'v': 'MESO'},
{'k': 'OVARIAN,CANCER', 'v': 'OV'},
{'k': 'PANCREATIC,CANCER', 'v': 'PAAD'},
{'k': 'PHEOCHROMOCYTOMA,CANCER', 'v': 'PCPG'},
{'k': 'PARAGANGLIOMA,CANCER', 'v': 'PCPG'},
{'k': 'PROSTATE,CANCER', 'v': 'PRAD'},
{'k': 'RECTUM,CANCER', 'v': 'READ'},
{'k': 'SARCOMA,CANCER', 'v': 'SARC'},
{'k': 'SKIN,CANCER', 'v': 'SKCM'},
{'k': 'STOMACH,CANCER', 'v': 'STAD'},
{'k': 'TESTICULAR,CANCER', 'v': 'TGCT'},
{'k': 'THYMOMA,CANCER', 'v': 'THYM'}, #Wikify
{'k': 'THYROID,CANCER', 'v': 'THCA'},
{'k': 'UTERINE,CANCER', 'v': 'UCS'},
{'k': 'UTERINE,CORPUS,ENDOMETRIAL,CANCER', 'v': 'UCEC'}, #Wikify
{'k': 'UVEAL,MELANOMA,CANCER', 'v': 'UVM'},
{'k': 'UVEAL,CANCER', 'v': 'UVM'},
{'k': 'LEUKEMIA', 'v': 'LAML'},
{'k': 'MYELOID,LEUKEMIA', 'v': 'LAML'},
{'k': 'ADRENOCORTICAL,CARCINOMA', 'v': 'ACC'},
{'k': 'BLADDER,UROTHELIAL,CARCINOMA', 'v': 'BLCA'},
{'k': 'BRAIN,LOWER,GRADE,GLIOMA', 'v': 'LGG'},
{'k': 'BREAST,INVASIVE,CARCINOMA', 'v': 'BRCA'},
{'k': 'CERVICAL,SQUAMOUS,CELL,CARCINOMA', 'v': 'CESC'},
{'k': 'ENDOCERVICAL,ADENOCARCINOMA', 'v': 'CESC'},
{'k': 'CHOLANGIOCARCINOMA', 'v': 'CHOL'},
{'k': 'COLON,ADENOCARCINOMA', 'v': 'COAD'},
{'k': 'ESOPHAGEAL,CARCINOMA', 'v': 'ESCA'},
{'k': 'GLIOBLASTOMA,MULTIFORME', 'v': 'GBM'},
{'k': 'HEAD,AND,NECK,SQUAMOUS,CELL,CARCINOMA', 'v': 'HNSC'},
{'k': 'KIDNEY,CHROMOPHOBE', 'v': 'KICH'},
{'k': 'KIDNEY,RENAL,CLEAR,CELL,CARCINOMA', 'v': 'KIRC'},
{'k': 'KIDNEY,RENAL,PAPILLARY,CELL,CARCINOMA', 'v': 'KIRP'},
{'k': 'LIVER,HEPATOCELLULAR,CARCINOMA', 'v': 'LIHC'},
{'k': 'LUNG,ADENOCARCINOMA', 'v': 'LUAD'},
{'k': 'LUNG,SQUAMOUS,CELL,CARCINOMA', 'v': 'LUSC'},
{'k': 'LYMPHOID,NEOPLASM,DIFFUSE,LARGE,B-CELL,LYMPHOMA', 'v': 'DLBC'},
{'k': 'MESOTHELIOMA', 'v': 'MESO'},
{'k': 'OVARIAN,SEROUS,CYSTADENOCARCINOMA', 'v': 'OV'},
{'k': 'PANCREATIC,ADENOCARCINOMA', 'v': 'PAAD'},
{'k': 'PHEOCHROMOCYTOMA', 'v': 'PCPG'},
{'k': 'PARAGANGLIOMA', 'v': 'PCPG'},
{'k': 'PROSTATE,ADENOCARCINOMA', 'v': 'PRAD'},
{'k': 'RECTUM,ADENOCARCINOMA', 'v': 'READ'},
{'k': 'SARCOMA', 'v': 'SARC'},
{'k': 'SKIN,CUTANEOUS,MELANOMA', 'v': 'SKCM'},
{'k': 'STOMACH,ADENOCARCINOMA', 'v': 'STAD'},
{'k': 'TESTICULAR,GERM,CELL,TUMORS', 'v': 'TGCT'},
{'k': 'THYMOMA', 'v': 'THYM'},
{'k': 'THYROID,CARCINOMA', 'v': 'THCA'},
{'k': 'UTERINE,CARCINOSARCOMA', 'v': 'UCS'},
{'k': 'UTERINE,CORPUS,ENDOMETRIAL,CARCINOMA', 'v': 'UCEC'},
{'k': 'UVEAL,MELANOMA', 'v': 'UVM'}
]
for kv in genome_id_kv:
if(kv['k'] in terms_uppercase):
terms_uppercase = terms_uppercase.replace(kv['k'], '').replace(',,',',')
return_value.append({'latin': kv['k'].replace(',',' '), 'familiar_term': kv['v']})
if(terms_uppercase[0:1] == ','):
        terms_uppercase = terms_uppercase[1:]  # strip only the leading comma
if(terms_uppercase == ','):
terms_uppercase = ''
print terms_uppercase
return {'terms': terms_uppercase, 'special_terms': return_value}
def auto_complete_search(term):
tr = TermAnalyzer()
termsClassified = tr.identify_term_partial(term)
return_value = {
'termClassification': termsClassified
}
return return_value
def test_linear_classifier():
est = LinearRegression(fit_intercept=False)
# random training data
X = np.random.rand(10, 2)
y = np.random.randint(2, size=10)
est.fit(X, y)
est.coef_ # access coefficients
def load_disease_groups():
disease_groups_array = [{
'genomeType': 'human',
'term': 'Adrenocortical Cancer ',
'group': 'Adrenal',
'type': 'DISEASE'
},
{
'genomeType': 'human',
'term': 'Adrenocortical Carcinoma ',
'group': 'Adrenal',
'type': 'DISEASE'
},
{
'genomeType': 'human',
'term': 'Pheochromocytoma and Paraganglioma ',
'group': 'Adrenal',
'type': 'DISEASE'
},
{
'genomeType': 'human',
'term': 'Cholangiocarcinoma ',
'group': 'Bile',
'type': 'DISEASE'
},
{
'genomeType': 'human',
'term': 'Cholangiocarcinoma ',
'group': 'Bile',
'type': 'DISEASE'
},
{
'genomeType': 'human',
'term': 'Bladder Cancer',
'group': 'Bladder',
'type': 'DISEASE'
},
{
'genomeType': 'human',
'term': 'Bladder Urothelial Carcinoma ',
'group': 'Bladder',
'type': 'DISEASE'
},
{
'genomeType': 'human',
'term': 'Brain Lower Grade Glioma ',
'group': 'Brain',
'type': 'DISEASE'
},
{
'genomeType': 'human',
'term': 'Glioblastoma ',
'group': 'Brain',
'type': 'DISEASE'
},
{
'genomeType': 'human',
'term': 'Glioblastoma Multiforme',
'group': 'Brain',
'type': 'DISEASE'
},
{
'genomeType': 'human',
'term': 'Glioblastoma Multiforme and Brain Lower Grade Glioma ',
'group': 'Brain',
'type': 'DISEASE'
},
{
'genomeType': 'human',
'term': 'Glioma High Grade',
'group': 'Brain',
'type': 'DISEASE'
},
{
'genomeType': 'human',
'term': 'Breast Invasive Carcinoma ',
'group': 'Breast',
'type': 'DISEASE'
},
{
'genomeType': 'human',
'term': 'Breast Tumors RNA',
'group': 'Breast',
'type': 'DISEASE'
},
{
'genomeType': 'human',
'term': 'Cervical Cancer ChemoradioResistant',
'group': 'Cervical',
'type': 'DISEASE'
},
{
'genomeType': 'human',
'term': 'Cervical Squamous Cell Carcinoma and Endocervical Adenocarcinoma ',
'group': 'Cervical',
'type': 'DISEASE'
},
{
'genomeType': 'human',
'term': 'Colon Adenocarcinoma',
'group': 'Colon',
'type': 'DISEASE'
},
{
'genomeType': 'human',
'term': 'Colon Adenocarcinoma and Rectum adenocarcinoma ',
'group': 'colon',
'type': 'DISEASE'
},
{
'genomeType': 'human',
'term': 'Colon Cancer ',
'group': 'colon',
'type': 'DISEASE'
},
{
'genomeType': 'human',
'term': 'Ulcerative Colitis Colon Inflammation ',
'group': 'colon',
'type': 'DISEASE'
},
{
'genomeType': 'human',
'term': 'Endometrial Cancer Stage I',
'group': 'Endometrial',
'type': 'DISEASE'
},
{
'genomeType': 'human',
'term': 'Esophageal Cancer',
'group': 'Esophagus',
'type': 'DISEASE'
},
{
'genomeType': 'human',
'term': 'Esophageal Carcinoma',
'group': 'Esophagus',
'type': 'DISEASE'
},
{
'genomeType': 'human',
'term': 'Head and Neck ',
'group': 'HeadAndNeck',
'type': 'DISEASE'
},
{
'genomeType': 'human',
'term': 'Head and Neck Squamous Cell Carcinoma ',
'group': 'HeadAndNeck',
'type': 'DISEASE'
},
{
'genomeType': 'human',
'term': 'Kidney Chromophobe ',
'group': 'Kidney',
'type': 'DISEASE'
},
{
'genomeType': 'human',
'term': 'Kidney Chromophobe and Kidney Renal Clear Cell Carcinoma and Kidney Renal Papillary Cell Carcinoma',
'group': 'Kidney',
'type': 'DISEASE'
},
{
'genomeType': 'human',
'term': 'Kidney Renal Clear Cell Carcinoma ',
'group': 'Kidney',
'type': 'DISEASE'
},
{
'genomeType': 'human',
'term': 'Kidney Renal Clear Cell Carcinoma ',
'group': 'Kidney',
'type': 'DISEASE'
},
{
'genomeType': 'human',
'term': 'Kidney Renal Papillary Cell Carcinoma ',
'group': 'Kidney',
'type': 'DISEASE'
},
{
'genomeType': 'human',
'term': 'Renal Cell Carcinoma',
'group': 'Kidney',
'type': 'DISEASE'
},
{
'genomeType': 'human',
'term': 'Acute Myeloid Leukemia ',
'group': 'Leukemia',
'type': 'DISEASE'
},
{
'genomeType': 'human',
'term': 'Acute Myeloid Leukemia ',
'group': 'Leukemia',
'type': 'DISEASE'
},
{
'genomeType': 'human',
'term': 'Hepatocellular Carcinoma ',
'group': 'Liver',
'type': 'DISEASE'
},
{
'genomeType': 'human',
'term': 'Liver Hepatocellular Carcinoma ',
'group': 'Liver',
'type': 'DISEASE'
},
{
'genomeType': 'human',
'term': 'Liver Hepatocellular Carcinoma Early Stage Cirrhosis ',
'group': 'Liver',
'type': 'DISEASE'
},
{
'genomeType': 'human',
'term': 'Blood Lung Cancer',
'group': 'Lung',
'type': 'DISEASE'
},
{
'genomeType': 'human',
'term': 'Blood Lung Cancer Stage I ',
'group': 'Lung',
'type': 'DISEASE'
},
{
'genomeType': 'human',
'term': 'Lung Adenocarcinoma ',
'group': 'Lung',
'type': 'DISEASE'
},
{
'genomeType': 'human',
'term': 'Lung Squamous Cell Carcinoma ',
'group': 'Lung',
'type': 'DISEASE'
},
{
'genomeType': 'human',
'term': 'Diffuse Large B-Cell Lymphoma',
'group': 'Lymphoma',
'type': 'DISEASE'
},
{
'genomeType': 'human',
'term': 'Lymphoid Neoplasm Diffuse Large B-cell Lymphoma',
'group': 'Lymphoma',
'type': 'DISEASE'
},
{
'genomeType': 'human',
'term': 'Mesothelioma ',
'group': 'Ovarian',
'type': 'DISEASE'
},
{
'genomeType': 'human',
'term': 'Ovarian Cancer',
'group': 'Ovarian',
'type': 'DISEASE'
},
{
'genomeType': 'human',
'term': 'Ovarian Serous Cystadenocarcinoma ',
'group': 'Ovarian',
'type': 'DISEASE'
},
{
'genomeType': 'human',
'term': 'Pancreatic ',
'group': 'Pancreatic',
'type': 'DISEASE'
},
{
'genomeType': 'human',
'term': 'Pancreatic Adenocarcinoma ',
'group': 'Pancreatic',
'type': 'DISEASE'
},
{
'genomeType': 'human',
'term': 'Pancreatic Ductal Adenocarcinoma',
'group': 'Pancreatic',
'type': 'DISEASE'
},
{
'genomeType': 'human',
'term': 'Prostate Adenocarcinoma',
'group': 'Prostate',
'type': 'DISEASE'
},
{
'genomeType': 'human',
'term': 'Prostate Carcinoma ',
'group': 'Prostate',
'type': 'DISEASE'
},
{
'genomeType': 'human',
'term': 'Rectal Cancer ',
'group': 'Rectal',
'type': 'DISEASE'
},
{
'genomeType': 'human',
'term': 'Rectum Adenocarcinoma ',
'group': 'Rectal',
'type': 'DISEASE'
},
{
'genomeType': 'human',
'term': 'Sarcoma ',
'group': 'Sarcoma',
'type': 'DISEASE'
},
{
'genomeType': 'human',
'term': 'Sarcoma ',
'group': 'Sarcoma',
'type': 'DISEASE'
},
{
'genomeType': 'human',
'term': 'Melanoma Malignant ',
'group': 'Skin',
'type': 'DISEASE'
},
{
'genomeType': 'human',
'term': 'Skin Cutaneous Melanoma',
'group': 'Skin',
'type': 'DISEASE'
},
{
'genomeType': 'human',
'term': 'Stomach Adenocarcinoma ',
'group': 'Stomach',
'type': 'DISEASE'
},
{
'genomeType': 'human',
'term': 'Stomach and Esophageal Carcinoma',
'group': 'Stomach',
'type': 'DISEASE'
},
{
'genomeType': 'human',
'term': 'Stomach Cancer 126 ',
'group': 'Stomach',
'type': 'DISEASE'
},
{
'genomeType': 'human',
'term': 'Testicular Germ Cell Tumors ',
'group': 'Testicular',
'type': 'DISEASE'
},
{
'genomeType': 'human',
'term': 'Thymoma ',
'group': 'Thymus',
'type': 'DISEASE'
},
{
'genomeType': 'human',
'term': 'Thyroid Cancer',
'group': 'Thyroid',
'type': 'DISEASE'
},
{
'genomeType': 'human',
'term': 'Thyroid Carcinoma',
'group': 'Thyroid',
'type': 'DISEASE'
},
{
'genomeType': 'human',
'term': 'Uterine Carcinosarcoma ',
'group': 'Uterine',
'type': 'DISEASE'
},
{
'genomeType': 'human',
'term': 'Uterine Corpus Endometrial Carcinoma ',
'group': 'Uterine',
'type': 'DISEASE'
},
{
'genomeType': 'human',
'term': 'Uveal Melanoma',
'group': 'Uveal',
'type': 'DISEASE'
},
{
'genomeType': 'human',
'term': 'Uveal Melanoma',
'group': 'Uveal',
'type': 'DISEASE'
},
{
'genomeType': 'human',
'term': 'Adrenal ',
'group': 'Adrenal',
'type': 'DISEASE'
},
{
'genomeType': 'human',
'term': 'Bile ',
'group': 'Bile',
'type': 'DISEASE'
},
{
'genomeType': 'human',
'term': 'Bladder ',
'group': 'Bladder',
'type': 'DISEASE'
},
{
'genomeType': 'human',
'term': 'Brain',
'group': 'Brain',
'type': 'DISEASE'
},
{
'genomeType': 'human',
'term': 'Breast ',
'group': 'Breast',
'type': 'DISEASE'
},
{
'genomeType': 'human',
'term': 'Cervical',
'group': 'Cervical',
'type': 'DISEASE'
},
{
'genomeType': 'human',
'term': 'colon',
'group': 'colon',
'type': 'DISEASE'
},
{
'genomeType': 'human',
'term': 'Endometrial',
'group': 'Endometrial',
'type': 'DISEASE'
},
{
'genomeType': 'human',
'term': 'Esophagus ',
'group': 'Esophagus',
'type': 'DISEASE'
},
{
'genomeType': 'human',
'term': 'HeadAndNeck',
'group': 'HeadAndNeck',
'type': 'DISEASE'
},
{
'genomeType': 'human',
'term': 'Kidney ',
'group': 'Kidney',
'type': 'DISEASE'
},
{
'genomeType': 'human',
'term': 'Leukemia',
'group': 'Leukemia',
'type': 'DISEASE'
},
{
'genomeType': 'human',
'term': 'Liver',
'group': 'Liver',
'type': 'DISEASE'
},
{
'genomeType': 'human',
'term': 'Lung ',
'group': 'Lung',
'type': 'DISEASE'
},
{
'genomeType': 'human',
'term': 'Lymphoma',
'group': 'Lymphoma',
'type': 'DISEASE'
},
{
'genomeType': 'human',
'term': 'Ovarian ',
'group': 'Ovarian',
'type': 'DISEASE'
},
{
'genomeType': 'human',
'term': 'Pancreatic ',
'group': 'Pancreatic',
'type': 'DISEASE'
},
{
'genomeType': 'human',
'term': 'Prostate',
'group': 'Prostate',
'type': 'DISEASE'
},
{
'genomeType': 'human',
'term': 'Rectal ',
'group': 'Rectal',
'type': 'DISEASE'
},
{
'genomeType': 'human',
'term': 'Sarcoma ',
'group': 'Sarcoma',
'type': 'DISEASE'
},
{
'genomeType': 'human',
'term': 'Skin ',
'group': 'Skin',
'type': 'DISEASE'
},
{
'genomeType': 'human',
'term': 'Stomach ',
'group': 'Stomach',
'type': 'DISEASE'
},
{
'genomeType': 'human',
'term': 'Testicular ',
'group': 'Testicular',
'type': 'DISEASE'
},
{
'genomeType': 'human',
'term': 'Thymus ',
'group': 'Thymus',
'type': 'DISEASE'
},
{
'genomeType': 'human',
'term': 'Thyroid ',
'group': 'Thyroid',
'type': 'DISEASE'
},
{
'genomeType': 'human',
'term': 'Uterine ',
'group': 'Uterine',
'type': 'DISEASE'
},
{
'genomeType': 'human',
'term': 'Uveal',
'group': 'Uveal',
'type': 'DISEASE'
}]
client = pymongo.MongoClient()
db = client.identifiers
allterms = db.allterms2
#allterms.drop()
for disease in disease_groups_array:
allterms.save({'genomeType': disease['genomeType'],'term': disease['term'].upper(),'type': disease['type'], 'group': disease['group']})
|
mit
| 2,962,690,134,142,557,700
| 28.196774
| 149
| 0.487626
| false
| 3.239152
| false
| false
| false
|
robinson96/GRAPE
|
vine/grapeMenu.py
|
1
|
6832
|
import traceback
import addSubproject
import bundle
import branches
import checkout
import clone
import commit
import config
import deleteBranch
import foreach
import grapeConfig
import grapeGit as git
import hooks
import merge
import mergeDevelop
import mergeRemote
import newFlowBranch
import newWorkingTree
import publish
import pull
import push
import quit
import resolveConflicts
import resumable
import review
import stash
import status
import grapeTest as test
import updateLocal
import updateSubproject
import updateView
import utility
import version
import walkthrough
#######################################################################
#The Menu class - encapsulates menu options and sections.
# Menu Options are the objects that perform git-related or bitbucket-related tasks.
# sections are groupings of menu options that are displayed together.
######################################################################
__menuInstance = None
def menu():
global __menuInstance
if __menuInstance is None:
__menuInstance = _Menu()
grapeConfig.readDefaults()
grapeConfig.read()
__menuInstance.postInit()
return __menuInstance
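# Hedged usage sketch (the option key below is an assumption, not a key
# verified against the option classes): callers obtain the singleton menu and
# dispatch a choice, roughly:
#   m = menu()
#   m.presentTextMenu()
#   m.applyMenuChoice('status', ['status'])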
def _resetMenu():
"""
Resets the Singleton Instance. Meant for testing purposes only.
"""
global __menuInstance
__menuInstance = None
grapeConfig.resetGrapeConfig()
class _Menu(object):
def __init__(self):
self._options = {}
#Add menu classes
self._optionLookup = {}
#Add/order your menu option here
self._options = [addSubproject.AddSubproject(), bundle.Bundle(), bundle.Unbundle(), branches.Branches(),
status.Status(), stash.Stash(), checkout.Checkout(), push.Push(), pull.Pull(), commit.Commit(), publish.Publish(),
clone.Clone(), config.Config(), grapeConfig.WriteConfig(),
foreach.ForEach(), merge.Merge(), mergeDevelop.MergeDevelop(), mergeRemote.MergeRemote(),
deleteBranch.DeleteBranch(), newWorkingTree.NewWorkingTree(),
resolveConflicts.ResolveConflicts(),
review.Review(), test.Test(), updateLocal.UpdateLocal(), updateSubproject.UpdateSubproject(),
hooks.InstallHooks(), hooks.RunHook(),
updateView.UpdateView(), version.Version(), walkthrough.Walkthrough(), quit.Quit()]
#Add/order the menu sections here
self._sections = ['Getting Started', 'Code Reviews', 'Workspace',
'Merge', 'Gitflow Tasks', 'Hooks', 'Patches', 'Project Management', 'Other']
def postInit(self):
# add dynamically generated (dependent on grapeConfig) options here
self._options = self._options + newFlowBranch.NewBranchOptionFactory().createNewBranchOptions(grapeConfig.
grapeConfig())
for currOption in self._options:
self._optionLookup[currOption.key] = currOption
####### MENU STUFF #########################################################################
def getOption(self, choice):
try:
return self._optionLookup[choice]
except KeyError:
print("Unknown option '%s'" % choice)
return None
def applyMenuChoice(self, choice, args=None, option_args=None, globalArgs=None):
chosen_option = self.getOption(choice)
if chosen_option is None:
return False
if args is None or len(args) == 0:
args = [chosen_option._key]
#first argument better be the key
if args[0] != chosen_option._key:
args = [chosen_option._key]+args
# use optdoc to parse arguments to the chosen_option.
# utility.argParse also does the magic of filling in defaults from the config files as appropriate.
if option_args is None and chosen_option.__doc__:
try:
config = chosen_option._config
if config is None:
config = grapeConfig.grapeConfig()
else:
config = grapeConfig.grapeRepoConfig(config)
option_args = utility.parseArgs(chosen_option.__doc__, args[1:], config)
except SystemExit as e:
if len(args) > 1 and "--help" != args[1] and "-h" != args[1]:
print("GRAPE PARSING ERROR: could not parse %s\n" % (args[1:]))
raise e
if globalArgs is not None:
utility.applyGlobalArgs(globalArgs)
try:
if isinstance(chosen_option, resumable.Resumable):
if option_args["--continue"]:
return chosen_option._resume(option_args)
return chosen_option.execute(option_args)
except git.GrapeGitError as e:
            traceback.print_exc()
print ("GRAPE: Uncaught Error %s in grape-%s when executing '%s' in '%s'\n%s" %
(e.code, chosen_option._key, e.gitCommand, e.cwd, e.gitOutput))
exit(e.code)
except utility.NoWorkspaceDirException as e:
print ("GRAPE: grape %s must be run from a grape workspace." % chosen_option.key)
print ("GRAPE: %s" % e.message)
exit(1)
finally:
if globalArgs is not None:
utility.popGlobalArgs()
# Present the main menu
def presentTextMenu(self):
width = 60
print("GRAPE - Git Replacement for \"Awesome\" PARSEC Environment".center(width, '*'))
longest_key = 0
for currOption in self._options:
if len(currOption.key) > longest_key:
longest_key = len(currOption.key)
for currSection in self._sections:
lowered_section = currSection.strip().lower()
print("\n" + (" %s " % currSection).center(width, '*'))
for currOption in self._options:
if currOption.section.strip().lower() != lowered_section:
continue
print("%s: %s" % (currOption.key.ljust(longest_key), currOption.description()))
# configures a ConfigParser object with all default values and sections needed by our Option objects
def setDefaultConfig(self, cfg):
cfg.ensureSection("repo")
cfg.set("repo", "name", "repo_name_not.yet.configured")
cfg.set("repo", "url", "https://not.yet.configured/scm/project/unknown.git")
cfg.set("repo", "httpsbase", "https://not.yet.configured")
cfg.set("repo", "sshbase", "ssh://git@not.yet.configured")
for currOption in self._options:
currOption.setDefaultConfig(cfg)
|
bsd-3-clause
| -6,110,235,504,380,364,000
| 38.72093
| 139
| 0.584602
| false
| 4.48294
| true
| false
| false
|
gluke77/rally
|
rally/common/db/api.py
|
1
|
13473
|
# Copyright 2013: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Defines interface for DB access.
The underlying driver is loaded as a :class:`LazyPluggable`.
Functions in this module are imported into the rally.common.db namespace.
Call these functions from rally.common.db namespace, not the
rally.common.db.api namespace.
All functions in this module return objects that implement a dictionary-like
interface. Currently, many of these objects are sqlalchemy objects that
implement a dictionary interface. However, a future goal is to have all of
these objects be simple dictionaries.
**Related Flags**
:backend: string to lookup in the list of LazyPluggable backends.
`sqlalchemy` is the only supported backend right now.
:connection: string specifying the sqlalchemy connection to use, like:
`sqlite:///var/lib/cinder/cinder.sqlite`.
:enable_new_services: when adding a new service to the database, is it in the
pool of available hardware (Default: True)
"""
import datetime as dt
from oslo_config import cfg
from oslo_db import api as db_api
from oslo_db import options as db_options
import six
from rally.common.i18n import _
CONF = cfg.CONF
db_options.set_defaults(CONF, connection="sqlite:////tmp/rally.sqlite",
sqlite_db="rally.sqlite")
IMPL = None
def serialize(fn):
def conv(data):
if data is None:
return None
if isinstance(data, (six.integer_types,
six.string_types,
six.text_type,
dt.date,
dt.time,
float,
)):
return data
if isinstance(data, dict):
return {k: conv(v) for k, v in six.iteritems(data)}
if isinstance(data, (list, tuple)):
return [conv(i) for i in data]
if hasattr(data, "_as_dict"):
result = data._as_dict()
for k, v in six.iteritems(result):
result[k] = conv(v)
return result
raise ValueError(_("Can not serialize %s") % data)
def wrapper(*args, **kwargs):
result = fn(*args, **kwargs)
return conv(result)
return wrapper
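# Illustrative sketch (this module does not currently decorate its accessors
# this way): ``serialize`` can wrap any accessor so SQLAlchemy rows exposing
# ``_as_dict`` come back as plain dicts.
#
#   @serialize
#   def task_get_serialized(uuid):
#       return get_impl().task_get(uuid)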
def get_impl():
global IMPL
if not IMPL:
_BACKEND_MAPPING = {"sqlalchemy": "rally.common.db.sqlalchemy.api"}
IMPL = db_api.DBAPI.from_config(CONF, backend_mapping=_BACKEND_MAPPING)
return IMPL
def engine_reset():
"""Reset DB engine."""
get_impl().engine_reset()
def schema_cleanup():
"""Drop DB schema. This method drops existing database."""
get_impl().schema_cleanup()
def schema_upgrade(revision=None):
"""Migrate the database to `revision` or the most recent revision."""
return get_impl().schema_upgrade(revision)
def schema_create():
"""Create database schema from models description."""
return get_impl().schema_create()
def schema_revision():
"""Return the schema revision."""
return get_impl().schema_revision()
def schema_stamp(revision):
"""Stamps database with provided revision."""
return get_impl().schema_stamp(revision)
def task_get(uuid):
"""Returns task by uuid.
:param uuid: UUID of the task.
:raises TaskNotFound: if the task does not exist.
:returns: task dict with data on the task.
"""
return get_impl().task_get(uuid)
def task_get_status(uuid):
"""Returns task by uuid.
:param uuid: UUID of the task.
:raises TaskNotFound: if the task does not exist.
:returns: task dict with data on the task.
"""
return get_impl().task_get_status(uuid)
def task_get_detailed_last():
"""Returns the most recently created task."""
return get_impl().task_get_detailed_last()
def task_get_detailed(uuid):
"""Returns task with results by uuid.
:param uuid: UUID of the task.
:returns: task dict with data on the task and its results.
"""
return get_impl().task_get_detailed(uuid)
def task_create(values):
"""Create task record in DB.
:param values: dict with record values.
:returns: task dict with data on the task.
"""
return get_impl().task_create(values)
def task_update(uuid, values):
"""Update task by values.
:param uuid: UUID of the task.
:param values: dict with record values.
:raises TaskNotFound: if the task does not exist.
:returns: new updated task dict with data on the task.
"""
return get_impl().task_update(uuid, values)
def task_update_status(task_uuid, status, allowed_statuses):
"""Update task status with specified value.
:param task_uuid: string with UUID of Task instance.
:param status: new value to wrote into db instead of status.
:param allowed_statuses: list of expected statuses to update in db.
:raises RallyException: if task not found with specified status.
    :returns: the count of rows matched, as returned by the database's
"row count" feature
"""
return get_impl().task_update_status(task_uuid, allowed_statuses,
status)
def task_list(status=None, deployment=None):
"""Get a list of tasks.
:param status: Task status to filter the returned list on. If set to
None, all the tasks will be returned.
:param deployment: deployment UUID to filter the returned list on.
                       If set to None, tasks from all deployments will be
returned.
:returns: A list of dicts with data on the tasks.
"""
return get_impl().task_list(status=status, deployment=deployment)
def task_delete(uuid, status=None):
"""Delete a task.
This method removes the task by the uuid, but if the status
argument is specified, then the task is removed only when these
    statuses are equal; otherwise an exception is raised.
:param uuid: UUID of the task.
:raises TaskNotFound: if the task does not exist.
:raises TaskInvalidStatus: if the status of the task does not
equal to the status argument.
"""
return get_impl().task_delete(uuid, status=status)
def task_result_get_all_by_uuid(task_uuid):
"""Get list of task results.
:param task_uuid: string with UUID of Task instance.
:returns: list instances of TaskResult.
"""
return get_impl().task_result_get_all_by_uuid(task_uuid)
def task_result_create(task_uuid, key, data):
"""Append result record to task.
:param task_uuid: string with UUID of Task instance.
:param key: key expected to update in task result.
:param data: data expected to update in task result.
:returns: TaskResult instance appended.
"""
return get_impl().task_result_create(task_uuid, key, data)
def deployment_create(values):
"""Create a deployment from the values dictionary.
:param values: dict with record values on the deployment.
:returns: a dict with data on the deployment.
"""
return get_impl().deployment_create(values)
def deployment_delete(uuid):
"""Delete a deployment by UUID.
:param uuid: UUID of the deployment.
:raises DeploymentNotFound: if the deployment does not exist.
:raises DeploymentIsBusy: if the resource is not enough.
"""
return get_impl().deployment_delete(uuid)
def deployment_get(deployment):
"""Get a deployment by UUID.
:param deployment: UUID or name of the deployment.
:raises DeploymentNotFound: if the deployment does not exist.
:returns: a dict with data on the deployment.
"""
return get_impl().deployment_get(deployment)
def deployment_update(uuid, values):
"""Update a deployment by values.
:param uuid: UUID of the deployment.
:param values: dict with items to update.
:raises DeploymentNotFound: if the deployment does not exist.
:returns: a dict with data on the deployment.
"""
return get_impl().deployment_update(uuid, values)
def deployment_list(status=None, parent_uuid=None, name=None):
"""Get list of deployments.
:param status: if None returns any deployments with any status.
:param parent_uuid: filter by parent. If None, return only "root"
deployments.
:param name: Name of deployment
:returns: a list of dicts with data on the deployments.
"""
return get_impl().deployment_list(status=status, parent_uuid=parent_uuid,
name=name)
def resource_create(values):
"""Create a resource from the values dictionary.
:param values: a dict with data on the resource.
:returns: a dict with updated data on the resource.
"""
return get_impl().resource_create(values)
def resource_get_all(deployment_uuid, provider_name=None, type=None):
"""Return resources of a deployment.
:param deployment_uuid: filter by uuid of a deployment
    :param provider_name: filter by provider_name; if it is None, then
return all providers
    :param type: filter by type; if it is None, then return all types
:returns: a list of dicts with data on a resource
"""
return get_impl().resource_get_all(deployment_uuid,
provider_name=provider_name,
type=type)
def resource_delete(id):
"""Delete a resource.
:param id: ID of a resource.
:raises ResourceNotFound: if the resource does not exist.
"""
return get_impl().resource_delete(id)
def verification_create(deployment_uuid):
"""Create Verification record in DB.
:param deployment_uuid: UUID of the deployment.
:returns: a dict with verification data.
"""
return get_impl().verification_create(deployment_uuid)
def verification_get(verification_uuid):
"""Returns verification by UUID.
:param verification_uuid: UUID of the verification.
:raises NotFoundException: if verification does not exist.
:returns: a dict with verification data.
"""
return get_impl().verification_get(verification_uuid)
def verification_delete(verification_uuid):
"""Delete verification.
:param verification_uuid: UUID of the verification.
:raises NotFoundException: if verification does not exist.
"""
return get_impl().verification_delete(verification_uuid)
def verification_update(uuid, values):
"""Update verification by values.
:param uuid: UUID of the verification.
:param values: dict with record values.
:raises NotFoundException: if verification does not exist.
:returns: new updated task dict with data on the task.
"""
return get_impl().verification_update(uuid, values)
def verification_list(status=None):
"""Get a list of verifications.
:param status: Verification status to filter the returned list on.
:returns: A list of dicts with data on the verifications.
"""
return get_impl().verification_list(status=status)
def verification_result_get(verification_uuid):
"""Get dict of verification results.
:param verification_uuid: string with UUID of Verification instance.
:returns: dict instance of VerificationResult.
"""
return get_impl().verification_result_get(verification_uuid)
def verification_result_create(verification_uuid, values):
"""Append result record to verification.
:param verification_uuid: string with UUID of Verification instance.
:param values: dict with record values.
:returns: TaskResult instance appended.
"""
return get_impl().verification_result_create(verification_uuid, values)
def register_worker(values):
"""Register a new worker service at the specified hostname.
:param values: A dict of values which must contain the following:
{
"hostname": the unique hostname which identifies
this worker service.
}
:returns: A worker.
:raises WorkerAlreadyRegistered: if worker already registered
"""
return get_impl().register_worker(values)
def get_worker(hostname):
"""Retrieve a worker service record from the database.
:param hostname: The hostname of the worker service.
:returns: A worker.
:raises WorkerNotFound: if worker not found
"""
return get_impl().get_worker(hostname)
def unregister_worker(hostname):
"""Unregister this worker with the service registry.
:param hostname: The hostname of the worker service.
:raises WorkerNotFound: if worker not found
"""
get_impl().unregister_worker(hostname)
def update_worker(hostname):
"""Mark a worker as active by updating its "updated_at" property.
:param hostname: The hostname of this worker service.
:raises WorkerNotFound: if worker not found
"""
get_impl().update_worker(hostname)
|
apache-2.0
| 8,278,875,098,060,252,000
| 29.620455
| 79
| 0.66496
| false
| 4.224835
| false
| false
| false
|
stackforge/cloudbase-init
|
cloudbaseinit/plugins/common/userdataplugins/cloudconfig.py
|
1
|
4200
|
# Copyright 2013 Mirantis Inc.
# Copyright 2014 Cloudbase Solutions Srl
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as oslo_logging
import yaml
from cloudbaseinit import conf as cloudbaseinit_conf
from cloudbaseinit.plugins.common import execcmd
from cloudbaseinit.plugins.common.userdataplugins import base
from cloudbaseinit.plugins.common.userdataplugins.cloudconfigplugins import (
factory
)
CONF = cloudbaseinit_conf.CONF
LOG = oslo_logging.getLogger(__name__)
DEFAULT_ORDER_VALUE = 999
class CloudConfigError(Exception):
pass
class CloudConfigPluginExecutor(object):
"""A simple executor class for processing cloud-config plugins.
:kwarg plugins:
Pairs of plugin names and the values corresponding to that plugin.
"""
def __init__(self, **plugins):
def _lookup_priority(plugin):
all_plugins = (CONF.cloud_config_plugins or
list(factory.PLUGINS.keys()))
# return the order from the config or default list
try:
return all_plugins.index(plugin)
except ValueError:
# If plugin is not supported or does not exist
# default to a sane and unreachable value.
return DEFAULT_ORDER_VALUE
self._expected_plugins = sorted(
plugins.items(),
key=lambda item: _lookup_priority(item[0]))
@classmethod
def from_yaml(cls, stream):
"""Initialize an executor from an yaml stream."""
loader = getattr(yaml, 'CLoader', yaml.Loader)
try:
content = yaml.load(stream, Loader=loader)
except (TypeError, ValueError, AttributeError):
raise CloudConfigError("Invalid yaml stream provided.")
if not content:
raise CloudConfigError("Empty yaml stream provided.")
return cls(**content)
def execute(self):
"""Call each plugin, in the order defined by _lookup_priority"""
reboot = execcmd.NO_REBOOT
plugins = factory.load_plugins()
for plugin_name, value in self._expected_plugins:
if CONF.cloud_config_plugins:
try:
CONF.cloud_config_plugins.index(plugin_name)
except ValueError:
LOG.info("Plugin %r is disabled", plugin_name)
continue
method = plugins.get(plugin_name)
if not method:
LOG.error("Plugin %r is currently not supported", plugin_name)
continue
try:
requires_reboot = method(value)
if requires_reboot:
reboot = execcmd.RET_END
except Exception:
LOG.exception("Processing plugin %s failed", plugin_name)
return reboot
class CloudConfigPlugin(base.BaseUserDataPlugin):
def __init__(self):
super(CloudConfigPlugin, self).__init__("text/cloud-config")
def process_non_multipart(self, part):
"""Process the given data, if it can be loaded through yaml.
If any plugin requires a reboot, it will return a particular
value, which will be processed on a higher level.
"""
try:
executor = CloudConfigPluginExecutor.from_yaml(part)
except CloudConfigError as ex:
LOG.error('Could not process part type %(type)r: %(err)r',
{'type': type(part), 'err': str(ex)})
else:
return executor.execute()
def process(self, part):
payload = part.get_payload(decode=True)
return self.process_non_multipart(payload)
|
apache-2.0
| 6,654,339,422,640,970,000
| 34.294118
| 78
| 0.628095
| false
| 4.472843
| true
| false
| false
|
ctb/2014-streaming
|
pipeline/sam-scan-to-coverage-dict.py
|
1
|
2919
|
#! /usr/bin/env python
import sys
import argparse
import screed
import cPickle
def ignore_at(iter):
for item in iter:
if item.startswith('@'):
continue
yield item
def main():
parser = argparse.ArgumentParser()
parser.add_argument('genome')
parser.add_argument('samfile')
parser.add_argument('coverage_d_pickle')
parser.add_argument('covhist')
args = parser.parse_args()
coords_d = {}
for record in screed.open(args.genome):
coords_d[record.name] = [0]*len(record.sequence)
n = 0
n_skipped = 0
for samline in ignore_at(open(args.samfile)):
n += 1
if n % 10000 == 0:
print >>sys.stderr, '...', n
readname, _, refname, refpos, _, _, _, _, _, seq = samline.split()[:10]
if refname == '*' or refpos == '*':
# (don't count these as skipped.)
continue
refpos = int(refpos)
try:
coord = coords_d[refname]
for pos in range(len(seq)):
coord[refpos - 1 + pos] += 1
except KeyError:
print >>sys.stderr, "unknown refname: %s; ignoring (read %s)" % (refname, readname)
n_skipped += 1
continue
if n_skipped / float(n) > .01:
raise Exception, "Error: too many reads ignored! %d of %d" % \
(n_skipped, n)
# now, calculate coverage per read!
coverage_d = {}
total = 0.
n = 0
for samline in ignore_at(open(args.samfile)):
readname, _, refname, refpos, _, _, _, _, _, seq = samline.split()[:10]
if refname == '*' or refpos == '*':
# (don't count these as skipped.)
continue
refpos = int(refpos)
try:
coord = coords_d[refname]
except KeyError:
continue
slice = list(coord[refpos - 1:refpos - 1 + len(seq)])
slice = sorted(slice)
coverage = slice[len(slice)/2] # median
assert readname not in coverage_d, readname
coverage_d[readname] = coverage
total += coverage
n += 1
if n % 10000 == 0:
print >>sys.stderr, '...', n
print 'average of the median mapping coverage', total / float(n)
print 'min coverage by read', min(coverage_d.values())
print 'max coverage by read', max(coverage_d.values())
covhist_d = {}
sofar = 0
for v in coverage_d.values():
v = int(v + 0.5)
covhist_d[v] = covhist_d.get(v, 0) + 1
fp = open(args.covhist, 'w')
total = sum(covhist_d.values())
sofar = 0
for k in range(0, max(covhist_d.keys()) + 1):
v = covhist_d.get(k, 0)
sofar += v
print >>fp, k, v, sofar, sofar / float(total)
fp.close()
fp = open(args.coverage_d_pickle, 'w')
cPickle.dump(coverage_d, fp)
fp.close()
if __name__ == '__main__':
main()
|
bsd-3-clause
| -6,013,978,973,039,782,000
| 26.8
| 95
| 0.526208
| false
| 3.564103
| false
| false
| false
|
svanoort/python-client-benchmarks
|
benchmark.py
|
1
|
11059
|
#!/usr/bin/env python
import timeit
import time
import string
import argparse
import csv
import sys
if sys.version_info[0] > 2:
import urllib.parse as urlparse
else:
import urlparse
# Import clients, so script fails fast if not available
from pycurl import Curl
try:
from cStringIO import StringIO
except:
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
import requests, urllib, urllib2, urllib3
def run_test(library, url, cycles, connection_reuse, options, setup_test, run_test, delay=None, timer=None):
""" Runs a benchmark, showing start & stop
    the setup_test is a string.Template source with $url as a placeholder;
    the run_test statement allows the same substitution
"""
TIMER = timeit.default_timer
if timer and timer.lower() == 'cpu':
TIMER = time.clock # Linux only
print("START testing {0} performance with {1} cycles and connection reuse {2}".format(library, cycles, connection_reuse))
print("Options: {0}".format(options))
run_cmd = string.Template(run_test).substitute(url=url)
if delay:
run_cmd = run_cmd + "; time.sleep({0})".format(delay)
setup_cmd = string.Template(setup_test).substitute(url=url)
mytime = timeit.timeit(stmt=run_cmd, setup=setup_cmd, number=cycles, timer=TIMER)
if delay:
mytime = mytime - (delay * cycles)
print("END testing result: {0}".format(mytime))
print(' ')
result = [library, connection_reuse, options, cycles, mytime]
return result
def run_size_benchmarks(url='', cycles=10, delay=None, output_file=None, length_api_format='/length/$length', **kwargs):
    """ Run variable-size benchmarks, where URL is the base url """
    timer_type = kwargs.get('timer')
sizes = [4, 512, 1024, 2048, 4096, 8192, 16384, 32768, 65536, 131072] # Yields ~10 GB of traffic, be careful!
REQUESTS_NOREUSE = ('requests', False, 'Default',
'import requests',
"r = requests.get('$url', verify=False)")
REQUESTS_REUSE = ('requests', True, 'Default',
"import requests; \
session = requests.Session(); \
r = requests.Request('GET', '$url').prepare()",
"v = session.send(r, verify=False)")
PYCURL_REUSE = ('pycurl', True, "Reuse handle, save response to new cStringIO buffer",
"from pycurl import Curl; from cStringIO import StringIO; \
mycurl=Curl(); \
mycurl.setopt(mycurl.SSL_VERIFYPEER, 0); \
mycurl.setopt(mycurl.SSL_VERIFYHOST, 0); \
mycurl.setopt(mycurl.URL, '$url')",
"body = StringIO(); \
mycurl.setopt(mycurl.WRITEFUNCTION, body.write); \
mycurl.perform(); \
val = body.getvalue(); \
body.close()")
PYCURL_NOREUSE = ('pycurl', False, "Reuse handle, save response to new cStringIO buffer",
"from pycurl import Curl; from cStringIO import StringIO; \
mycurl=Curl(); \
mycurl.setopt(mycurl.URL, '$url'); \
mycurl.setopt(mycurl.SSL_VERIFYPEER, 0); \
mycurl.setopt(mycurl.SSL_VERIFYHOST, 0); \
body = StringIO(); \
mycurl.setopt(mycurl.FORBID_REUSE, 1)",
"body = StringIO(); \
mycurl.setopt(mycurl.WRITEFUNCTION, body.write); \
mycurl.perform(); \
val = body.getvalue(); \
body.close()")
TEST_TYPES = [REQUESTS_NOREUSE, PYCURL_NOREUSE, REQUESTS_REUSE, PYCURL_REUSE]
all_results = list()
# Run tests
for size in sizes:
temp_url = url + string.Template(length_api_format).substitute(length=size)
for test in TEST_TYPES:
result = run_test(test[0], temp_url, cycles, test[1], test[2], test[3], test[4], delay=delay, timer=timer_type)
del result[3] # Don't need cycles
result.insert(0, size)
all_results.append(result)
# Transform tuples to size, time graphs for each response size
final_output = [[x, 0, 0, 0, 0] for x in sizes]
for i in xrange(0, len(sizes)):
final_output[i][1] = all_results[i*4][4]
final_output[i][2] = all_results[i*4+1][4]
final_output[i][3] = all_results[i*4+2][4]
final_output[i][4] = all_results[i*4+3][4]
headers = ('Response_size', 'Requests Time (no cnxn reuse)', 'pyCurl Time (no cnxn reuse)',
'Requests Time (cnxn reuse)', 'pyCurl Time (cnxn reuse)')
if output_file:
with open(output_file, 'wb') as csvfile:
outwriter = csv.writer(csvfile, dialect=csv.excel)
outwriter.writerow(headers)
for result in final_output:
outwriter.writerow(result)
def run_all_benchmarks(url='', cycles=10, delay=None, output_file=None, **kwargs):
results = list()
headers = ('Library','Reuse Connections?','Options', 'Time')
tests = list()
timer_type = kwargs.get('timer')
# Library, cnxn_reuse, options, setup, run_stmt
# Requests
tests.append(('requests', False, 'Default',
'import requests',
"r = requests.get('$url', verify=False)"))
tests.append(('requests', True, 'Default',
"import requests; \
session = requests.Session(); \
r = requests.Request('GET', '$url').prepare()",
"v = session.send(r, verify=False)"))
# PyCurl
tests.append(('pycurl', True, "Reuse handle, don't save body",
"from pycurl import Curl; \
mycurl=Curl(); \
mycurl.setopt(mycurl.SSL_VERIFYPEER, 0); \
mycurl.setopt(mycurl.SSL_VERIFYHOST, 0); \
mycurl.setopt(mycurl.URL, '$url'); \
mycurl.setopt(mycurl.WRITEFUNCTION, lambda x: None)",
"mycurl.perform()"))
tests.append(('pycurl', True, "Reuse handle, save response to new cStringIO buffer",
"from pycurl import Curl; from cStringIO import StringIO; \
mycurl=Curl(); \
mycurl.setopt(mycurl.SSL_VERIFYPEER, 0); \
mycurl.setopt(mycurl.SSL_VERIFYHOST, 0); \
mycurl.setopt(mycurl.URL, '$url')",
"body = StringIO(); \
mycurl.setopt(mycurl.WRITEFUNCTION, body.write); \
mycurl.perform(); \
val = body.getvalue(); \
body.close()"))
tests.append(('pycurl', False, "Reuse handle, save response to new cStringIO buffer",
"from pycurl import Curl; from cStringIO import StringIO; \
mycurl=Curl(); \
mycurl.setopt(mycurl.URL, '$url'); \
mycurl.setopt(mycurl.SSL_VERIFYPEER, 0); \
mycurl.setopt(mycurl.SSL_VERIFYHOST, 0); \
body = StringIO(); \
mycurl.setopt(mycurl.FORBID_REUSE, 1)",
"body = StringIO(); \
mycurl.setopt(mycurl.WRITEFUNCTION, body.write); \
mycurl.perform(); \
val = body.getvalue(); \
body.close()"))
# The use of global DNS cache avoids a bug on some linux systems with libcurl
# playing badly with DNS resolvers
tests.append(('pycurl', False, "New handle, save response to new cStringIO buffer",
"from pycurl import Curl; from cStringIO import StringIO",
"body = StringIO(); \
mycurl=Curl(); \
body = StringIO(); \
mycurl.setopt(mycurl.URL, '$url'); \
mycurl.setopt(mycurl.SSL_VERIFYPEER, 0); \
mycurl.setopt(mycurl.SSL_VERIFYHOST, 0); \
mycurl.setopt(mycurl.DNS_USE_GLOBAL_CACHE, True); \
mycurl.setopt(mycurl.WRITEFUNCTION, body.write); \
mycurl.perform(); \
val = body.getvalue(); \
body.close()"))
# URLLIB3
# Making URLLIB3 accept self-signed certs is a beast. You have to create a connection pool with the hostname and port supplied.
# See: http://stackoverflow.com/questions/18061640/ignore-certificate-validation-with-urllib3
# Yes, there's an option to bypass hostname verification but I cannot make it play nicely.
parsed_url = urlparse.urlparse(url)
scheme = parsed_url.scheme
hostname = parsed_url.hostname
port = parsed_url.port
setup_string = ""
if scheme == 'https':
setup_string = "import urllib3; \
http_pool = urllib3.HTTPSConnectionPool('{0}', port={1}, cert_reqs='CERT_NONE', assert_hostname=False)".format(hostname, port)
else:
setup_string = "import urllib3; http_pool = urllib3.PoolManager()"
tests.append(('urllib3', True, 'Default',
setup_string,
"body = http_pool.urlopen('GET', '$url').read()"))
# URLLIB2
#tests.append(('urllib2', False, '',
# "import urllib2",
# "body = urllib2.urlopen('$url').read()"))
# URLLIB
tests.append(('urllib', False, 'Default',
"import urllib",
"body = urllib.urlopen('$url').read()"))
for test in tests:
my_result = run_test(test[0], url, cycles, test[1], test[2], test[3], test[4], delay=delay, timer=timer_type)
results.append((test[0], test[1], test[2], my_result[-1]))
if output_file:
with open(output_file, 'wb') as csvfile:
outwriter = csv.writer(csvfile, dialect=csv.excel)
outwriter.writerow(('url', 'cycles', 'delay'))
outwriter.writerow((url, cycles, delay))
outwriter.writerow(headers)
for result in results:
outwriter.writerow(result)
if(__name__ == '__main__'):
parser = argparse.ArgumentParser(description="Benchmark different python request frameworks")
parser.add_argument('--url', metavar='u', type=str, default='http://localhost:8080/ping', help="URL to run requests against")
parser.add_argument('--cycles', metavar='c', type=int, default=10000, help="Number of cycles to run")
parser.add_argument('--delay', metavar='d', type=float, help="Delay in seconds between requests")
parser.add_argument('--output-file', metavar='o', nargs='?', type=str, help="Output file to write CSV results to")
parser.add_argument('--benchmark-type', type=str, default="full", choices=('full','size'), help="Benchmark type to run: full [default]=all libraries, 1 request, size=basic pycurl/requests tests with different request sizes")
parser.add_argument('--timer', type=str, default="real", choices=('real','cpu'), help="Timer type: real [default] or cpu")
parser.add_argument('--length-api-format', metavar='l', type=str, default="/length/$length", help="Template for API request that accepts response length parameter, for size benchmarks")
args = vars(parser.parse_args())
if args.get('url') is None:
print("No URL supplied, you must supply a URL!")
exit(1)
print('RUNNING PYTHON CLIENT BENCHMARKS WITH ARGS: {0}'.format(args))
if args['benchmark_type'] == 'full':
run_all_benchmarks(**args)
elif args['benchmark_type'] =='size':
run_size_benchmarks(**args)
else:
raise Exception("Illegal benchmark type: {0}".format(args['benchmark_type']))
|
apache-2.0
| 8,804,450,190,701,221,000
| 41.698842
| 229
| 0.607288
| false
| 3.6999
| true
| false
| false
|
eeucalyptus/eeDA
|
app/graphics/wirerenderer.py
|
1
|
1977
|
from . import Renderer
from data.util import Vector2i, Vector2d
from .common import eeDAcolor, pMakeCircleArray, pMakeLineArray
class WireRenderer(Renderer):
DEPTH = 1.0
def __init__(self, wire, gl):
super().__init__(gl)
self.wire = wire
self.callList = self._genCallList()
def _genCallList(self):
genList = self.gl.glGenLists(1)
self.gl.glNewList(genList, self.gl.GL_COMPILE)
self.width = self.wire.style['width'] / 2
self.color = self.wire.style['color']
self.pointAry = []
con0_pos = self.wire.connectors[0].pos
con1_pos = self.wire.connectors[1].pos
self.pointAry.append(self.wire.connectors[0].pos) # Start point
for point in self.wire.points:
self.pointAry.append(point) # Intermediate points
self.pointAry.append(self.wire.connectors[1].pos) # End point
self.vertices = pMakeLineArray(self.pointAry, Vector2i(), self.width, self.DEPTH)
        if not self.wire.connectors[0].other:   # start connector unattached
            self.renderUnconnected(self.pointAry[0])
        if not self.wire.connectors[1].other:   # end connector unattached
            self.renderUnconnected(self.pointAry[-1])
self.setColor(self.color)
self.gl.glEnableClientState(self.gl.GL_VERTEX_ARRAY)
self.gl.glVertexPointer(3, self.gl.GL_FLOAT, 0, self.vertices)
self.gl.glDrawArrays(self.gl.GL_TRIANGLE_STRIP, 0, len(self.vertices) / 3)
self.gl.glDisableClientState(self.gl.GL_VERTEX_ARRAY)
self.gl.glEndList()
return genList
def renderUnconnected(self, pos):
self.setColor(eeDAcolor.WIRE_UNCONNECTED)
self.gl.glEnableClientState(self.gl.GL_VERTEX_ARRAY)
circle = pMakeCircleArray(pos, self.width * 1.5, self.DEPTH, 30)
self.gl.glVertexPointer(3, self.gl.GL_FLOAT, 0, circle)
self.gl.glDrawArrays(self.gl.GL_TRIANGLE_FAN, 0, len(circle) / 3)
self.gl.glDisableClientState(self.gl.GL_VERTEX_ARRAY)
|
apache-2.0
| -317,239,216,122,899,400
| 34.945455
| 89
| 0.654527
| false
| 3.199029
| false
| false
| false
|
andrewsosa/hackfsu_com
|
api/api/models/hack.py
|
1
|
3160
|
from django.db import models
from api.models import Hackathon
from api.models.judging_criteria import JudgingCriteria
from api.models.judging_expo import JudgingExpo
from django.contrib import admin
from hackfsu_com.admin import hackfsu_admin
class HackQuerySet(models.QuerySet):
from api.models.judge_info import JudgeInfo
def from_expo(self, expo: JudgingExpo):
return self.filter(
table_number__gte=expo.table_number_start,
table_number__lte=expo.table_number_end
)
def from_table_number(self, table: int):
return self.get(table_number=table)
def with_active_judge(self, judge: JudgeInfo):
return self.filter(current_judges=judge)
def without_previous_judge(self, judge: JudgeInfo):
return self.exclude(judges=judge)
class HackManager(models.Manager):
def get_next_table_number(self):
number = 1
hackathon = Hackathon.objects.current()
while self.filter(hackathon=hackathon, table_number=number).exists():
number += 1
return number
class Hack(models.Model):
objects = HackManager.from_queryset(HackQuerySet)()
hackathon = models.ForeignKey(to=Hackathon, on_delete=models.CASCADE)
table_number = models.IntegerField()
name = models.CharField(max_length=100) # Devpost "Submission Title"
description = models.TextField() # Devpost "Plain Description"
extra_judging_criteria = models.ManyToManyField(to=JudgingCriteria, blank=True) # Devpost "Desired Prizes"
current_judges = models.ManyToManyField(to='api.JudgeInfo', blank=True, related_name='judges_current')
judges = models.ManyToManyField(to='api.JudgeInfo', blank=True, related_name='judges')
total_judge_score = models.IntegerField(default=0)
times_judged = models.IntegerField(default=0)
def get_expo(self):
expo = JudgingExpo.objects.filter(
hackathon=self.hackathon,
table_number_start__lte=self.table_number,
table_number_end__gte=self.table_number
)
if expo.exists():
return expo.all()[0]
return None
def get_expo_name(self) -> str:
expo = self.get_expo()
if expo is None:
return 'N/A'
return expo.name
def get_criteria_names(self) -> str:
names = []
for criteria in self.extra_judging_criteria.all():
names.append(criteria.name)
return ', '.join(names)
def __str__(self):
return self.name
@admin.register(Hack, site=hackfsu_admin)
class HackAdmin(admin.ModelAdmin):
list_filter = ('hackathon',)
list_display = ('id', 'name', 'expo', 'table_number', 'total_judge_score')
list_editable = ('table_number',)
list_display_links = ('id', 'name')
search_fields = ('name', 'table_number')
ordering = ('table_number', 'total_judge_score')
@staticmethod
def expo(obj: Hack):
return obj.get_expo_name()
@staticmethod
def extra_criteria(obj: Hack) -> str:
return obj.get_criteria_names()
|
apache-2.0
| 8,763,194,786,855,953,000
| 34.111111
| 117
| 0.641772
| false
| 3.570621
| false
| false
| false
|
2B5/ia-3B5
|
module3/syntax_processing/processing_purenltk.py
|
1
|
6358
|
import nltk
from nltk.tokenize import sent_tokenize, word_tokenize
_wordnet = nltk.corpus.wordnet
from semantic_processing import semantic_processing as semantics
from nltk.stem import WordNetLemmatizer
class TextProcessor:
def __init__(self, initial_text):
self.text = initial_text
def word_tag(self, word):
if word[1] in ("NN", "NNS", "NNP", "NNPS"):
return _wordnet.NOUN
if word[1] in ("JJ", "JJR", "JJS"):
return _wordnet.ADJ
if word[1] in ("VB", "VBD", "VBG", "VBN", "VBP", "VBZ"):
return _wordnet.VERB
if word[1] in ("RB", "RBR", "RBS"):
return _wordnet.ADV
return None
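    # Illustrative mapping: word_tag(('dogs', 'NNS')) -> wordnet.NOUN,
    # word_tag(('quickly', 'RB')) -> wordnet.ADV; tags outside these groups
    # (e.g. 'IN', 'DT') fall through and return None.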
def get_sentiment(self, polarity):
if polarity <= 0.5 and polarity >= 0:
return "neutral"
if polarity > 0.5:
return "happy"
if polarity < 0:
return "sad"
def remove_signs(self,word_list):
new_list = word_list
for word in new_list:
if word in (".",";","!","?",","):
word_list.remove(word)
return new_list
def traverse(self, t, np_list):
try:
t.label()
except AttributeError:
return
else:
if t.label() == 'NP':
# print('NP:' + str(t.leaves()))
np_list.append(t.leaves())
# print('NPhead:' + str(t.leaves()[-1]))
for child in t:
self.traverse(child, np_list)
else:
for child in t:
self.traverse(child, np_list)
def get_NP(self, np_list):
final_list = []
for item in np_list:
final_expr = ""
for word in item:
final_expr = final_expr + word[0] + " "
final_list.append(final_expr)
return final_list
def processing(self):
wordnet_lemmatizer = WordNetLemmatizer()
map_list = []
try:
sent_tokenize_list = sent_tokenize(self.text)
for sentence in sent_tokenize_list:
# print (sentence)
word_list = self.remove_signs(word_tokenize(sentence))
tag_list = nltk.pos_tag(word_list)
lemmatized_sent = []
proper_nouns = []
pronouns = []
verbs = []
nouns = []
processed_sentence = {}
processed_sentence["original_sentence"] = sentence
processed_sentence["subject"] = ""
processed_sentence["predicate"] = ""
processed_sentence["verbs"] = ""
processed_sentence["nouns"] = []
processed_sentence["numbers"] = []
grammar = "NP: {<DT>?<JJ>*<NN>}"
cp = nltk.RegexpParser(grammar)
p_tree = cp.parse(tag_list)
np_list = []
self.traverse(p_tree, np_list)
final_list = self.get_NP(np_list)
processed_sentence["noun_phrases"] = final_list
for word in tag_list:
w = word[0].lower()
# print(word)
tag = self.word_tag(word)
# print(w, ": ", word[1])
                    if tag is not None:
                        lemmatized_word = wordnet_lemmatizer.lemmatize(w, tag)
                    else:
                        lemmatized_word = wordnet_lemmatizer.lemmatize(w, _wordnet.NOUN)
if word[1] == "NNP" or word[1] == "NNPS":
proper_nouns.append(lemmatized_word)
if word[1] == "NN" or word[1] == "NNS":
nouns.append(lemmatized_word)
if word[1] == "CD" :
processed_sentence["numbers"].append(lemmatized_word)
if word[1] == "PRP":
pronouns.append(lemmatized_word)
if tag == "v":
if (word[1] == "VBG" or word[1] == "VBN") and verbs[-1] == "be":
verbs[-1] = lemmatized_word
elif word[1] == "VBN" and verbs[-1] == "have":
verbs[-1] = lemmatized_word
else:
verbs.append(lemmatized_word)
if tag == "n" :
processed_sentence["nouns"].append(lemmatized_word)
lemmatized_sent.append(lemmatized_word)
processed_sentence["sentence"] = lemmatized_sent
processed_sentence["proper_nouns"] = proper_nouns
# processed_sentance["Noun Phrase"] = list(noun_phrase)
processed_sentence["pronouns"] = pronouns
processed_sentence["verbs"] = verbs
if len(processed_sentence["nouns"]) != 0 and len(pronouns) != 0:
if lemmatized_sent.index(processed_sentence["nouns"][0]) < lemmatized_sent.index(pronouns[0]):
processed_sentence["subject"] = processed_sentence["nouns"][0]
else:
processed_sentence["subject"] = pronouns[0]
elif len(processed_sentence["nouns"]) != 0:
processed_sentence["subject"] = processed_sentence["nouns"][0]
elif len(pronouns) != 0:
processed_sentence["subject"] = pronouns[0]
if len(verbs) != 0:
processed_sentence["predicate"] = verbs[0]
processed_sentence["semantics"] = {}
word_list = [w.lower() for w in word_list]
context = semantics.remove_stopwords(word_list)
lemmas = semantics.remove_stopwords(lemmatized_sent)
for lemma in lemmas:
processed_sentence["semantics"].setdefault(lemma, semantics.semantic_info(lemma, lemma, context))
map_list.append(processed_sentence)
return map_list
except Exception as e:
print("Exception!")
print(str(e))
print(type(e))
#text = "He is my brother."
#t = TextProcessor(text)
#lista = t.processing()
#for prop in lista:
# print(str(prop))
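# Each entry returned by processing() is a dict describing one sentence; a rough sketch of
# its keys (the values shown are illustrative only):
#   {"original_sentence": "He is my brother.", "sentence": [...], "subject": "he",
#    "predicate": "be", "verbs": [...], "nouns": [...], "proper_nouns": [...], "pronouns": [...],
#    "numbers": [...], "noun_phrases": [...], "semantics": {...}}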
|
mit
| -7,640,416,701,744,384,000
| 33.743169
| 117
| 0.477351
| false
| 3.907806
| false
| false
| false
|
edibledinos/pwnypack
|
docs/conf.py
|
1
|
11416
|
# -*- coding: utf-8 -*-
#
# pwnypack documentation build configuration file, created by
# sphinx-quickstart on Wed Mar 25 15:04:19 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
import mock
sys.path.insert(0, os.path.abspath('..'))
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.todo',
'sphinx.ext.viewcode',
'sphinx.ext.napoleon',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffixes as a list of strings:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'pwnypack'
copyright = u'2015 - 2016, Certified Edible Dinosaurs'
author = u'Ingmar Steen'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.9'
# The full version, including alpha/beta/rc tags.
release = '0.9.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'en'
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
if not on_rtd:
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'pwnydoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'pwny.tex', u'pwny Documentation',
u'Author', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'pwny', u'pwny Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'pwny', u'pwny Documentation',
author, 'pwny', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
# The basename for the epub file. It defaults to the project name.
#epub_basename = project
# The HTML theme for the epub output. Since the default themes are not optimized
# for small screen space, using the same theme for HTML and epub output is
# usually not wise. This defaults to 'epub', a theme designed to save visual
# space.
#epub_theme = 'epub'
# The language of the text. It defaults to the language option
# or 'en' if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
#epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Choose between 'default' and 'includehidden'.
#epub_tocscope = 'default'
# Fix unsupported image types using the Pillow.
#epub_fix_images = False
# Scale large images.
#epub_max_image_width = 0
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#epub_show_urls = 'inline'
# If false, no index is generated.
#epub_use_index = True
|
mit
| 3,611,730,147,197,216,300
| 30.191257
| 80
| 0.705676
| false
| 3.626429
| true
| false
| false
|
quantwizard-com/pythonbacktest
|
pythonbacktest/animation/ipythonchartanimation.py
|
1
|
1578
|
from IPython.display import display
from matplotlib import animation, rc
import abc
class IPythonChartAnimation(object):
__metaclass__ = abc.ABCMeta
VIDEO_TAG = """<video controls>
<source src="data:video/x-m4v;base64,{0}" type="video/mp4">
Your browser does not support the video tag.
</video>"""
def __init__(self):
self.__target_canvas = None
self.__number_of_frames = None
self.__interval = None
@abc.abstractmethod
def _init_animation(self):
raise NotImplementedError()
def _start_animation(self, animation_callback, init_animation_callback,
target_canvas, frames=100, interval=200):
anim = animation.FuncAnimation(target_canvas, animation_callback,
init_func=init_animation_callback,
frames=frames, interval=interval, blit=True)
rc('animation', html='html5')
display(anim)
@property
def target_canvas(self):
return self.__target_canvas
@target_canvas.setter
def target_canvas(self, canvas):
self.__target_canvas = canvas
@property
def number_of_frames(self):
return self.__number_of_frames
@number_of_frames.setter
def number_of_frames(self, value):
self.__number_of_frames = value
@property
def interval(self):
return self.__interval
@interval.setter
def interval(self, inter):
self.__interval = inter
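# A minimal usage sketch (the subclass, callback and figure names are hypothetical) showing how
# the properties and _start_animation() are expected to fit together:
#   anim = MyChartAnimation()            # concrete subclass implementing _init_animation()
#   anim.target_canvas = figure          # a matplotlib Figure to draw on
#   anim.number_of_frames = 100
#   anim.interval = 200                  # milliseconds between frames
#   anim._start_animation(update_frame, anim._init_animation,
#                         anim.target_canvas, anim.number_of_frames, anim.interval)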
|
apache-2.0
| 3,753,715,281,970,296,300
| 26.684211
| 95
| 0.586185
| false
| 4.457627
| false
| false
| false
|
haad/ansible
|
test/sanity/validate-modules/main.py
|
1
|
49902
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2015 Matt Martz <matt@sivel.net>
# Copyright (C) 2015 Rackspace US, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import print_function
import abc
import argparse
import ast
import json
import errno
import os
import re
import subprocess
import sys
import tempfile
import traceback
from collections import OrderedDict
from contextlib import contextmanager
from distutils.version import StrictVersion
from fnmatch import fnmatch
from ansible import __version__ as ansible_version
from ansible.executor.module_common import REPLACER_WINDOWS
from ansible.plugins.loader import fragment_loader
from ansible.utils.plugin_docs import BLACKLIST, get_docstring
from module_args import AnsibleModuleImportError, get_argument_spec
from schema import doc_schema, metadata_1_1_schema, return_schema
from utils import CaptureStd, parse_yaml
from voluptuous.humanize import humanize_error
from ansible.module_utils.six import PY3, with_metaclass
if PY3:
# Because there is no ast.TryExcept in Python 3 ast module
TRY_EXCEPT = ast.Try
# REPLACER_WINDOWS from ansible.executor.module_common is byte
# string but we need unicode for Python 3
REPLACER_WINDOWS = REPLACER_WINDOWS.decode('utf-8')
else:
TRY_EXCEPT = ast.TryExcept
BLACKLIST_DIRS = frozenset(('.git', 'test', '.github', '.idea'))
INDENT_REGEX = re.compile(r'([\t]*)')
TYPE_REGEX = re.compile(r'.*(if|or)(\s+[^"\']*|\s+)(?<!_)(?<!str\()type\(.*')
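# TYPE_REGEX flags type() used in comparisons (e.g. "if type(foo) == str"), which should use isinstance() instead.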
BLACKLIST_IMPORTS = {
'requests': {
'new_only': True,
'error': {
'code': 203,
'msg': ('requests import found, should use '
'ansible.module_utils.urls instead')
}
},
r'boto(?:\.|$)': {
'new_only': True,
'error': {
'code': 204,
'msg': 'boto import found, new modules should use boto3'
}
},
}
class ReporterEncoder(json.JSONEncoder):
def default(self, o):
if isinstance(o, Exception):
return str(o)
return json.JSONEncoder.default(self, o)
class Reporter(object):
def __init__(self):
self.files = OrderedDict()
def _ensure_default_entry(self, path):
try:
self.files[path]
except KeyError:
self.files[path] = {
'errors': [],
'warnings': [],
'traces': [],
'warning_traces': []
}
def _log(self, path, code, msg, level='error', line=0, column=0):
self._ensure_default_entry(path)
lvl_dct = self.files[path]['%ss' % level]
lvl_dct.append({
'code': code,
'msg': msg,
'line': line,
'column': column
})
def error(self, *args, **kwargs):
self._log(*args, level='error', **kwargs)
def warning(self, *args, **kwargs):
self._log(*args, level='warning', **kwargs)
def trace(self, path, tracebk):
self._ensure_default_entry(path)
self.files[path]['traces'].append(tracebk)
def warning_trace(self, path, tracebk):
self._ensure_default_entry(path)
self.files[path]['warning_traces'].append(tracebk)
@staticmethod
@contextmanager
def _output_handle(output):
if output != '-':
handle = open(output, 'w+')
else:
handle = sys.stdout
yield handle
handle.flush()
handle.close()
@staticmethod
def _filter_out_ok(reports):
temp_reports = OrderedDict()
for path, report in reports.items():
if report['errors'] or report['warnings']:
temp_reports[path] = report
return temp_reports
def plain(self, warnings=False, output='-'):
"""Print out the test results in plain format
output is ignored here for now
"""
ret = []
for path, report in Reporter._filter_out_ok(self.files).items():
traces = report['traces'][:]
if warnings and report['warnings']:
traces.extend(report['warning_traces'])
for trace in traces:
print('TRACE:')
print('\n '.join((' %s' % trace).splitlines()))
for error in report['errors']:
error['path'] = path
print('%(path)s:%(line)d:%(column)d: E%(code)d %(msg)s' % error)
ret.append(1)
if warnings:
for warning in report['warnings']:
warning['path'] = path
print('%(path)s:%(line)d:%(column)d: W%(code)d %(msg)s' % warning)
return 3 if ret else 0
def json(self, warnings=False, output='-'):
"""Print out the test results in json format
warnings is not respected in this output
"""
ret = [len(r['errors']) for _, r in self.files.items()]
with Reporter._output_handle(output) as handle:
print(json.dumps(Reporter._filter_out_ok(self.files), indent=4, cls=ReporterEncoder), file=handle)
return 3 if sum(ret) else 0
class Validator(with_metaclass(abc.ABCMeta, object)):
"""Validator instances are intended to be run on a single object. if you
are scanning multiple objects for problems, you'll want to have a separate
Validator for each one."""
def __init__(self, reporter=None):
self.reporter = reporter
@abc.abstractproperty
def object_name(self):
"""Name of the object we validated"""
pass
@abc.abstractproperty
def object_path(self):
"""Path of the object we validated"""
pass
@abc.abstractmethod
def validate(self):
"""Run this method to generate the test results"""
pass
class ModuleValidator(Validator):
BLACKLIST_PATTERNS = ('.git*', '*.pyc', '*.pyo', '.*', '*.md', '*.rst', '*.txt')
BLACKLIST_FILES = frozenset(('.git', '.gitignore', '.travis.yml',
'shippable.yml',
'.gitattributes', '.gitmodules', 'COPYING',
'__init__.py', 'VERSION', 'test-docs.sh'))
BLACKLIST = BLACKLIST_FILES.union(BLACKLIST['MODULE'])
PS_DOC_BLACKLIST = frozenset((
'async_status.ps1',
'slurp.ps1',
'setup.ps1'
))
WHITELIST_FUTURE_IMPORTS = frozenset(('absolute_import', 'division', 'print_function'))
def __init__(self, path, analyze_arg_spec=False, base_branch=None, git_cache=None, reporter=None):
super(ModuleValidator, self).__init__(reporter=reporter or Reporter())
self.path = path
self.basename = os.path.basename(self.path)
self.name, _ = os.path.splitext(self.basename)
self.analyze_arg_spec = analyze_arg_spec
self.base_branch = base_branch
self.git_cache = git_cache or GitCache()
self._python_module_override = False
with open(path) as f:
self.text = f.read()
self.length = len(self.text.splitlines())
try:
self.ast = ast.parse(self.text)
except Exception:
self.ast = None
if base_branch:
self.base_module = self._get_base_file()
else:
self.base_module = None
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
if not self.base_module:
return
try:
os.remove(self.base_module)
except Exception:
pass
@property
def object_name(self):
return self.basename
@property
def object_path(self):
return self.path
def _python_module(self):
if self.path.endswith('.py') or self._python_module_override:
return True
return False
def _powershell_module(self):
if self.path.endswith('.ps1'):
return True
return False
def _just_docs(self):
"""Module can contain just docs and from __future__ boilerplate
"""
try:
for child in self.ast.body:
if not isinstance(child, ast.Assign):
# allowed from __future__ imports
if isinstance(child, ast.ImportFrom) and child.module == '__future__':
for future_import in child.names:
if future_import.name not in self.WHITELIST_FUTURE_IMPORTS:
break
else:
continue
return False
return True
except AttributeError:
return False
def _get_base_branch_module_path(self):
"""List all paths within lib/ansible/modules to try and match a moved module"""
return self.git_cache.base_module_paths.get(self.object_name)
def _has_alias(self):
"""Return true if the module has any aliases."""
return self.object_name in self.git_cache.head_aliased_modules
def _get_base_file(self):
# In case of module moves, look for the original location
base_path = self._get_base_branch_module_path()
command = ['git', 'show', '%s:%s' % (self.base_branch, base_path or self.path)]
p = subprocess.Popen(command, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
if int(p.returncode) != 0:
return None
t = tempfile.NamedTemporaryFile(delete=False)
t.write(stdout)
t.close()
return t.name
def _is_new_module(self):
if self._has_alias():
return False
return not self.object_name.startswith('_') and bool(self.base_branch) and not bool(self.base_module)
def _check_interpreter(self, powershell=False):
if powershell:
if not self.text.startswith('#!powershell\n'):
self.reporter.error(
path=self.object_path,
code=102,
msg='Interpreter line is not "#!powershell"'
)
return
if not self.text.startswith('#!/usr/bin/python'):
self.reporter.error(
path=self.object_path,
code=101,
msg='Interpreter line is not "#!/usr/bin/python"'
)
def _check_type_instead_of_isinstance(self, powershell=False):
if powershell:
return
for line_no, line in enumerate(self.text.splitlines()):
typekeyword = TYPE_REGEX.match(line)
if typekeyword:
# TODO: add column
self.reporter.error(
path=self.object_path,
code=403,
msg=('Type comparison using type() found. '
'Use isinstance() instead'),
line=line_no + 1
)
def _check_for_sys_exit(self):
if 'sys.exit(' in self.text:
# TODO: Add line/col
self.reporter.error(
path=self.object_path,
code=205,
msg='sys.exit() call found. Should be exit_json/fail_json'
)
def _check_gpl3_header(self):
header = '\n'.join(self.text.split('\n')[:20])
if ('GNU General Public License' not in header or
('version 3' not in header and 'v3.0' not in header)):
self.reporter.error(
path=self.object_path,
code=105,
msg='GPLv3 license header not found in the first 20 lines of the module'
)
elif self._is_new_module():
if len([line for line in header
if 'GNU General Public License' in line]) > 1:
self.reporter.error(
path=self.object_path,
code=108,
msg='Found old style GPLv3 license header: '
'https://docs.ansible.com/ansible/devel/dev_guide/developing_modules_documenting.html#copyright'
)
def _check_for_tabs(self):
for line_no, line in enumerate(self.text.splitlines()):
indent = INDENT_REGEX.search(line)
if indent and '\t' in line:
index = line.index('\t')
self.reporter.error(
path=self.object_path,
code=402,
msg='indentation contains tabs',
line=line_no + 1,
column=index
)
def _find_blacklist_imports(self):
for child in self.ast.body:
names = []
if isinstance(child, ast.Import):
names.extend(child.names)
elif isinstance(child, TRY_EXCEPT):
bodies = child.body
for handler in child.handlers:
bodies.extend(handler.body)
for grandchild in bodies:
if isinstance(grandchild, ast.Import):
names.extend(grandchild.names)
for name in names:
# TODO: Add line/col
for blacklist_import, options in BLACKLIST_IMPORTS.items():
if re.search(blacklist_import, name.name):
new_only = options['new_only']
if self._is_new_module() and new_only:
self.reporter.error(
path=self.object_path,
**options['error']
)
elif not new_only:
self.reporter.error(
path=self.object_path,
**options['error']
)
def _find_module_utils(self, main):
linenos = []
found_basic = False
for child in self.ast.body:
if isinstance(child, (ast.Import, ast.ImportFrom)):
names = []
try:
names.append(child.module)
if child.module.endswith('.basic'):
found_basic = True
except AttributeError:
pass
names.extend([n.name for n in child.names])
if [n for n in names if n.startswith('ansible.module_utils')]:
linenos.append(child.lineno)
for name in child.names:
if ('module_utils' in getattr(child, 'module', '') and
isinstance(name, ast.alias) and
name.name == '*'):
msg = (
208,
('module_utils imports should import specific '
'components, not "*"')
)
if self._is_new_module():
self.reporter.error(
path=self.object_path,
code=msg[0],
msg=msg[1],
line=child.lineno
)
else:
self.reporter.warning(
path=self.object_path,
code=msg[0],
msg=msg[1],
line=child.lineno
)
if (isinstance(name, ast.alias) and
name.name == 'basic'):
found_basic = True
if not linenos:
self.reporter.error(
path=self.object_path,
code=201,
msg='Did not find a module_utils import'
)
elif not found_basic:
self.reporter.warning(
path=self.object_path,
code=292,
msg='Did not find "ansible.module_utils.basic" import'
)
return linenos
def _get_first_callable(self):
linenos = []
for child in self.ast.body:
if isinstance(child, (ast.FunctionDef, ast.ClassDef)):
linenos.append(child.lineno)
return min(linenos)
def _find_main_call(self):
lineno = False
if_bodies = []
for child in self.ast.body:
if isinstance(child, ast.If):
try:
if child.test.left.id == '__name__':
if_bodies.extend(child.body)
except AttributeError:
pass
bodies = self.ast.body
bodies.extend(if_bodies)
for child in bodies:
# validate that the next to last line is 'if __name__ == "__main__"'
if child.lineno == (self.length - 1):
mainchecked = False
try:
if isinstance(child, ast.If) and \
child.test.left.id == '__name__' and \
len(child.test.ops) == 1 and \
isinstance(child.test.ops[0], ast.Eq) and \
child.test.comparators[0].s == '__main__':
mainchecked = True
except Exception:
pass
if not mainchecked:
self.reporter.error(
path=self.object_path,
code=109,
msg='Next to last line should be: if __name__ == "__main__":',
line=child.lineno
)
# validate that the final line is a call to main()
if isinstance(child, ast.Expr):
if isinstance(child.value, ast.Call):
if (isinstance(child.value.func, ast.Name) and
child.value.func.id == 'main'):
lineno = child.lineno
if lineno < self.length - 1:
self.reporter.error(
path=self.object_path,
code=104,
msg='Call to main() not the last line',
line=lineno
)
if not lineno:
self.reporter.error(
path=self.object_path,
code=103,
msg='Did not find a call to main'
)
return lineno or 0
def _find_has_import(self):
for child in self.ast.body:
found_try_except_import = False
found_has = False
if isinstance(child, TRY_EXCEPT):
bodies = child.body
for handler in child.handlers:
bodies.extend(handler.body)
for grandchild in bodies:
if isinstance(grandchild, ast.Import):
found_try_except_import = True
if isinstance(grandchild, ast.Assign):
for target in grandchild.targets:
if target.id.lower().startswith('has_'):
found_has = True
if found_try_except_import and not found_has:
# TODO: Add line/col
self.reporter.warning(
path=self.object_path,
code=291,
msg='Found Try/Except block without HAS_ assignment'
)
def _ensure_imports_below_docs(self, doc_info, first_callable):
try:
min_doc_line = min(
[doc_info[key]['lineno'] for key in doc_info if doc_info[key]['lineno']]
)
except ValueError:
# We can't perform this validation, as there are no DOCs provided at all
return
max_doc_line = max(
[doc_info[key]['end_lineno'] for key in doc_info if doc_info[key]['end_lineno']]
)
import_lines = []
for child in self.ast.body:
if isinstance(child, (ast.Import, ast.ImportFrom)):
if isinstance(child, ast.ImportFrom) and child.module == '__future__':
# allowed from __future__ imports
for future_import in child.names:
if future_import.name not in self.WHITELIST_FUTURE_IMPORTS:
self.reporter.error(
path=self.object_path,
code=209,
msg=('Only the following from __future__ imports are allowed: %s'
% ', '.join(self.WHITELIST_FUTURE_IMPORTS)),
line=child.lineno
)
break
                else:  # for-else: the loop completed without finding a problem, so this is a legal import
continue
import_lines.append(child.lineno)
if child.lineno < min_doc_line:
self.reporter.error(
path=self.object_path,
code=106,
msg=('Import found before documentation variables. '
'All imports must appear below '
'DOCUMENTATION/EXAMPLES/RETURN/ANSIBLE_METADATA.'),
line=child.lineno
)
break
elif isinstance(child, TRY_EXCEPT):
bodies = child.body
for handler in child.handlers:
bodies.extend(handler.body)
for grandchild in bodies:
if isinstance(grandchild, (ast.Import, ast.ImportFrom)):
import_lines.append(grandchild.lineno)
if grandchild.lineno < min_doc_line:
self.reporter.error(
path=self.object_path,
code=106,
msg=('Import found before documentation '
'variables. All imports must appear below '
'DOCUMENTATION/EXAMPLES/RETURN/'
'ANSIBLE_METADATA.'),
line=child.lineno
)
break
for import_line in import_lines:
if not (max_doc_line < import_line < first_callable):
msg = (
107,
('Imports should be directly below DOCUMENTATION/EXAMPLES/'
'RETURN/ANSIBLE_METADATA.')
)
if self._is_new_module():
self.reporter.error(
path=self.object_path,
code=msg[0],
msg=msg[1],
line=import_line
)
else:
self.reporter.warning(
path=self.object_path,
code=msg[0],
msg=msg[1],
line=import_line
)
def _validate_ps_replacers(self):
# loop all (for/else + error)
# get module list for each
# check "shape" of each module name
module_requires = r'(?im)^#\s*requires\s+\-module(?:s?)\s*(Ansible\.ModuleUtils\..+)'
found_requires = False
for req_stmt in re.finditer(module_requires, self.text):
found_requires = True
# this will bomb on dictionary format - "don't do that"
module_list = [x.strip() for x in req_stmt.group(1).split(',')]
if len(module_list) > 1:
self.reporter.error(
path=self.object_path,
code=210,
msg='Ansible.ModuleUtils requirements do not support multiple modules per statement: "%s"' % req_stmt.group(0)
)
continue
module_name = module_list[0]
if module_name.lower().endswith('.psm1'):
self.reporter.error(
path=self.object_path,
code=211,
msg='Module #Requires should not end in .psm1: "%s"' % module_name
)
# also accept the legacy #POWERSHELL_COMMON replacer signal
if not found_requires and REPLACER_WINDOWS not in self.text:
self.reporter.error(
path=self.object_path,
code=207,
msg='No Ansible.ModuleUtils module requirements/imports found'
)
def _find_ps_docs_py_file(self):
if self.object_name in self.PS_DOC_BLACKLIST:
return
py_path = self.path.replace('.ps1', '.py')
if not os.path.isfile(py_path):
self.reporter.error(
path=self.object_path,
code=503,
msg='Missing python documentation file'
)
def _get_docs(self):
docs = {
'DOCUMENTATION': {
'value': None,
'lineno': 0,
'end_lineno': 0,
},
'EXAMPLES': {
'value': None,
'lineno': 0,
'end_lineno': 0,
},
'RETURN': {
'value': None,
'lineno': 0,
'end_lineno': 0,
},
'ANSIBLE_METADATA': {
'value': None,
'lineno': 0,
'end_lineno': 0,
}
}
for child in self.ast.body:
if isinstance(child, ast.Assign):
for grandchild in child.targets:
if grandchild.id == 'DOCUMENTATION':
docs['DOCUMENTATION']['value'] = child.value.s
docs['DOCUMENTATION']['lineno'] = child.lineno
docs['DOCUMENTATION']['end_lineno'] = (
child.lineno + len(child.value.s.splitlines())
)
elif grandchild.id == 'EXAMPLES':
docs['EXAMPLES']['value'] = child.value.s
docs['EXAMPLES']['lineno'] = child.lineno
docs['EXAMPLES']['end_lineno'] = (
child.lineno + len(child.value.s.splitlines())
)
elif grandchild.id == 'RETURN':
docs['RETURN']['value'] = child.value.s
docs['RETURN']['lineno'] = child.lineno
docs['RETURN']['end_lineno'] = (
child.lineno + len(child.value.s.splitlines())
)
elif grandchild.id == 'ANSIBLE_METADATA':
docs['ANSIBLE_METADATA']['value'] = child.value
docs['ANSIBLE_METADATA']['lineno'] = child.lineno
try:
docs['ANSIBLE_METADATA']['end_lineno'] = (
child.lineno + len(child.value.s.splitlines())
)
except AttributeError:
docs['ANSIBLE_METADATA']['end_lineno'] = (
child.value.values[-1].lineno
)
return docs
def _validate_docs_schema(self, doc, schema, name, error_code):
# TODO: Add line/col
errors = []
try:
schema(doc)
except Exception as e:
for error in e.errors:
error.data = doc
errors.extend(e.errors)
for error in errors:
path = [str(p) for p in error.path]
if isinstance(error.data, dict):
error_message = humanize_error(error.data, error)
else:
error_message = error
self.reporter.error(
path=self.object_path,
code=error_code,
msg='%s.%s: %s' % (name, '.'.join(path), error_message)
)
def _validate_docs(self):
doc_info = self._get_docs()
deprecated = False
if not bool(doc_info['DOCUMENTATION']['value']):
self.reporter.error(
path=self.object_path,
code=301,
msg='No DOCUMENTATION provided'
)
else:
doc, errors, traces = parse_yaml(
doc_info['DOCUMENTATION']['value'],
doc_info['DOCUMENTATION']['lineno'],
self.name, 'DOCUMENTATION'
)
for error in errors:
self.reporter.error(
path=self.object_path,
code=302,
**error
)
for trace in traces:
self.reporter.trace(
path=self.object_path,
tracebk=trace
)
if not errors and not traces:
with CaptureStd():
try:
get_docstring(self.path, fragment_loader, verbose=True)
except AssertionError:
fragment = doc['extends_documentation_fragment']
self.reporter.error(
path=self.object_path,
code=303,
msg='DOCUMENTATION fragment missing: %s' % fragment
)
except Exception:
self.reporter.trace(
path=self.object_path,
tracebk=traceback.format_exc()
)
self.reporter.error(
path=self.object_path,
code=304,
msg='Unknown DOCUMENTATION error, see TRACE'
)
if 'options' in doc and doc['options'] is None:
self.reporter.error(
path=self.object_path,
code=320,
msg='DOCUMENTATION.options must be a dictionary/hash when used',
)
if self.object_name.startswith('_') and not os.path.islink(self.object_path):
deprecated = True
if 'deprecated' not in doc or not doc.get('deprecated'):
self.reporter.error(
path=self.object_path,
code=318,
msg='Module deprecated, but DOCUMENTATION.deprecated is missing'
)
if os.path.islink(self.object_path):
# This module has an alias, which we can tell as it's a symlink
# Rather than checking for `module: $filename` we need to check against the true filename
self._validate_docs_schema(doc, doc_schema(os.readlink(self.object_path).split('.')[0]), 'DOCUMENTATION', 305)
else:
# This is the normal case
self._validate_docs_schema(doc, doc_schema(self.object_name.split('.')[0]), 'DOCUMENTATION', 305)
self._check_version_added(doc)
self._check_for_new_args(doc)
if not bool(doc_info['EXAMPLES']['value']):
self.reporter.error(
path=self.object_path,
code=310,
msg='No EXAMPLES provided'
)
else:
_, errors, traces = parse_yaml(doc_info['EXAMPLES']['value'],
doc_info['EXAMPLES']['lineno'],
self.name, 'EXAMPLES', load_all=True)
for error in errors:
self.reporter.error(
path=self.object_path,
code=311,
**error
)
for trace in traces:
self.reporter.trace(
path=self.object_path,
tracebk=trace
)
if not bool(doc_info['RETURN']['value']):
if self._is_new_module():
self.reporter.error(
path=self.object_path,
code=312,
msg='No RETURN provided'
)
else:
self.reporter.warning(
path=self.object_path,
code=312,
msg='No RETURN provided'
)
else:
data, errors, traces = parse_yaml(doc_info['RETURN']['value'],
doc_info['RETURN']['lineno'],
self.name, 'RETURN')
if data:
for ret_key in data:
self._validate_docs_schema(data[ret_key], return_schema(data[ret_key]), 'RETURN.%s' % ret_key, 319)
for error in errors:
self.reporter.error(
path=self.object_path,
code=313,
**error
)
for trace in traces:
self.reporter.trace(
path=self.object_path,
tracebk=trace
)
if not bool(doc_info['ANSIBLE_METADATA']['value']):
self.reporter.error(
path=self.object_path,
code=314,
msg='No ANSIBLE_METADATA provided'
)
else:
metadata = None
if isinstance(doc_info['ANSIBLE_METADATA']['value'], ast.Dict):
metadata = ast.literal_eval(
doc_info['ANSIBLE_METADATA']['value']
)
else:
metadata, errors, traces = parse_yaml(
doc_info['ANSIBLE_METADATA']['value'].s,
doc_info['ANSIBLE_METADATA']['lineno'],
self.name, 'ANSIBLE_METADATA'
)
for error in errors:
self.reporter.error(
path=self.object_path,
code=315,
**error
)
for trace in traces:
self.reporter.trace(
path=self.object_path,
tracebk=trace
)
if metadata:
self._validate_docs_schema(metadata, metadata_1_1_schema(deprecated),
'ANSIBLE_METADATA', 316)
return doc_info
def _check_version_added(self, doc):
if not self._is_new_module():
return
try:
version_added = StrictVersion(str(doc.get('version_added', '0.0') or '0.0'))
except ValueError:
version_added = doc.get('version_added', '0.0')
self.reporter.error(
path=self.object_path,
code=306,
msg='version_added is not a valid version number: %r' % version_added
)
return
should_be = '.'.join(ansible_version.split('.')[:2])
strict_ansible_version = StrictVersion(should_be)
if (version_added < strict_ansible_version or
strict_ansible_version < version_added):
self.reporter.error(
path=self.object_path,
code=307,
msg='version_added should be %s. Currently %s' % (should_be, version_added)
)
def _validate_argument_spec(self):
if not self.analyze_arg_spec:
return
try:
spec = get_argument_spec(self.path)
except AnsibleModuleImportError:
self.reporter.error(
path=self.object_path,
code=321,
msg='Exception attempting to import module for argument_spec introspection'
)
self.reporter.trace(
path=self.object_path,
tracebk=traceback.format_exc()
)
return
for arg, data in spec.items():
if data.get('required') and data.get('default', object) != object:
self.reporter.error(
path=self.object_path,
code=317,
msg=('"%s" is marked as required but specifies '
'a default. Arguments with a default '
'should not be marked as required' % arg)
)
def _check_for_new_args(self, doc):
if not self.base_branch or self._is_new_module():
return
with CaptureStd():
try:
existing_doc = get_docstring(self.base_module, fragment_loader, verbose=True)[0]
existing_options = existing_doc.get('options', {}) or {}
except AssertionError:
fragment = doc['extends_documentation_fragment']
self.reporter.warning(
path=self.object_path,
code=392,
msg='Pre-existing DOCUMENTATION fragment missing: %s' % fragment
)
return
except Exception as e:
self.reporter.warning_trace(
path=self.object_path,
tracebk=e
)
self.reporter.warning(
path=self.object_path,
code=391,
msg=('Unknown pre-existing DOCUMENTATION '
'error, see TRACE. Submodule refs may '
                         'need updating')
)
return
try:
mod_version_added = StrictVersion(
str(existing_doc.get('version_added', '0.0'))
)
except ValueError:
mod_version_added = StrictVersion('0.0')
options = doc.get('options', {}) or {}
should_be = '.'.join(ansible_version.split('.')[:2])
strict_ansible_version = StrictVersion(should_be)
for option, details in options.items():
try:
names = [option] + details.get('aliases', [])
except (TypeError, AttributeError):
# Reporting of this syntax error will be handled by schema validation.
continue
if any(name in existing_options for name in names):
continue
try:
version_added = StrictVersion(
str(details.get('version_added', '0.0'))
)
except ValueError:
version_added = details.get('version_added', '0.0')
self.reporter.error(
path=self.object_path,
code=308,
msg=('version_added for new option (%s) '
'is not a valid version number: %r' %
(option, version_added))
)
continue
except Exception:
# If there is any other exception it should have been caught
# in schema validation, so we won't duplicate errors by
# listing it again
continue
if (strict_ansible_version != mod_version_added and
(version_added < strict_ansible_version or
strict_ansible_version < version_added)):
self.reporter.error(
path=self.object_path,
code=309,
msg=('version_added for new option (%s) should '
'be %s. Currently %s' %
(option, should_be, version_added))
)
@staticmethod
def is_blacklisted(path):
base_name = os.path.basename(path)
file_name, _ = os.path.splitext(base_name)
if file_name.startswith('_') and os.path.islink(path):
return True
if not frozenset((base_name, file_name)).isdisjoint(ModuleValidator.BLACKLIST):
return True
for pat in ModuleValidator.BLACKLIST_PATTERNS:
if fnmatch(base_name, pat):
return True
return False
def validate(self):
super(ModuleValidator, self).validate()
if not self._python_module() and not self._powershell_module():
self.reporter.error(
path=self.object_path,
code=501,
msg=('Official Ansible modules must have a .py '
'extension for python modules or a .ps1 '
'for powershell modules')
)
self._python_module_override = True
if self._python_module() and self.ast is None:
self.reporter.error(
path=self.object_path,
code=401,
msg='Python SyntaxError while parsing module'
)
try:
compile(self.text, self.path, 'exec')
except Exception:
self.reporter.trace(
path=self.object_path,
tracebk=traceback.format_exc()
)
return
if self._python_module():
doc_info = self._validate_docs()
if self._python_module() and not self._just_docs():
self._validate_argument_spec()
self._check_for_sys_exit()
self._find_blacklist_imports()
main = self._find_main_call()
self._find_module_utils(main)
self._find_has_import()
self._check_for_tabs()
first_callable = self._get_first_callable()
self._ensure_imports_below_docs(doc_info, first_callable)
if self._powershell_module():
self._validate_ps_replacers()
self._find_ps_docs_py_file()
self._check_gpl3_header()
if not self._just_docs():
self._check_interpreter(powershell=self._powershell_module())
self._check_type_instead_of_isinstance(
powershell=self._powershell_module()
)
class PythonPackageValidator(Validator):
BLACKLIST_FILES = frozenset(('__pycache__',))
def __init__(self, path, reporter=None):
super(PythonPackageValidator, self).__init__(reporter=reporter or Reporter())
self.path = path
self.basename = os.path.basename(path)
@property
def object_name(self):
return self.basename
@property
def object_path(self):
return self.path
def validate(self):
super(PythonPackageValidator, self).validate()
if self.basename in self.BLACKLIST_FILES:
return
init_file = os.path.join(self.path, '__init__.py')
if not os.path.exists(init_file):
self.reporter.error(
path=self.object_path,
code=502,
msg='Ansible module subdirectories must contain an __init__.py'
)
def re_compile(value):
"""
Argparse expects things to raise TypeError, re.compile raises an re.error
exception
This function is a shorthand to convert the re.error exception to a
TypeError
"""
try:
return re.compile(value)
except re.error as e:
raise TypeError(e)
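# re_compile is used below as the argparse type= converter for --exclude, so an invalid
# pattern is reported cleanly by argparse instead of surfacing as an uncaught re.error, e.g.:
#   parser.add_argument('--exclude', type=re_compile)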
def main():
parser = argparse.ArgumentParser(prog="validate-modules")
parser.add_argument('modules', nargs='+',
help='Path to module or module directory')
parser.add_argument('-w', '--warnings', help='Show warnings',
action='store_true')
parser.add_argument('--exclude', help='RegEx exclusion pattern',
type=re_compile)
parser.add_argument('--arg-spec', help='Analyze module argument spec',
action='store_true', default=False)
parser.add_argument('--base-branch', default=None,
help='Used in determining if new options were added')
parser.add_argument('--format', choices=['json', 'plain'], default='plain',
help='Output format. Default: "%(default)s"')
parser.add_argument('--output', default='-',
help='Output location, use "-" for stdout. '
'Default "%(default)s"')
args = parser.parse_args()
args.modules[:] = [m.rstrip('/') for m in args.modules]
reporter = Reporter()
git_cache = GitCache(args.base_branch)
check_dirs = set()
for module in args.modules:
if os.path.isfile(module):
path = module
if args.exclude and args.exclude.search(path):
continue
if ModuleValidator.is_blacklisted(path):
continue
with ModuleValidator(path, analyze_arg_spec=args.arg_spec,
base_branch=args.base_branch, git_cache=git_cache, reporter=reporter) as mv:
mv.validate()
check_dirs.add(os.path.dirname(path))
for root, dirs, files in os.walk(module):
basedir = root[len(module) + 1:].split('/', 1)[0]
if basedir in BLACKLIST_DIRS:
continue
for dirname in dirs:
if root == module and dirname in BLACKLIST_DIRS:
continue
path = os.path.join(root, dirname)
if args.exclude and args.exclude.search(path):
continue
check_dirs.add(path)
for filename in files:
path = os.path.join(root, filename)
if args.exclude and args.exclude.search(path):
continue
if ModuleValidator.is_blacklisted(path):
continue
with ModuleValidator(path, analyze_arg_spec=args.arg_spec,
base_branch=args.base_branch, git_cache=git_cache, reporter=reporter) as mv:
mv.validate()
for path in sorted(check_dirs):
pv = PythonPackageValidator(path, reporter=reporter)
pv.validate()
if args.format == 'plain':
sys.exit(reporter.plain(warnings=args.warnings, output=args.output))
else:
sys.exit(reporter.json(warnings=args.warnings, output=args.output))
class GitCache(object):
def __init__(self, base_branch):
self.base_branch = base_branch
if self.base_branch:
self.base_tree = self._git(['ls-tree', '-r', '--name-only', self.base_branch, 'lib/ansible/modules/'])
else:
self.base_tree = []
try:
self.head_tree = self._git(['ls-tree', '-r', '--name-only', 'HEAD', 'lib/ansible/modules/'])
except GitError as ex:
if ex.status == 128:
# fallback when there is no .git directory
self.head_tree = self._get_module_files()
else:
raise
except OSError as ex:
if ex.errno == errno.ENOENT:
# fallback when git is not installed
self.head_tree = self._get_module_files()
else:
raise
self.base_module_paths = dict((os.path.basename(p), p) for p in self.base_tree if os.path.splitext(p)[1] in ('.py', '.ps1'))
self.base_module_paths.pop('__init__.py', None)
self.head_aliased_modules = set()
for path in self.head_tree:
filename = os.path.basename(path)
if filename.startswith('_') and filename != '__init__.py':
if os.path.islink(path):
self.head_aliased_modules.add(os.path.basename(os.path.realpath(path)))
@staticmethod
def _get_module_files():
module_files = []
for (dir_path, dir_names, file_names) in os.walk('lib/ansible/modules/'):
for file_name in file_names:
module_files.append(os.path.join(dir_path, file_name))
return module_files
@staticmethod
def _git(args):
cmd = ['git'] + args
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
if p.returncode != 0:
raise GitError(stderr, p.returncode)
return stdout.decode('utf-8').splitlines()
class GitError(Exception):
def __init__(self, message, status):
super(GitError, self).__init__(message)
self.status = status
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
pass
|
gpl-3.0
| 8,874,566,115,994,565,000
| 35.585044
| 132
| 0.487936
| false
| 4.688275
| false
| false
| false
|
sixdub/Minions
|
scans/models.py
|
1
|
1839
|
from django.db import models
from django.contrib.auth.models import User
from django.core.exceptions import ValidationError
import re
# Create your models here.
class Scan(models.Model):
name=models.CharField(max_length=200,default="")
hosts=models.TextField(default="")
profile=models.ForeignKey("Scan_Profile", related_name="scanprofile")
user = models.ForeignKey(User,blank=True, null=True, related_name="user")
version =models.CharField(max_length=100, blank=True, null=True)
summary=models.TextField(blank=True, null=True)
finished=models.BooleanField(default=False)
    def __unicode__(self):
        return self.name
    # Only allow IP addresses and properly formatted host names to pass through; values may be comma-separated and split across lines.
def isvalid(self, el):
el = el.rstrip()
fqdn = re.findall("(?=^.{4,255}$)(^((?!-)[a-zA-Z0-9-]{0,62}[a-zA-Z0-9]\.)+[a-zA-Z]{2,63}$)", el)
ips = re.findall("(?:[0-9]{1,3}\.){3}[0-9]{1,3}", el)
if len(ips) + len(fqdn) <= 0:
raise ValidationError("Proper FQDN or IP not provided")
def clean(self):
for line in self.hosts.split("\n"): #if your hosts field can have multiple lines, you can remove this
elems = line.split(",")#creates an array from comma separated values
if line:
for el in elems:
self.isvalid(el)
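    # Example of a hosts value this validation accepts (hypothetical data):
    #   "10.0.0.1,example.com\n192.168.1.5"
    # whereas an entry that is neither an IP address nor a FQDN raises ValidationError.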
class Scan_Profile(models.Model):
name=models.CharField(max_length=100, default="", unique=True)
author=models.ForeignKey(User, related_name="profile_author")
cmdline=models.TextField(default="")
def __unicode__(self):
return self.name
#dont allow any output format. We handle that :)
def clean(self):
if "nmap" in self.cmdline:
raise ValidationError('Do not place "nmap" in the command line arguments!')
m = re.findall("-o[A-Z]", self.cmdline)
if m:
raise ValidationError('No "-o" flags... We will decide the output for you!')
|
gpl-2.0
| -5,329,743,307,292,903,000
| 35.058824
| 118
| 0.703643
| false
| 3.17069
| false
| false
| false
|
kubevirt/client-python
|
kubevirt/models/v1_domain_spec.py
|
1
|
10040
|
# coding: utf-8
"""
KubeVirt API
This is KubeVirt API an add-on for Kubernetes.
OpenAPI spec version: 1.0.0
Contact: kubevirt-dev@googlegroups.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1DomainSpec(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'chassis': 'V1Chassis',
'clock': 'V1Clock',
'cpu': 'V1CPU',
'devices': 'V1Devices',
'features': 'V1Features',
'firmware': 'V1Firmware',
'io_threads_policy': 'str',
'machine': 'V1Machine',
'memory': 'V1Memory',
'resources': 'V1ResourceRequirements'
}
attribute_map = {
'chassis': 'chassis',
'clock': 'clock',
'cpu': 'cpu',
'devices': 'devices',
'features': 'features',
'firmware': 'firmware',
'io_threads_policy': 'ioThreadsPolicy',
'machine': 'machine',
'memory': 'memory',
'resources': 'resources'
}
def __init__(self, chassis=None, clock=None, cpu=None, devices=None, features=None, firmware=None, io_threads_policy=None, machine=None, memory=None, resources=None):
"""
V1DomainSpec - a model defined in Swagger
"""
self._chassis = None
self._clock = None
self._cpu = None
self._devices = None
self._features = None
self._firmware = None
self._io_threads_policy = None
self._machine = None
self._memory = None
self._resources = None
if chassis is not None:
self.chassis = chassis
if clock is not None:
self.clock = clock
if cpu is not None:
self.cpu = cpu
self.devices = devices
if features is not None:
self.features = features
if firmware is not None:
self.firmware = firmware
if io_threads_policy is not None:
self.io_threads_policy = io_threads_policy
if machine is not None:
self.machine = machine
if memory is not None:
self.memory = memory
if resources is not None:
self.resources = resources
@property
def chassis(self):
"""
Gets the chassis of this V1DomainSpec.
Chassis specifies the chassis info passed to the domain.
:return: The chassis of this V1DomainSpec.
:rtype: V1Chassis
"""
return self._chassis
@chassis.setter
def chassis(self, chassis):
"""
Sets the chassis of this V1DomainSpec.
Chassis specifies the chassis info passed to the domain.
:param chassis: The chassis of this V1DomainSpec.
:type: V1Chassis
"""
self._chassis = chassis
@property
def clock(self):
"""
Gets the clock of this V1DomainSpec.
Clock sets the clock and timers of the vmi.
:return: The clock of this V1DomainSpec.
:rtype: V1Clock
"""
return self._clock
@clock.setter
def clock(self, clock):
"""
Sets the clock of this V1DomainSpec.
Clock sets the clock and timers of the vmi.
:param clock: The clock of this V1DomainSpec.
:type: V1Clock
"""
self._clock = clock
@property
def cpu(self):
"""
Gets the cpu of this V1DomainSpec.
        CPU allows specifying the detailed CPU topology inside the vmi.
:return: The cpu of this V1DomainSpec.
:rtype: V1CPU
"""
return self._cpu
@cpu.setter
def cpu(self, cpu):
"""
Sets the cpu of this V1DomainSpec.
        CPU allows specifying the detailed CPU topology inside the vmi.
:param cpu: The cpu of this V1DomainSpec.
:type: V1CPU
"""
self._cpu = cpu
@property
def devices(self):
"""
Gets the devices of this V1DomainSpec.
Devices allows adding disks, network interfaces, and others
:return: The devices of this V1DomainSpec.
:rtype: V1Devices
"""
return self._devices
@devices.setter
def devices(self, devices):
"""
Sets the devices of this V1DomainSpec.
Devices allows adding disks, network interfaces, and others
:param devices: The devices of this V1DomainSpec.
:type: V1Devices
"""
if devices is None:
raise ValueError("Invalid value for `devices`, must not be `None`")
self._devices = devices
@property
def features(self):
"""
Gets the features of this V1DomainSpec.
Features like acpi, apic, hyperv, smm.
:return: The features of this V1DomainSpec.
:rtype: V1Features
"""
return self._features
@features.setter
def features(self, features):
"""
Sets the features of this V1DomainSpec.
Features like acpi, apic, hyperv, smm.
:param features: The features of this V1DomainSpec.
:type: V1Features
"""
self._features = features
@property
def firmware(self):
"""
Gets the firmware of this V1DomainSpec.
Firmware.
:return: The firmware of this V1DomainSpec.
:rtype: V1Firmware
"""
return self._firmware
@firmware.setter
def firmware(self, firmware):
"""
Sets the firmware of this V1DomainSpec.
Firmware.
:param firmware: The firmware of this V1DomainSpec.
:type: V1Firmware
"""
self._firmware = firmware
@property
def io_threads_policy(self):
"""
Gets the io_threads_policy of this V1DomainSpec.
Controls whether or not disks will share IOThreads. Omitting IOThreadsPolicy disables use of IOThreads. One of: shared, auto
:return: The io_threads_policy of this V1DomainSpec.
:rtype: str
"""
return self._io_threads_policy
@io_threads_policy.setter
def io_threads_policy(self, io_threads_policy):
"""
Sets the io_threads_policy of this V1DomainSpec.
Controls whether or not disks will share IOThreads. Omitting IOThreadsPolicy disables use of IOThreads. One of: shared, auto
:param io_threads_policy: The io_threads_policy of this V1DomainSpec.
:type: str
"""
self._io_threads_policy = io_threads_policy
@property
def machine(self):
"""
Gets the machine of this V1DomainSpec.
Machine type.
:return: The machine of this V1DomainSpec.
:rtype: V1Machine
"""
return self._machine
@machine.setter
def machine(self, machine):
"""
Sets the machine of this V1DomainSpec.
Machine type.
:param machine: The machine of this V1DomainSpec.
:type: V1Machine
"""
self._machine = machine
@property
def memory(self):
"""
Gets the memory of this V1DomainSpec.
        Memory allows specifying the VMI memory features.
:return: The memory of this V1DomainSpec.
:rtype: V1Memory
"""
return self._memory
@memory.setter
def memory(self, memory):
"""
Sets the memory of this V1DomainSpec.
        Memory allows specifying the VMI memory features.
:param memory: The memory of this V1DomainSpec.
:type: V1Memory
"""
self._memory = memory
@property
def resources(self):
"""
Gets the resources of this V1DomainSpec.
Resources describes the Compute Resources required by this vmi.
:return: The resources of this V1DomainSpec.
:rtype: V1ResourceRequirements
"""
return self._resources
@resources.setter
def resources(self, resources):
"""
Sets the resources of this V1DomainSpec.
Resources describes the Compute Resources required by this vmi.
:param resources: The resources of this V1DomainSpec.
:type: V1ResourceRequirements
"""
self._resources = resources
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, V1DomainSpec):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
|
apache-2.0
| -6,167,048,185,573,170,000
| 25.560847
| 170
| 0.566036
| false
| 4.286934
| false
| false
| false
|
ConnectedSystems/veneer-py
|
veneer/navigate.py
|
1
|
2140
|
'''
Prototype functionality for interacting with the Source model directly, including tab-completion in IPython/Jupyter. Eg
v = veneer.Veneer()
scenario = Queryable(v)
scenario.Name = 'New Scenario Name'
'''
class Queryable(object):
def __init__(self,v,path='scenario',namespace=None):
self._v = v
self._path = path
self._init = False
self._ns = namespace
def _eval_(self):
return self._v.model.get(self._path,namespace=self._ns)
def _child_(self,path):
val = Queryable(self._v,'%s.%s'%(self._path,path),namespace=self._ns)
return val
def _double_quote_(self,maybe_string):
v = maybe_string
if not isinstance(v,str):
return v
if not "'" in v:
return "'%s'"%v
if not '"' in v:
return '"%s"'%v
v = v.replace('"','\\"')
return '"%s"'%v
def _child_idx_(self,ix):
return Queryable(self._v,'%s[%s]'%(self._path,str(ix)),namespace=self._ns)
def _initialise_children_(self,entries):
if self._init: return
self._init = True
for r in entries:
if r[:2]=='__': continue
super(Queryable,self).__setattr__(r,self._child_(r))
def _run_script(self,script):
return self._v.model._safe_run('%s\n%s'%(self._v.model._init_script(self._ns),script))
def __call__(self,*args,**kwargs):
return self._v.model.call(self._path+str(tuple(args)))
def __repr__(self):
return str(self._eval_())
def __dir__(self):
res = [e['Value'] for e in self._run_script('dir(%s)'%(self._path))['Response']['Value']]
self._initialise_children_(res)
return res
def __getattr__(self,attrname):
return self._child_(attrname)
def __getitem__(self,ix):
return self._child_idx_(ix)
def __setattr__(self,a,v):
if a.startswith('_'):
return super(Queryable,self).__setattr__(a,v)
v = self._double_quote_(v)
if not self._v.model.set('%s.%s'%(self._path,a),v):
raise Exception("Couldn't set property")
|
isc
| -557,938,653,125,760,300
| 28.722222
| 119
| 0.550935
| false
| 3.548922
| false
| false
| false
|
pwwang/bioprocs
|
bioprocs/utils/shell2.py
|
1
|
1995
|
import sys
from modkit import Modkit
import cmdy
DEFAULT_CONFIG = dict(
default = dict(_raise = True),
bedtools = dict(_prefix = '-'),
biobambam = dict(_sep = '=', _prefix = ''),
bowtie2 = dict(_dupkey = True),
dtoxog = dict(_out = cmdy.DEVERR, _prefix = '-'),
sort = dict(_sep = '', _dupkey = True),
gatk3 = dict(_dupkey = True),
hla_la = dict(_raw = True),
liftover = dict(_prefix = '-', _sep = '='),
oncotator = dict(_sep = 'auto'),
optitype = dict(_dupkey = False),
maf2vcf = dict(_sep = ' '),
netmhc = dict(_prefix = '-'),
# As of picard 2.20.5-SNAPSHOT
# it's changing in the futher. See: https://github.com/broadinstitute/picard/wiki/Command-Line-Syntax-Transition-For-Users-(Pre-Transition)
# Future one should be:
# picard = dict(_sep = ' ', _prefix = '-')
picard = dict(_sep = '=', _prefix = ''),
plink = dict(_out = cmdy.DEVERR),
pyclone = dict(_raw = True),
razers3 = dict(_prefix = '-'),
snpeff = dict(_prefix = '-'),
vcfanno = dict(_prefix = '-'),
vep = dict(_dupkey = True, _raw = True),
)
cmdy.config._load(DEFAULT_CONFIG)
def _modkit_delegate(name):
return getattr(cmdy, name)
# run command at foreground
fg = cmdy(_fg = True, _debug = True)
bg = cmdy(_bg = True, _debug = True)
out = cmdy(_out = '>')
pipe = cmdy(_pipe = True)
## aliases
rm_rf = cmdy.rm.bake(r = True, f = True)
ln_s = cmdy.ln.bake(s = True)
kill_9 = cmdy.kill.bake(s = 9)
wc_l = cmdy.wc.bake(l = True)
cp = copy = cmdy.cp
mv = move = cmdy.mv
which = lambda x: cmdy.which(x).strip()
runcmd = lambda cmd: cmdy.bash(c = cmd)
def load_config(conf = None, **kwargs):
conf = conf or {}
conf.update(kwargs)
conf2load = {'default': DEFAULT_CONFIG['default']}
for key, val in conf.items():
conf2load[key] = DEFAULT_CONFIG.get(key, {}).copy()
conf2load[key].update(val if isinstance(val, dict) else {'_exe': val})
cmdy.config._load(conf2load)
fg.config._load(conf2load)
out.config._load(conf2load)
Modkit()
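# Minimal usage sketch for load_config: the bedtools path and picard options are
# illustrative assumptions; as implemented above, a non-dict value is wrapped
# into {'_exe': value}, while a dict overrides the tool's cmdy options directly.
if __name__ == '__main__':
    load_config(
        bedtools = '/usr/local/bin/bedtools', # point the tool at a specific binary
        picard = dict(_sep = ' ', _prefix = '-')) # or override its cmdy options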
|
mit
| 2,583,899,961,950,900,000
| 28.776119
| 140
| 0.606015
| false
| 2.604439
| true
| false
| false
|
otuncelli/Xpert-Screen-Recorder
|
src/main.py
|
1
|
16935
|
# -*- coding: utf-8 -*-
# =============================================================================
# Xpert Screen Recorder
# Copyright (C) 2013 OSMAN TUNCELLI
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# =============================================================================
import singleton, logging
singleton.logger.setLevel(logging.CRITICAL)
singleton.SingleInstance()
import pygtk
pygtk.require('2.0')
import gtk, os, sys, subprocess, operator, signal, webbrowser
from datetime import datetime
from collections import OrderedDict
from ConfigParser import ConfigParser
DEBUG_MODE = False
if not DEBUG_MODE:
sys.stderr = open(os.path.devnull, 'w')
LANG = 'en'
def T(s):
dic = {'Xpert Screen Recorder' : u'Xpert Ekran Görüntüsü Kaydedici',
'Start Recording' : u'Kaydı Başlat',
'Stop Recording' : u'Kaydı Durdur',
'Settings' : 'Ayarlar',
'About' : u'Hakkında',
'Exit' : u'Çıkış',
'Resolution' : u'Çözünürlük',
'Frame rate' : u'Çerçeve hızı',
'Language' : u'Arayüz Dili',
'Save To' : u'Kayıt Yeri',
'Xpert Screen Recorder is a multi-platform screencast recorder.' : u'Xpert Ekran Görüntüsü Kaydedici, ekran görüntüsünü çeşitli platformlarda kaydedebilen bir araçtır.',
'All Done! Do you want to watch the recorded video now?' : u'Tamamlandı! Kaydedilen görüntüyü şimdi izlemek ister misiniz?' }
return (dic[s] if LANG == 'tr' else s)
class Settings(object):
def __init__(self, screen_size, inifile = 'settings.ini'):
self.defaults = { 'framerate' : 30, 'resolution' : screen_size, 'saveto' : os.path.expanduser('~'), 'lang' : 'en' }
self.active = self.defaults.copy()
self.screen_size = screen_size
self.dialog_shown = False
self.valid_framerates = (15,25,30)
self._set_valid_resolutions()
self.valid_languages = OrderedDict((('en', 'English'), ('tr', u'Türkçe')))
self.inifile = inifile
self.cp = ConfigParser()
if os.path.isfile(inifile):
self.cp.read(inifile)
self.correct(self.cp._defaults)
self.active = self.cp._defaults.copy()
else:
self.cp._defaults = self.defaults.copy()
with open(inifile, 'w') as fp:
self.cp.write(fp)
def correct(self, d):
try:
d['framerate'] = int(d['framerate'])
assert d['framerate'] in self.valid_framerates
except:
d['framerate'] = self.defaults['framerate']
try:
d['resolution'] = eval(d['resolution'])
assert d['resolution'] in self.valid_resolutions
except:
d['resolution'] = self.defaults['resolution']
try:
assert os.path.isdir(d['saveto'])
except:
d['saveto'] = self.defaults['saveto']
try:
assert d['lang'] in ('tr', 'en')
except:
d['lang'] = 'en'
def _set_valid_resolutions(self):
width_array = (1920, 1680, 1280, 960)
aspect_ratio = operator.truediv(*self.screen_size)
self.valid_resolutions = tuple((w, int(w / aspect_ratio)) for w in width_array if w <= self.screen_size[0])
def set_framerate(self, framerate):
self.active['framerate'] = int(framerate)
def set_resolution(self, res):
if isinstance(res, basestring):
self.active['resolution'] = tuple(res.split('x'))
else:
self.active['resolution'] = tuple(res)
def set_saveto(self, saveto):
self.active['saveto'] = saveto
def get_framerate(self):
return self.active['framerate']
def get_resolution(self):
return self.active['resolution']
def get_saveto(self):
return self.active['saveto']
def get_language(self):
return self.active['lang']
def show_dialog(self, reload_func):
self.dialog_shown = True
self.reload_func = reload_func
dialog = gtk.Dialog()
dialog.set_type_hint(gtk.gdk.WINDOW_TYPE_HINT_UTILITY)
dialog.set_size_request(250,250)
dialog.set_resizable(False)
dialog.set_position(gtk.WIN_POS_CENTER)
label_settings = gtk.Label()
label_resolution = gtk.Label()
label_framerate = gtk.Label()
label_language = gtk.Label()
def set_settings_texts():
dialog.set_title(T('Settings'))
label_settings.set_markup('<span font_family="Verdana" weight="heavy" size="x-large">' + dialog.get_title() + '</span>')
label_resolution.set_text(T('Resolution') + ' :')
label_framerate.set_text(T('Frame rate') + ' :')
label_language.set_text(T('Language') + ' :')
set_settings_texts()
store_resolution = gtk.ListStore(str)
store_framerate = gtk.ListStore(str)
store_language = gtk.ListStore(str)
for v in self.valid_languages.values():
store_language.append([v])
renderer = gtk.CellRendererText()
renderer.set_alignment(1, 0.5)
for vr in self.valid_resolutions:
store_resolution.append(['x'.join(map(str, vr))])
self.combo_resolution = gtk.ComboBox(store_resolution)
self.combo_resolution.pack_start(renderer)
self.combo_resolution.add_attribute(renderer, 'text', 0)
self.combo_resolution.set_active(self.valid_resolutions.index(self.get_resolution()))
for fr in self.valid_framerates:
store_framerate.append([fr])
self.combo_framerate = gtk.ComboBox(store_framerate)
self.combo_framerate.pack_start(renderer)
self.combo_framerate.add_attribute(renderer, 'text', 0)
self.combo_framerate.set_active(self.valid_framerates.index(self.get_framerate()))
self.combo_language = gtk.ComboBox(store_language)
self.combo_language.pack_start(renderer)
self.combo_language.add_attribute(renderer, 'text', 0)
self.combo_language.set_active(self.valid_languages.keys().index(self.get_language()))
button_browse = gtk.Button(T('Save To'))
button_okay = gtk.Button(stock=gtk.STOCK_OK)
button_okay.set_size_request(40, -1)
button_cancel = gtk.Button(stock=gtk.STOCK_CANCEL)
button_cancel.set_size_request(40, -1)
padding = 5
table = gtk.Table(rows=3, columns=2, homogeneous=False)
xyoptions = dict(xoptions=0, yoptions=0, xpadding=padding, ypadding=padding)
table.attach(label_resolution, 0, 1, 0, 1, **xyoptions)
table.attach(self.combo_resolution, 1, 2, 0, 1, xoptions=gtk.FILL|gtk.EXPAND, xpadding=padding, ypadding=padding)
table.attach(label_framerate, 0, 1, 1, 2, **xyoptions)
table.attach(self.combo_framerate, 1, 2, 1, 2, xoptions=gtk.FILL|gtk.EXPAND, xpadding=padding, ypadding=padding)
table.attach(label_language, 0, 1, 2, 3, **xyoptions)
table.attach(self.combo_language, 1, 2, 2, 3, xoptions=gtk.FILL|gtk.EXPAND, xpadding=padding, ypadding=padding)
table.attach(button_browse, 1, 2, 3, 4, xoptions=gtk.FILL|gtk.EXPAND, xpadding=padding, ypadding=padding)
vb = dialog.vbox
vb.pack_start(label_settings, 1, 0, padding)
vb.pack_start(table, 0, 0, padding)
hb = gtk.HBox(homogeneous=False, spacing=0)
hb.pack_start(button_okay, 1, 1, padding)
hb.pack_start(button_cancel, 1, 1, padding)
vb.pack_start(hb, 0, 0, padding)
saveto = [self.get_saveto()]
def on_browse(widget, saveto):
fc = gtk.FileChooserDialog(T('Save To'), dialog,
gtk.FILE_CHOOSER_ACTION_SELECT_FOLDER|gtk.FILE_CHOOSER_ACTION_OPEN,
(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL, gtk.STOCK_OK, gtk.RESPONSE_OK))
if os.path.isdir(saveto[0]):
fc.set_current_folder(saveto[0])
try:
response = fc.run()
if response == gtk.RESPONSE_OK:
saveto[0] = fc.get_filename()
finally:
fc.destroy()
def on_ok(widget):
global LANG
LANG = self.active['lang'] = self.valid_languages.keys()[self.combo_language.get_active()]
self.active['resolution'] = self.valid_resolutions[self.combo_resolution.get_active()]
self.active['framerate'] = self.valid_framerates[self.combo_framerate.get_active()]
self.active['saveto'] = saveto[0]
self.cp._defaults = self.active.copy()
with open(self.inifile, 'w') as fp:
self.cp.write(fp)
self.reload_func()
dialog.destroy()
def on_cancel(widget):
self.active = self.cp._defaults.copy()
dialog.destroy()
button_browse.connect('clicked', lambda w : on_browse(w,saveto))
button_okay.connect('clicked', on_ok)
button_cancel.connect('clicked', on_cancel)
dialog.show_all()
dialog.present_with_time(2)
dialog.run()
self.dialog_shown = False
class XpertScreenRecorder(object):
def __init__(self, indicator = None):
global LANG
self.app_version = "1.0"
self.app_icon = gtk.StatusIcon()
self.app_icon.set_from_stock(gtk.STOCK_MEDIA_PLAY)
self.app_icon.connect('popup-menu', self.show_popup)
self.app_icon.connect('activate', self.kill_popup)
self.settings = Settings(self._get_screen_size())
self.active = self.settings.active
LANG = self.active['lang']
self.menu = gtk.Menu()
self.mi_rec_start = gtk.MenuItem()
self.mi_rec_stop = gtk.MenuItem()
self.mi_settings = gtk.MenuItem()
self.mi_about = gtk.MenuItem()
self.mi_exit = gtk.MenuItem()
self._reload_texts()
self.mi_rec_start.set_sensitive(True)
self.mi_rec_stop.set_sensitive(False)
self.mi_rec_start.connect('activate', self.start_recording)
self.mi_rec_stop.connect('activate', self.stop_recording)
self.mi_settings.connect('activate', lambda _: self.settings.show_dialog(self._reload_texts))
self.mi_about.connect('activate', self.show_about)
self.mi_exit.connect('activate', self.exit)
for mi in (self.mi_rec_start, self.mi_rec_stop, gtk.SeparatorMenuItem(), self.mi_settings, self.mi_about, self.mi_exit):
self.menu.append(mi)
self.menu.show_all()
if indicator:
indicator.set_menu(self.menu)
self.indicator = indicator
self._recording = False
def _reload_texts(self):
self.app_title = T('Xpert Screen Recorder')
self.app_icon.set_tooltip_text('{} v{}'.format(self.app_title, self.app_version))
self.mi_rec_start.set_label(T('Start Recording'))
self.mi_rec_stop.set_label(T('Stop Recording'))
self.mi_settings.set_label(T('Settings'))
self.mi_about.set_label(T('About'))
self.mi_exit.set_label(T('Exit'))
def _get_screen_size(self):
screen = self.app_icon.get_screen()
return screen.get_width(), screen.get_height()
def is_recording(self):
return self._recording
def set_recording(self, boolean):
self._recording = boolean
self.app_icon.set_blinking(self._recording)
if self._recording:
if self.indicator:
self.indicator.set_status(appindicator.STATUS_ATTENTION)
self.app_icon.set_from_stock(gtk.STOCK_MEDIA_RECORD)
self.mi_rec_start.set_sensitive(False)
self.mi_rec_stop.set_sensitive(True)
else:
if self.indicator:
self.indicator.set_status(appindicator.STATUS_ACTIVE)
self.app_icon.set_from_stock(gtk.STOCK_MEDIA_PLAY)
delattr(self, 'p')
self.mi_rec_start.set_sensitive(True)
self.mi_rec_stop.set_sensitive(False)
def generate_filename(self):
return os.path.join(self.active['saveto'], datetime.now().strftime("%Y_%m_%d_%H_%M_%S") + ".mp4")
def start_recording(self, widget):
framerate = self.active['framerate']
rtbufsize = bufsize = 2147483647 # you can also use smaller buffer sizes
self.filename = self.generate_filename()
if sys.platform == 'win32': # ffmpeg for windows
cmdline = ['ffmpeg', '-r', framerate, '-rtbufsize', rtbufsize, '-f', 'dshow',
'-i', 'video=screen-capture-recorder:audio=virtual-audio-capturer', '-threads', 2,
'-pix_fmt', 'yuv420p','-bufsize', bufsize, '-c:v', 'libx264',
'-preset', 'ultrafast', '-tune', 'zerolatency', '-threads', 2]
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags |= subprocess._subprocess.STARTF_USESHOWWINDOW
else:
cmdline = ['avconv', '-rtbufsize', rtbufsize, '-loglevel', 'quiet', '-f', 'alsa', '-i', 'pulse', '-f', 'x11grab',
'-s:v', 'x'.join(map(str, self._get_screen_size())), '-i', ':0.0', '-ar', '44100',
'-bufsize', bufsize, '-pix_fmt', 'yuv420p', '-c:v', 'libx264', '-c:a', 'libvo_aacenc',
'-preset', 'ultrafast', '-tune', 'zerolatency', '-threads', 2]
startupinfo = None
if not DEBUG_MODE:
cmdline += ['-loglevel', 'quiet']
if self.settings.screen_size <> self.active["resolution"]:
cmdline += ['-vf', 'scale=%d:-1' % self.active["resolution"][0], '-sws_flags', 'lanczos']
cmdline.append(self.filename)
cmdline = map(unicode, cmdline)
if DEBUG_MODE:
print ' '.join(cmdline)
self.p = subprocess.Popen(cmdline, stdin=subprocess.PIPE, startupinfo = startupinfo)
self.set_recording(True)
def stop_recording(self, widget):
if not self.is_recording():
return
if sys.platform == 'win32':
self.p.communicate('q\\n')
else:
self.p.send_signal(signal.SIGINT)
self.p.wait()
self.set_recording(False)
md = gtk.MessageDialog(None, 0, gtk.MESSAGE_QUESTION, gtk.BUTTONS_YES_NO, T('All Done! Do you want to watch the recorded video now?'))
md.set_position(gtk.WIN_POS_CENTER)
response = md.run()
md.destroy()
if response == gtk.RESPONSE_YES:
webbrowser.open(self.filename)
def show_about(self, widget):
about = gtk.AboutDialog()
about.set_position(gtk.WIN_POS_CENTER)
about.set_icon_name (self.app_title)
about.set_name(self.app_title)
about.set_version('v1.0')
about.set_comments(T('Xpert Screen Recorder is a multi-platform screencast recorder.'))
about.set_authors([u'Osman Tunçelli <tuncelliosman-at-gmail.com>'])
about.run()
about.destroy()
def exit(self, widget):
self.stop_recording(widget)
self.app_icon.set_visible(False)
gtk.main_quit()
def kill_popup(self, widget):
if hasattr(self, 'menu'):
self.menu.popdown()
def show_popup(self, icon, event_button, event_time):
if not self.settings.dialog_shown:
self.menu.popup(None, None, None if os.name == 'nt' else gtk.status_icon_position_menu,
event_button, event_time, self.app_icon)
main = gtk.main
if __name__ == "__main__":
indicator = None
if sys.platform == 'linux2':
import appindicator
indicator = appindicator.Indicator("Xpert", "gtk-media-play-ltr", appindicator.CATEGORY_APPLICATION_STATUS)
indicator.set_attention_icon(gtk.STOCK_MEDIA_RECORD)
indicator.set_status(appindicator.STATUS_ACTIVE)
app = XpertScreenRecorder(indicator)
app.main()
|
gpl-3.0
| -6,111,160,903,389,773,000
| 42.986979
| 180
| 0.584192
| false
| 3.608203
| false
| false
| false
|
jparal/loopy
|
loopy/io.py
|
1
|
3026
|
import tables as pt
import numpy as np
import loopy as lpy
import shutil as sh # move
import os.path as pth # exists
def readhdf5(fname, path='/'):
"""
    .. py:function:: readhdf5(fname, path='/')
The function traverse HDF5 files and creates structured dictionary.
:param fname: File name to read.
:param path: Root path from where to start reading.
:rtype: loopy.struct (i.e. dictionary) or variable
"""
def _traverse_tree(h5f, path):
# Remove double slashes and the last one
path = '/'+'/'.join(filter(None, path.split('/')))
gloc = ''.join(path.rpartition('/')[0:2])
name = path.rpartition('/')[2]
# We want to read a single variable
groups = h5f.listNodes(where=gloc, classname='Group')
nodes = h5f.listNodes(where=gloc)
leafs = [n for n in nodes if n not in groups]
leaf = [n for n in leafs if n.name == name]
if len(leaf) == 1:
return leaf[0].read()
dat = lpy.struct()
for node in h5f.listNodes(where=path):
name = node._v_name
dat[name] = _traverse_tree(h5f, path+'/'+name)
return dat
h5f = pt.File(fname, 'r')
dat = _traverse_tree(h5f, path)
h5f.close()
return dat
def writehdf5(fname, data, path='/', append=False, backup=False):
"""
.. py:function:: writehdf5(fname, data, path='/', append=False)
The function writes HDF5 file using PyTables and CArray.
This is high level function which shoud handle the most common scenarios.
:param fname: name of the HDF5 file
:param path: location inside of HDF5 file (e.g. /new/Bz)
:param data: the actual data to be stored
:type data: dict or ndarray otherwise will be converted into ndarray
:param append: Should the data be appended to an existing file?
    :param backup: If True, rename an existing file to .bak instead of
                   overwriting it.
:rtype: none
"""
if backup and pth.exists(fname):
sh.move(fname, fname+'.bak')
mode = 'a' if append else 'w'
filters = pt.Filters(complevel=6)
h5f = pt.File(fname, mode)
# Remove double slashes and the last one
path = '/'+'/'.join(filter(None, path.split('/')))
dloc = path.rsplit('/',1)
root = dloc[0] if np.size(dloc) > 1 else '/'
root = root if root.startswith('/') else '/' + root
name = path if np.size(dloc) == 1 else dloc[1]
if isinstance(data, dict):
h5f.close()
for key in data.keys():
writehdf5(fname, data[key], path=path+'/'+key, append=True)
return
if not isinstance(data, np.ndarray):
data = np.array(data, ndmin=1)
atm = pt.Atom.from_dtype(data.dtype)
arr = h5f.createCArray(root, name, atm, data.shape, \
createparents=True, filters=filters)
arr[:] = data
h5f.close()
return
from pyhdf.SD import SD, SDC
def loadhdf4(fname,variable):
data_set = SD(fname, SDC.READ)
return data_set.select(variable)[:]
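# Round-trip sketch for the HDF5 helpers above; the file name and data are
# arbitrary and, like writehdf5/readhdf5 themselves, it needs PyTables installed.
if __name__ == '__main__':
    writehdf5('example.h5', {'meta': np.array([1, 2, 3])})
    print(readhdf5('example.h5', path='/meta'))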
|
gpl-2.0
| -3,930,732,108,166,029,000
| 30.195876
| 77
| 0.61236
| false
| 3.466208
| false
| false
| false
|
PeterHenell/performance-dashboard
|
performance-collector2/query.py
|
1
|
1713
|
import types
class Query:
"""
Queries are a way for collectors to collect data. They are one way of getting data from the source.
query_name - the name of the query
key_column - the name of the key column in the result
Does not produces anything but are a field of source.
Only contain metadata about the query.
Source can add functions to Query for collecting data from some kind of server.
get_data_fun - the function or callable class to call in order for the query to collect data
mapping - elasticsearch mapping specific for this query. If some of
the fields from this query need to be mapped differently.
Used during init of the indexes.
non_data_fields = [] - Fields which should not be part of the delta calculations, instead be sent directly to es.
"""
def __init__(self, get_data, query_name, key_column, mapping, non_data_fields):
assert isinstance(get_data, types.FunctionType) \
or callable(get_data), "get_data must be a function or callable class"
assert len(query_name) > 0, "query_name must be a string"
assert len(key_column) > 0, "key_column must have some value"
assert type(mapping) is dict, "mapping must be a dictionary"
assert type(non_data_fields) is list, "non_data_fields must be a list"
self.query_name = query_name
self.key_column = key_column
self.mapping = mapping
self.non_data_fields = non_data_fields
        # keep the raw callable under its documented name so it does not shadow
        # the validating wrapper defined below
        self.get_data_fun = get_data
    def get_data(self):
        result = self.get_data_fun()
assert type(result) is list, "Result from get_data function must be list of dict"
return result
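# Illustrative construction: the collector name, key column and fields are made
# up; any zero-argument callable returning a list of dicts can act as get_data.
if __name__ == '__main__':
    q = Query(get_data=lambda: [{'db_name': 'master', 'reads': 10}],
              query_name='db_io_stats',
              key_column='db_name',
              mapping={},
              non_data_fields=['collected_at'])
    print(q.get_data())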
|
apache-2.0
| 7,730,568,456,058,332,000
| 41.825
| 117
| 0.669002
| false
| 4.167883
| false
| false
| false
|
aamlima/discobot
|
MPUtils.py
|
1
|
4400
|
import array
import os
from disco.voice.playable import (AbstractOpus, BasePlayable, BufferedIO,
OpusEncoder, YoutubeDLInput)
from disco.voice.queue import PlayableQueue
from gevent.fileobject import FileObjectThread
class YoutubeDLFInput(YoutubeDLInput):
def read(self, sz):
        if sz == 0:
if not os.path.isfile(os.path.join('data', self.info['id'])):
f_obj = open(os.path.join('data', self.info['id']), 'wb')
file = FileObjectThread(f_obj, 'wb')
super(YoutubeDLFInput, self).read(0)
file.write(self._buffer.read())
file.close()
self.close()
return b''
if not self._buffer:
if os.path.isfile(os.path.join('data', self.info['id'])):
with open(os.path.join('data', self.info['id']), 'rb') as file:
self._buffer = BufferedIO(file.read())
else:
f_obj = open(os.path.join('data', self.info['id']), 'wb')
file = FileObjectThread(f_obj, 'wb')
super(YoutubeDLFInput, self).read(0)
file.write(self._buffer.read())
file.close()
self._buffer.seekable() and self._buffer.seek(0)
return self._buffer.read(sz)
def close(self):
if self._buffer:
self._buffer.close()
self._buffer = None
class UnbufferedOpusEncoderPlayable(BasePlayable, OpusEncoder, AbstractOpus):
def __init__(self, source, *args, **kwargs):
self.source = source
if hasattr(source, 'info'):
self.info = source.info
self.volume = 0.1
library_path = kwargs.pop('library_path', None)
AbstractOpus.__init__(self, *args, **kwargs)
OpusEncoder.__init__(self, self.sampling_rate,
self.channels, library_path=library_path)
self.source.read(0)
def next_frame(self):
if self.source:
raw = self.source.read(self.frame_size)
if len(raw) < self.frame_size:
self.source.close()
return None
if self.volume == 1.0:
return self.encode(raw, self.samples_per_frame)
buffer = array.array('h', raw)
for pos, byte in enumerate(buffer):
buffer[pos] = int(min(32767, max(-32767, byte * self.volume)))
return self.encode(buffer.tobytes(), self.samples_per_frame)
return None
class CircularQueue(PlayableQueue):
def get(self):
# pylint: disable=W0212
item = self._get()
if item.source and item.source._buffer and item.source._buffer.seekable():
item.source._buffer.seek(0)
self.append(item)
return item
def remove(self, index):
if len(self._data) > index:
return self._data.pop(index)
return None
def prepend(self, item):
self._data.insert(0, item)
if self._event:
self._event.set()
self._event = None
def contains(self, item, func):
for i in self._data:
if func(i, item):
return True
return False
def gen_player_data(player):
data = {}
data['paused'] = True if player.paused else False
data['volume'] = player.volume
data['duckingVolume'] = player.ducking_volume
data['autopause'] = player.autopause
data['autovolume'] = player.autovolume
data['queue'] = len(player.queue)
data['items'] = len(player.items)
data['playlist'] = [{'id': value.info['id'], 'title':value.info['title'],
'duration':value.info['duration'], 'webpageUrl':value.info['webpage_url']} for value in player.queue]
data['curItem'] = None
if player.now_playing:
data['curItem'] = {
'id': player.now_playing.info['id'],
'duration': player.now_playing.info['duration'],
'webpageUrl': player.now_playing.info['webpage_url'],
'title': player.now_playing.info['title'],
'thumbnail': player.now_playing.info['thumbnail'],
'fps': player.now_playing.sampling_rate * player.now_playing.sample_size / player.now_playing.frame_size,
'frame': player.tell_or_seek() / player.now_playing.frame_size
}
return data
|
gpl-3.0
| 3,910,258,917,146,183,000
| 33.645669
| 126
| 0.560227
| false
| 3.859649
| false
| false
| false
|
degoldschmidt/fly-analysis
|
src/experiment_stop.py
|
1
|
2820
|
"""
Experiment stop (experiment_stop.py)
This script takes a video and calculates the frame number of when
the experiment was stopped, based on overall pixel changes.
D.Goldschmidt - 09/08/16
"""
import warnings
warnings.filterwarnings("ignore")
import numpy as np
import cv2
import os
import matplotlib.pyplot as plt ## package for plotting
__VERBOSE = True
def averag(input):
sum = 0.*input[0]
for vals in input:
sum += vals
return sum/len(input)
# Func to print out only if VERBOSE
def vprint(*arg):
if __VERBOSE:
s= " ".join( map( str, arg ) )
print(s)
# Local test
#folder = "/Users/degoldschmidt/"
#filename = "output.avi"
folder = "/Volumes/Elements/raw_data_flies/0727/"
filename="VidSave_0726_20-13.avi"
profolder = "../tmp/vid/"
if not os.path.isfile(profolder + filename):
os.system("ffmpeg -i " + folder + filename + " -vf fps=fps=4 -f avi -c:v libx264 -s 50x50 " + profolder + filename) ## maybe in fly logger
cap = cv2.VideoCapture(profolder + filename)
if not cap.isOpened():
print("Error: Could not open")
length = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
fps = cap.get(cv2.CAP_PROP_FPS)
print("Open video", profolder + filename, "(","#frames:", length, "dims:", (width,height), "fps:", fps,")")
delta = []
i=0
filter = int(500/fps)
motionthr=50
frames = filter*[None]
while(i+1 < length):
if i%1000==0:
vprint(i)
# Capture frame-by-frame
ret, gray = cap.read()
# Our operations on the frame come here
gray = cv2.cvtColor(gray, cv2.COLOR_BGR2GRAY)
# center and radius are the results of HoughCircle
# mask is a CV_8UC1 image with 0
mask = np.zeros((gray.shape[0], gray.shape[1]), dtype = "uint8")
cv2.circle( mask, (int(width/2),int(height/2)), int(width/2-width/20), (255,255,255), -1, 8, 0 )
res = np.zeros((gray.shape[0], gray.shape[1]), dtype = "uint8")
np.bitwise_and(gray, mask, res)
if i>0:
frames[(i-1)%filter] = res-oldpx
if i > filter-1:
out = averag(frames)
if __VERBOSE:
cv2.imshow('frame', out)
delta.append(sum(sum(out)))
oldpx = res
i=i+1
if cv2.waitKey(1) & 0xFF == ord('q'):
break
cap.release()
cv2.destroyAllWindows()
ddelta = [j-i for i, j in zip(delta[:-1], delta[1:])]
plt.plot(delta[:],'k--', label="Sum of lowpass-filtered px changes")
plt.plot(ddelta[:],'r-', label= "Temp. difference")
plt.legend()
if __VERBOSE:
plt.show()
ddelta = np.asarray(ddelta)
stopframes = np.asarray(np.nonzero(ddelta > motionthr))
if stopframes.size > 0:
print("Experiment stops at frame", stopframes[0,0])
else:
print("No experiment stop detected")
|
gpl-3.0
| 2,363,480,963,356,214,000
| 27.21
| 142
| 0.63227
| false
| 2.974684
| false
| false
| false
|
Amarandus/xmppsh
|
plugins/ipdb.py
|
1
|
1331
|
import socket
import sqlite3
class Plugin:
def __init__(self, parser, sqlitecur):
self._cursor = sqlitecur
self._cursor.execute("CREATE TABLE IF NOT EXISTS IPs(Id INT, Name TEXT, IP TEXT, MUC TEXT)")
parser.registerCommand([(u"ip", ), (u"list", "List all registered IPs", self._list)])
parser.registerCommand([(u"ip", ), (u"register", "Register your IP", self._register)])
def _list(self, ignore, fromUser):
self._cursor.execute("SELECT Name, IP FROM IPs WHERE MUC=?", (fromUser.bare, ))
rows = self._cursor.fetchall()
msgtext = ""
for r in rows:
msgtext += "%s - %s\n" % (r[1], r[0])
return (msgtext, 0)
def _register(self, ip, fromUser):
try:
socket.inet_aton(ip[0])
name = fromUser.resource
muc = fromUser.bare
self._cursor.execute("UPDATE OR IGNORE IPs SET IP=? WHERE Name=? AND MUC=?", (ip[0], name, muc))
if self._cursor.rowcount == 0:
self._cursor.execute("INSERT OR IGNORE INTO IPs (IP, Name, MUC) VALUES (?, ?, ?)", (ip[0], name, muc))
return ("Your IP %s has been added" % (ip[0]), 1)
except socket.error:
return ("Your IP looks malformed", 1)
except:
return ("You omitted the IP", 1)
|
mit
| -3,799,865,405,480,094,000
| 40.59375
| 118
| 0.557476
| false
| 3.646575
| false
| false
| false
|
jpzk/evopy
|
evopy/examples/problems/TR/ORIDSESSVC.py
|
1
|
2168
|
'''
This file is part of evopy.
Copyright 2012, Jendrik Poloczek
evopy is free software: you can redistribute it
and/or modify it under the terms of the GNU General Public License as published
by the Free Software Foundation, either version 3 of the License, or (at your
option) any later version.
evopy is distributed in the hope that it will be
useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
Public License for more details.
You should have received a copy of the GNU General Public License along with
evopy. If not, see <http://www.gnu.org/licenses/>.
'''
from sys import path
path.append("../../../..")
from numpy import matrix
from sklearn.cross_validation import KFold
from evopy.strategies.ori_dses_svc import ORIDSESSVC
from evopy.problems.tr_problem import TRProblem
from evopy.simulators.simulator import Simulator
from evopy.metamodel.dses_svc_linear_meta_model import DSESSVCLinearMetaModel
from evopy.operators.scaling.scaling_standardscore import ScalingStandardscore
from evopy.metamodel.cv.svc_cv_sklearn_grid_linear import SVCCVSkGridLinear
from evopy.operators.termination.accuracy import Accuracy
def get_method():
sklearn_cv = SVCCVSkGridLinear(\
C_range = [2 ** i for i in range(-1, 14, 2)],
cv_method = KFold(20, 5))
meta_model = DSESSVCLinearMetaModel(\
window_size = 10,
scaling = ScalingStandardscore(),
crossvalidation = sklearn_cv,
repair_mode = 'mirror')
method = ORIDSESSVC(\
mu = 15,
lambd = 100,
theta = 0.3,
pi = 70,
initial_sigma = matrix([[4.5, 4.5]]),
delta = 4.5,
tau0 = 0.5,
tau1 = 0.6,
initial_pos = matrix([[10.0, 10.0]]),
beta = 1.0,
meta_model = meta_model)
return method
if __name__ == "__main__":
problem = TRProblem()
optimizer = get_method()
print optimizer.description
print problem.description
optfit = problem.optimum_fitness()
sim = Simulator(optimizer, problem, Accuracy(optfit, 10**(-3)))
results = sim.simulate()
|
gpl-3.0
| -3,507,452,398,660,205,600
| 30.42029
| 79
| 0.688653
| false
| 3.595357
| false
| false
| false
|
hyperwd/hwcram
|
vpc/migrations/0023_auto_20170926_0016.py
|
1
|
1677
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-09-25 16:16
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('vpc', '0022_auto_20170926_0005'),
]
operations = [
migrations.AlterField(
model_name='createip',
name='bandwidth_charge_mode',
field=models.CharField(blank=True, choices=[('traffic', '按流量计费'), ('bandwidth', '按带宽计费')], help_text="<font color='blue'>独享带宽填写</font>,<font color='red'>共享带宽留空</font>", max_length=10, null=True, verbose_name='带宽计费方式'),
),
migrations.AlterField(
model_name='createip',
name='bandwidth_name',
            field=models.CharField(blank=True, help_text="<font color='blue'>独享带宽填写</font>,<font color='red'>共享带宽留空</font>", max_length=128, null=True, verbose_name='带宽名称'),
),
migrations.AlterField(
model_name='createip',
name='bandwidth_share_id',
field=models.CharField(blank=True, help_text="<font color='blue'>独享带宽留空</font>,<font color='red'>共享带宽填写</font>", max_length=40, null=True, verbose_name='共享带宽ID'),
),
migrations.AlterField(
model_name='createip',
name='bandwidth_size',
field=models.IntegerField(blank=True, help_text="<font color='blue'>独享带宽,填写数字,范围1~300M</font>,<font color='red'>共享带宽留空</font>", null=True, verbose_name='带宽大小'),
),
]
|
mit
| -2,558,818,778,893,020,000
| 42
| 230
| 0.608638
| false
| 2.802607
| false
| false
| false
|
Lemma1/MAC-POSTS
|
src/setup.py
|
1
|
2581
|
import os
import re
import sys
import platform
import subprocess
from setuptools import setup, Extension
from setuptools.command.build_ext import build_ext
from distutils.version import LooseVersion
class CMakeExtension(Extension):
def __init__(self, name, sourcedir=''):
Extension.__init__(self, name, sources=[])
self.sourcedir = os.path.abspath(sourcedir)
class CMakeBuild(build_ext):
def run(self):
try:
out = subprocess.check_output(['cmake', '--version'])
except OSError:
raise RuntimeError("CMake must be installed to build the following extensions: " +
", ".join(e.name for e in self.extensions))
if platform.system() == "Windows":
cmake_version = LooseVersion(re.search(r'version\s*([\d.]+)', out.decode()).group(1))
if cmake_version < '3.1.0':
raise RuntimeError("CMake >= 3.1.0 is required on Windows")
for ext in self.extensions:
self.build_extension(ext)
def build_extension(self, ext):
extdir = os.path.abspath(os.path.dirname(self.get_ext_fullpath(ext.name)))
# print "DEBUG", os.listdir(extdir)
cmake_args = ['-DCMAKE_LIBRARY_OUTPUT_DIRECTORY=' + extdir,
'-DPYTHON_EXECUTABLE=' + sys.executable]
cfg = 'Debug' if self.debug else 'Release'
build_args = ['--config', cfg]
if platform.system() == "Windows":
cmake_args += ['-DCMAKE_LIBRARY_OUTPUT_DIRECTORY_{}={}'.format(cfg.upper(), extdir)]
if sys.maxsize > 2**32:
cmake_args += ['-A', 'x64']
build_args += ['--', '/m']
else:
cmake_args += ['-DCMAKE_BUILD_TYPE=' + cfg]
build_args += ['--', '-j2']
env = os.environ.copy()
env['CXXFLAGS'] = '{} -DVERSION_INFO=\\"{}\\"'.format(env.get('CXXFLAGS', ''),
self.distribution.get_version())
if not os.path.exists(self.build_temp):
os.makedirs(self.build_temp)
subprocess.check_call(['cmake', ext.sourcedir] + cmake_args, cwd=self.build_temp, env=env)
subprocess.check_call(['cmake', '--build', '.'] + build_args, cwd=self.build_temp)
setup(
name='MNMAPI',
version='0.0.1',
author='Wei Ma',
author_email='lemma171@gmail.com',
description='A API library for MAC-POSTS (MNM)',
long_description='',
ext_modules=[CMakeExtension('MNMAPI')],
cmdclass=dict(build_ext=CMakeBuild),
zip_safe=False,
)
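# Illustrative invocation: with CMake >= 3.1 and a C++ toolchain on PATH, the
# CMake-driven extension above is typically built in place with
#   python setup.py build_ext --inplace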
|
mit
| -3,688,370,340,519,691,000
| 35.871429
| 98
| 0.573809
| false
| 3.863772
| false
| false
| false
|
mishudark/indie
|
mongoforms/forms.py
|
1
|
4752
|
from mongoforms.forms import *
from .fields import MongoFormFieldGeneratorCustom
import types
from django import forms
from django.utils.datastructures import SortedDict
from mongoengine.base import BaseDocument
from mongoforms.fields import MongoFormFieldGenerator
from mongoforms.utils import mongoengine_validate_wrapper, iter_valid_fields
from mongoengine.fields import ReferenceField
class MongoFormMetaClassCustom(type):
"""Metaclass to create a new MongoForm."""
def __new__(cls, name, bases, attrs):
# get all valid existing Fields and sort them
fields = [(field_name, attrs.pop(field_name)) for field_name, obj in \
attrs.items() if isinstance(obj, forms.Field)]
fields.sort(lambda x, y: cmp(x[1].creation_counter, y[1].creation_counter))
# get all Fields from base classes
for base in bases[::-1]:
if hasattr(base, 'base_fields'):
fields = base.base_fields.items() + fields
# add the fields as "our" base fields
attrs['base_fields'] = SortedDict(fields)
# Meta class available?
if 'Meta' in attrs and hasattr(attrs['Meta'], 'document') and \
issubclass(attrs['Meta'].document, BaseDocument):
doc_fields = SortedDict()
formfield_generator = getattr(attrs['Meta'], 'formfield_generator', \
MongoFormFieldGeneratorCustom)()
# walk through the document fields
for field_name, field in iter_valid_fields(attrs['Meta']):
# add field and override clean method to respect mongoengine-validator
doc_fields[field_name] = formfield_generator.generate(field_name, field)
doc_fields[field_name].clean = mongoengine_validate_wrapper(
doc_fields[field_name].clean, field._validate)
# write the new document fields to base_fields
doc_fields.update(attrs['base_fields'])
attrs['base_fields'] = doc_fields
# maybe we need the Meta class later
attrs['_meta'] = attrs.get('Meta', object())
return super(MongoFormMetaClassCustom, cls).__new__(cls, name, bases, attrs)
class MongoFormIndie(forms.BaseForm):
"""Base MongoForm class. Used to create new MongoForms"""
__metaclass__ = MongoFormMetaClassCustom
def __init__(self, data=None, files=None, auto_id='id_%s', prefix=None,
initial=None, error_class=forms.util.ErrorList, label_suffix=':',
empty_permitted=False, instance=None):
""" initialize the form"""
assert isinstance(instance, (types.NoneType, BaseDocument)), \
'instance must be a mongoengine document, not %s' % \
type(instance).__name__
assert hasattr(self, 'Meta'), 'Meta class is needed to use MongoForm'
# new instance or updating an existing one?
if instance is None:
if self._meta.document is None:
raise ValueError('MongoForm has no document class specified.')
self.instance = self._meta.document()
object_data = {}
self.instance._adding = True
else:
self.instance = instance
self.instance._adding = False
object_data = {}
# walk through the document fields
for field_name, field in iter_valid_fields(self._meta):
# add field data if needed
field_data = getattr(instance, field_name)
if isinstance(self._meta.document._fields[field_name], ReferenceField):
# field data could be None for not populated refs
field_data = field_data and str(field_data.id)
object_data[field_name] = field_data
# additional initial data available?
if initial is not None:
object_data.update(initial)
for field_name, field in iter_valid_fields(self._meta):
            if data is not None and not data.get(field_name, None) and field.default:
try:
default = field.default()
except Exception, e:
default = field.default
data[field_name] = default
self._validate_unique = False
super(MongoFormIndie, self).__init__(data, files, auto_id, prefix,
object_data, error_class, label_suffix, empty_permitted)
def save(self, commit=True):
"""save the instance or create a new one.."""
# walk through the document fields
for field_name, field in iter_valid_fields(self._meta):
setattr(self.instance, field_name, self.cleaned_data.get(field_name))
if commit:
self.instance.save()
return self.instance
|
mit
| 4,198,350,166,523,423,000
| 40.684211
| 88
| 0.616162
| false
| 4.420465
| false
| false
| false
|
TGDiamond/Diamond
|
qa/rpc-tests/getblocktemplate.py
|
1
|
3681
|
#!/usr/bin/env python
# Copyright (c) 2014 The Diamond Core developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# Exercise the listtransactions API
from test_framework import DiamondTestFramework
from diamondrpc.authproxy import AuthServiceProxy, JSONRPCException
from util import *
def check_array_result(object_array, to_match, expected):
"""
Pass in array of JSON objects, a dictionary with key/value pairs
to match against, and another dictionary with expected key/value
pairs.
"""
num_matched = 0
for item in object_array:
all_match = True
for key,value in to_match.items():
if item[key] != value:
all_match = False
if not all_match:
continue
for key,value in expected.items():
if item[key] != value:
raise AssertionError("%s : expected %s=%s"%(str(item), str(key), str(value)))
num_matched = num_matched+1
if num_matched == 0:
raise AssertionError("No objects matched %s"%(str(to_match)))
import threading
class LongpollThread(threading.Thread):
def __init__(self, node):
threading.Thread.__init__(self)
# query current longpollid
templat = node.getblocktemplate()
self.longpollid = templat['longpollid']
# create a new connection to the node, we can't use the same
# connection from two threads
self.node = AuthServiceProxy(node.url, timeout=600)
def run(self):
self.node.getblocktemplate({'longpollid':self.longpollid})
class GetBlockTemplateTest(DiamondTestFramework):
'''
Test longpolling with getblocktemplate.
'''
def run_test(self, nodes):
print "Warning: this test will take about 70 seconds in the best case. Be patient."
nodes[0].setgenerate(True, 10)
templat = nodes[0].getblocktemplate()
longpollid = templat['longpollid']
# longpollid should not change between successive invocations if nothing else happens
templat2 = nodes[0].getblocktemplate()
assert(templat2['longpollid'] == longpollid)
# Test 1: test that the longpolling wait if we do nothing
thr = LongpollThread(nodes[0])
thr.start()
# check that thread still lives
thr.join(5) # wait 5 seconds or until thread exits
assert(thr.is_alive())
# Test 2: test that longpoll will terminate if another node generates a block
nodes[1].setgenerate(True, 1) # generate a block on another node
# check that thread will exit now that new transaction entered mempool
thr.join(5) # wait 5 seconds or until thread exits
assert(not thr.is_alive())
# Test 3: test that longpoll will terminate if we generate a block ourselves
thr = LongpollThread(nodes[0])
thr.start()
nodes[0].setgenerate(True, 1) # generate a block on another node
thr.join(5) # wait 5 seconds or until thread exits
assert(not thr.is_alive())
# Test 4: test that introducing a new transaction into the mempool will terminate the longpoll
thr = LongpollThread(nodes[0])
thr.start()
# generate a random transaction and submit it
(txid, txhex, fee) = random_transaction(nodes, Decimal("1.1"), Decimal("0.0"), Decimal("0.001"), 20)
# after one minute, every 10 seconds the mempool is probed, so in 80 seconds it should have returned
thr.join(60 + 20)
assert(not thr.is_alive())
if __name__ == '__main__':
GetBlockTemplateTest().main()
|
mit
| -8,546,662,378,669,306,000
| 38.159574
| 108
| 0.652268
| false
| 4.022951
| true
| false
| false
|
danianr/NINJa
|
joblist.py
|
1
|
1541
|
from collections import deque
class JobList(object):
def __init__(self, jobMap=None, initial=None):
self.jobs = dict()
self.merged= deque()
if type(jobMap) is dict:
for (user, prev) in jobMap.iteritems():
assert type(prev) is list
self.jobs[user] = prev
if initial is None:
            for prev in self.jobs.values():
               self.merged.extendleft(prev)
if type(initial) is deque:
self.merged.extend(initial)
def add(self, username, jobId):
if username in self.jobs:
for n in filter( lambda x: x in self.merged, self.jobs[username]):
self.merged.remove(n)
self.jobs[username].append(jobId)
else:
self.jobs[username] = [jobId]
self.merged.extendleft(self.jobs[username])
def remove(self, removedJobs):
for n in filter( lambda x: x in self.merged, removedJobs):
self.merged.remove(n)
for jobseq in self.jobs.values():
map( jobseq.remove, filter( lambda x: x in jobseq, removedJobs) )
def __iter__(self):
return iter(self.merged)
def __getitem__(self, n):
return self.merged[n]
def __getslice__(self, i, j):
return self.merged[i:j]
def __delitem__(self, n):
self.remove([n])
def __delslice__(self, i, j):
      self.remove(self.merged[i:j])
def __repr__(self):
return "JobList( jobMap=%s, initial=%s )" % \
(repr(self.jobs), repr(self.merged) )
def __str__(self):
return "%s" % list(self.merged)
|
mit
| -6,834,064,029,022,998,000
| 23.078125
| 75
| 0.573005
| false
| 3.550691
| false
| false
| false
|
jarhill0/ABot
|
memetext.py
|
1
|
6729
|
spork = 'hi every1 im new!!!!!!! holds up spork my name is katy but u can call me t3h PeNgU1N oF d00m!!!!!!!! lol…as ' \
'u can see im very random!!!! thats why i came here, 2 meet random ppl like me _… im 13 years old (im mature ' \
'4 my age tho!!) i like 2 watch invader zim w/ my girlfreind (im bi if u dont like it deal w/it) its our ' \
'favorite tv show!!! bcuz its SOOOO random!!!! shes random 2 of course but i want 2 meet more random ppl =) ' \
'like they say the more the merrier!!!! lol…neways i hope 2 make alot of freinds here so give me lots of ' \
'commentses!!!!\nDOOOOOMMMM!!!!!!!!!!!!!!!! <--- me bein random again _^ hehe…toodles!!!!!\n\nlove and ' \
'waffles,\n\nt3h PeNgU1N oF d00m'
settings = 'Current settings:\n/redditlimit followed by a number to set limit of reddit posts displayed by ' \
'/redditposts (example usage: `/redditlimit 5`)\n/subscribe or /unsubscribe followed by a topic (' \
'`xkcd`, `launches`, etc.) to subscribe or unsubscribe the current chat from notifications about ' \
'that topic\n/timezone followed by a number between -24 and 24 to set your offset from UTC'
marines = 'What the fuck did you just fucking say about me, you little bitch? I’ll have you know I graduated top of ' \
'my class in the Navy Seals, and I’ve been involved in numerous secret raids on Al-Quaeda, and I have over ' \
'300 confirmed kills. I am trained in gorilla warfare and I’m the top sniper in the entire US armed forces.' \
' You are nothing to me but just another target. I will wipe you the fuck out with precision the likes of ' \
'which has never been seen before on this Earth, mark my fucking words. You think you can get away with ' \
'saying that shit to me over the Internet? Think again, fucker. As we speak I am contacting my secret ' \
'network of spies across the USA and your IP is being traced right now so you better prepare for the ' \
'storm, maggot. The storm that wipes out the pathetic little thing you call your life. You’re fucking dead,' \
' kid. I can be anywhere, anytime, and I can kill you in over seven hundred ways, and that’s just with my' \
' bare hands. Not only am I extensively trained in unarmed combat, but I have access to the entire arsenal' \
' of the United States Marine Corps and I will use it to its full extent to wipe your miserable ass off the' \
' face of the continent, you little shit. If only you could have known what unholy retribution your little ' \
'“clever” comment was about to bring down upon you, maybe you would have held your fucking tongue. But you ' \
'couldn’t, you didn’t, and now you’re paying the price, you goddamn idiot. I will shit fury all over you ' \
'and you will drown in it. You’re fucking dead, kiddo.'
myrynys = 'Whyt thy fyck dyd yyy yyst fyckyng syy ybyyt my, yyy lyttly bytch? y’ll hyvy yyy knyw Y ' \
'grydyytyd typ yf my clyss yn thy Nyvy Syyls, ynd Y’ve byyn ynvylvyd yn nymyryys sycryt ryyds yn ' \
'Yl-Qyyydy, ynd Y hyvy yvyr 300 cynfyrmyd kylls. Y ym tryynyd yn gyrylly wyrfyry ynd Y’m thy typ ' \
'snypyr yn thy yntyry YS yrmyd fyrcys. Yyy yry nythyng ty my byt jyst ynythyr tyrgyt. Y wyll wypy ' \
'yyy thy fyck yyt wyth prycysyyn thy lykys yf whych hys nyvyr byyn syyn byfyry yn thys Yyrth, ' \
'myrk my fyckyng wyrds. Yyy thynk yyy cyn gyt ywyy wyth syyyng thyt shyt ty my yvyr thy Yntyrnyt?' \
'Thynk ygyyn, fyckyr. Ys wy spyyk Y ym cyntyctyng my sycryt nytwyrk yf spyys ycryss thy YSY ynd ' \
'yyyr YP ys byyng trycyd ryght nyw sy yyy byttyr prypyry fyr thy styrm, myggyt. Thy styrm thyt ' \
'wypys yyt thy pythytyc lyttly thyng yyy cyll yyyr lyfy. Yyy’ry fyckyng dyyd, kyd. Y cyn by ' \
'ynywhyry, ynytymy, ynd Y cyn kyll yyy yn yvyr syvyn hyndryd wyys, ynd thyt’s jyst wyth my byry ' \
'hynds. Nyt ynly ym Y yxtynsyvyly tryynyd yn ynyrmyd cymbyt, byt y hyvy yccyss ty thy yntyry ' \
'yrsynyl yf thy Ynytyd Stytys Myryny Cyrps ynd Y wyll ysy yt ty yts fyll yxtynt ty wypy yyyr ' \
'mysyrybly yss yff thy fycy yf thy cyntynynt, yyy lyttly shyt. Yf ynly yyy cyyld hyvy knywn whyt ' \
'ynhyly rytrybytyyn yyyr lyttly “clyvyr” cymmynt wys abyyt ty bryng dywn ypyn yyy, ' \
'myyby yyy wyyld hyvy hyld yyyr fyckyng tyngyy. Byt yyy cyyldn’t, yyy dydn’t, ynd nyw yyy’ry ' \
'pyyyng thy prycy, yyy gyddymn ydyyt. Y wyll shyt fyry yll yvyr yyy ynd yyy wyll drywn yn yt. ' \
'Yyy’ry fyckyng dyyd, kyddy.'
xD = """
😂😂😂😂😂😂😂😂😂😂😂😂😂😂😂
😂🆒🆒🆒🆒🆒🆒🆒🆒🆒🆒🆒🆒🆒😂
😂🆒💯🆒🆒🆒💯🆒💯💯💯🆒🆒🆒😂
😂🆒💯💯🆒💯💯🆒💯🆒💯💯🆒🆒😂
😂🆒🆒💯🆒💯🆒🆒💯🆒🆒💯💯🆒😂
😂🆒🆒💯💯💯🆒🆒💯🆒🆒🆒💯🆒😂
😂🆒🆒🆒💯🆒🆒🆒💯🆒🆒🆒💯🆒😂
😂🆒🆒💯💯💯🆒🆒💯🆒🆒🆒💯🆒😂
😂🆒🆒💯🆒💯🆒🆒💯🆒🆒💯💯🆒😂
😂🆒💯💯🆒💯💯🆒💯🆒💯💯🆒🆒😂
😂🆒💯🆒🆒🆒💯🆒💯💯💯🆒🆒🆒😂
😂🆒🆒🆒🆒🆒🆒🆒🆒🆒🆒🆒🆒🆒😂
😂😂😂😂😂😂😂😂😂😂😂😂😂😂😂
"""
pede = """
╚═( ͡° ͜ʖ ͡°)═╝
╚═(███)═╝
╚═(███)═╝
.╚═(███)═╝
..╚═(███)═╝
…╚═(███)═╝
…╚═(███)═╝
..╚═(███)═╝
.╚═(███)═╝
╚═(███)═╝
.╚═(███)═╝
..╚═(███)═╝
…╚═(███)═╝
…╚═(███)═╝
..╚═(███)═╝
.╚═(███)═╝
╚═(███)═╝
.╚═(███)═╝
..╚═(███)═╝
…╚═(███)═╝
…╚═(███)═╝
..╚═(███)═╝
.╚═(███)═╝
╚═(███)═╝
.╚═(███)═╝
..╚═(███)═╝
…╚═(███)═╝
…╚═(███)═╝
..╚═(███)═╝
.╚═(███)═╝
╚═(███)═╝
.╚═(███)═╝
..╚═(███)═╝
…╚═(███)═╝
…╚═(███)═╝
..╚═(███)═╝
.╚═(███)═╝
╚═(███)═╝
.╚═(███)═╝
..╚═(███)═╝
…╚═(███)═╝
…╚═(███)═╝
…..╚(███)╝
……╚(██)╝
………(█)
……….*
"""
|
gpl-3.0
| 1,667,627,879,406,881,800
| 48.036036
| 120
| 0.580669
| false
| 1.762306
| false
| false
| false
|
hit9/skylark
|
examples/messageboard/messageboard/views.py
|
1
|
1358
|
# coding=utf8
from datetime import datetime
from messageboard import app
from messageboard.models import Message
from flask import flash, render_template, request, redirect, url_for
@app.route('/', methods=['GET'])
def index():
query = Message.orderby(
Message.create_at, desc=True).select() # sort by created time
results = query.execute()
messages = results.all()
return render_template('template.html', messages=messages)
@app.route('/create', methods=['POST'])
def create():
title = request.form['title']
content = request.form['content']
if title and content:
message = Message.create(
title=title, content=content, create_at=datetime.now())
if message is not None: # ok
flash(dict(type='success', content='New message created'))
else: # create failed
flash(dict(type='error', content='Failed to create new message'))
else: # invalid input
flash(dict(type='warning', content='Empty input'))
return redirect(url_for('index'))
@app.route('/delete/<int:id>')
def delete(id):
query = Message.at(id).delete()
if query.execute():
flash(dict(type='success', content='Message %d dropped' % id))
else:
flash(dict(type='error', content='Failed to drop message %d' % id))
return redirect(url_for('index'))
|
bsd-2-clause
| 4,533,618,793,132,142,000
| 29.863636
| 77
| 0.648012
| false
| 3.891117
| false
| false
| false
|
rwl/muntjac
|
muntjac/demo/sampler/features/panels/PanelBasicExample.py
|
1
|
1214
|
from muntjac.api import VerticalLayout, Panel, Label, Button
from muntjac.ui.button import IClickListener
class PanelBasicExample(VerticalLayout, IClickListener):
def __init__(self):
super(PanelBasicExample, self).__init__()
self.setSpacing(True)
# Panel 1 - with caption
self._panel = Panel('This is a standard Panel')
self._panel.setHeight('200px') # we want scrollbars
# let's adjust the panels default layout (a VerticalLayout)
layout = self._panel.getContent()
layout.setMargin(True) # we want a margin
layout.setSpacing(True) # and spacing between components
self.addComponent(self._panel)
# Let's add a few rows to provoke scrollbars:
for _ in range(20):
l = Label('The quick brown fox jumps over the lazy dog.')
self._panel.addComponent(l)
# Caption toggle:
b = Button('Toggle caption')
b.addListener(self, IClickListener)
self.addComponent(b)
def buttonClick(self, event):
if self._panel.getCaption() == '':
self._panel.setCaption('This is a standard Panel')
else:
self._panel.setCaption('')
|
apache-2.0
| 2,618,713,346,185,340,000
| 31.810811
| 69
| 0.625206
| false
| 3.954397
| false
| false
| false
|
intip/aldryn-bootstrap3
|
aldryn_bootstrap3/model_fields.py
|
1
|
8060
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, absolute_import
from six import with_metaclass
import django.core.exceptions
import django.db.models
import django.forms
from django.utils.encoding import smart_text
from . import fields
class SouthMixinBase(object):
south_field_class = ''
def south_field_triple(self):
"""Returns a suitable description of this field for South."""
if not self.south_field_class:
raise NotImplementedError('please set south_field_class when using the south field mixin')
# We'll just introspect ourselves, since we inherit.
from south.modelsinspector import introspector
field_class = self.south_field_class
args, kwargs = introspector(self)
# That's our definition!
return field_class, args, kwargs
class SouthCharFieldMixin(SouthMixinBase):
south_field_class = "django.db.models.fields.CharField"
class SouthTextFieldMixin(SouthMixinBase):
south_field_class = "django.db.models.fields.TextField"
class SouthIntegerFieldMixin(SouthMixinBase):
south_field_class = "django.db.models.fields.IntegerField"
class Classes(django.db.models.TextField, SouthTextFieldMixin):
# TODO: validate
default_field_class = fields.Classes
def __init__(self, *args, **kwargs):
if 'blank' not in kwargs:
kwargs['blank'] = True
if 'default' not in kwargs:
kwargs['default'] = ''
if 'help_text' not in kwargs:
kwargs['help_text'] = 'space separated classes that are added to the class. see <a href="http://getbootstrap.com/css/" target="_blank">bootstrap docs</a>'
super(Classes, self).__init__(*args, **kwargs)
def formfield(self, **kwargs):
defaults = {
'form_class': self.default_field_class,
}
defaults.update(kwargs)
return super(Classes, self).formfield(**defaults)
class Context(django.db.models.fields.CharField, SouthCharFieldMixin):
default_field_class = fields.Context
def __init__(self, *args, **kwargs):
if 'max_length' not in kwargs:
kwargs['max_length'] = 255
if 'blank' not in kwargs:
kwargs['blank'] = False
if 'default' not in kwargs:
kwargs['default'] = self.default_field_class.DEFAULT
super(Context, self).__init__(*args, **kwargs)
def formfield(self, **kwargs):
defaults = {
'form_class': self.default_field_class,
'choices_form_class': self.default_field_class,
}
defaults.update(kwargs)
return super(Context, self).formfield(**defaults)
def get_choices(self, **kwargs):
# if there already is a "blank" choice, don't add another
# default blank choice
if '' in dict(self.choices).keys():
kwargs['include_blank'] = False
return super(Context, self).get_choices(**kwargs)
class Size(django.db.models.CharField, SouthCharFieldMixin):
default_field_class = fields.Size
def __init__(self, *args, **kwargs):
if 'max_length' not in kwargs:
kwargs['max_length'] = 255
if 'blank' not in kwargs:
kwargs['blank'] = True
if 'default' not in kwargs:
kwargs['default'] = self.default_field_class.DEFAULT
super(Size, self).__init__(*args, **kwargs)
def formfield(self, **kwargs):
defaults = {
'form_class': self.default_field_class,
'choices_form_class': self.default_field_class,
}
defaults.update(kwargs)
return super(Size, self).formfield(**defaults)
def get_choices(self, **kwargs):
# if there already is a "blank" choice, don't add another
# default blank choice
if '' in dict(self.choices).keys():
kwargs['include_blank'] = False
return super(Size, self).get_choices(**kwargs)
class Icon(django.db.models.CharField, SouthCharFieldMixin):
default_field_class = fields.Icon
def __init__(self, *args, **kwargs):
if 'max_length' not in kwargs:
kwargs['max_length'] = 255
if 'blank' not in kwargs:
kwargs['blank'] = True
if 'default' not in kwargs:
kwargs['default'] = self.default_field_class.DEFAULT
super(Icon, self).__init__(*args, **kwargs)
def formfield(self, **kwargs):
defaults = {
'form_class': self.default_field_class,
}
defaults.update(kwargs)
return super(Icon, self).formfield(**defaults)
class IntegerField(django.db.models.IntegerField, SouthIntegerFieldMixin):
default_field_class = fields.Integer
def __init__(self, verbose_name=None, name=None, min_value=None, max_value=None, **kwargs):
self.min_value, self.max_value = min_value, max_value
django.db.models.IntegerField.__init__(self, verbose_name, name, **kwargs)
def formfield(self, **kwargs):
defaults = {
'form_class': self.default_field_class,
'min_value': self.min_value,
'max_value': self.max_value,
}
defaults.update(kwargs)
return super(IntegerField, self).formfield(**defaults)
class MiniText(django.db.models.TextField, SouthTextFieldMixin):
default_field_class = fields.MiniText
def __init__(self, *args, **kwargs):
if 'blank' not in kwargs:
kwargs['blank'] = True
if 'default' not in kwargs:
kwargs['default'] = ''
super(MiniText, self).__init__(*args, **kwargs)
def formfield(self, **kwargs):
defaults = {
'form_class': self.default_field_class,
}
defaults.update(kwargs)
return super(MiniText, self).formfield(**defaults)
class LinkOrButton(django.db.models.fields.CharField, SouthCharFieldMixin):
default_field_class = fields.LinkOrButton
def __init__(self, *args, **kwargs):
if 'max_length' not in kwargs:
kwargs['max_length'] = 10
if 'blank' not in kwargs:
kwargs['blank'] = False
if 'default' not in kwargs:
kwargs['default'] = self.default_field_class.DEFAULT
super(LinkOrButton, self).__init__(*args, **kwargs)
def formfield(self, **kwargs):
defaults = {
'form_class': self.default_field_class,
'choices_form_class': self.default_field_class,
}
defaults.update(kwargs)
return super(LinkOrButton, self).formfield(**defaults)
def get_choices(self, **kwargs):
# if there already is a "blank" choice, don't add another
# default blank choice
if '' in dict(self.choices).keys():
kwargs['include_blank'] = False
return super(LinkOrButton, self).get_choices(**kwargs)
# class JSONField(json_field.JSONField, SouthTextFieldMixin):
# pass
class Responsive(MiniText):
default_field_class = fields.Responsive
def __init__(self, *args, **kwargs):
if 'blank' not in kwargs:
kwargs['blank'] = True
if 'default' not in kwargs:
kwargs['default'] = ''
super(Responsive, self).__init__(*args, **kwargs)
def formfield(self, **kwargs):
defaults = {
'form_class': self.default_field_class,
}
defaults.update(kwargs)
return super(Responsive, self).formfield(**defaults)
class ResponsivePrint(MiniText):
default_field_class = fields.ResponsivePrint
def __init__(self, *args, **kwargs):
if 'blank' not in kwargs:
kwargs['blank'] = True
if 'default' not in kwargs:
kwargs['default'] = ''
super(ResponsivePrint, self).__init__(*args, **kwargs)
def formfield(self, **kwargs):
defaults = {
'form_class': self.default_field_class,
}
defaults.update(kwargs)
return super(ResponsivePrint, self).formfield(**defaults)
#TODO:
# * btn-block, disabled
# * pull-left, pull-right
# * margins/padding
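# Usage sketch (hypothetical, not part of the original module): these fields
# are meant to be declared on Django models like the built-in field types;
# the model and field names below are illustrative only.
#
#   from django.db import models
#   from aldryn_bootstrap3 import model_fields
#
#   class ButtonPlugin(models.Model):
#       label = models.CharField(max_length=255)
#       type = model_fields.LinkOrButton()   # render as link or button
#       context = model_fields.Context()     # bootstrap context, e.g. 'primary'
#       size = model_fields.Size()           # bootstrap size, e.g. 'lg'
#       icon_left = model_fields.Icon()
#       classes = model_fields.Classes()     # extra space-separated CSS classes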
|
bsd-3-clause
| 6,422,208,715,115,952,000
| 32.443983
| 166
| 0.6134
| false
| 3.990099
| false
| false
| false
|
alubbock/pysb-legacy
|
pysb/tools/render_species.py
|
1
|
4636
|
#!/usr/bin/env python
import sys
import os
import re
import pygraphviz
import pysb.bng
def run(model):
pysb.bng.generate_equations(model)
graph = pygraphviz.AGraph(name="%s species" % model.name, rankdir="LR", fontname='Arial')
graph.edge_attr.update(fontname='Arial', fontsize=8)
for si, cp in enumerate(model.species):
sgraph_name = 'cluster_s%d' % si
cp_label = re.sub(r'% ', '%<br align="left"/>', str(cp)) + '<br align="left"/>'
sgraph_label = '<<font point-size="10" color="blue">s%d</font><br align="left"/><font face="Consolas" point-size="6">%s</font>>' % (si, cp_label)
sgraph = graph.add_subgraph(name=sgraph_name, label=sgraph_label,
color="gray75", sortv=sgraph_name)
bonds = {}
for mi, mp in enumerate(cp.monomer_patterns):
monomer_node = '%s_%d' % (sgraph_name, mi)
monomer_label = '<<table border="0" cellborder="1" cellspacing="0">'
monomer_label += '<tr><td bgcolor="#a0ffa0"><b>%s</b></td></tr>' % mp.monomer.name
for site in mp.monomer.sites:
site_state = None
cond = mp.site_conditions[site]
if isinstance(cond, str):
site_state = cond
elif isinstance(cond, tuple):
site_state = cond[0]
site_label = site
if site_state is not None:
site_label += '=<font color="purple">%s</font>' % site_state
monomer_label += '<tr><td port="%s">%s</td></tr>' % (site, site_label)
for site, value in mp.site_conditions.items():
site_bonds = [] # list of bond numbers
if isinstance(value, int):
site_bonds.append(value)
elif isinstance(value, tuple):
site_bonds.append(value[1])
elif isinstance(value, list):
site_bonds += value
for b in site_bonds:
bonds.setdefault(b, []).append((monomer_node, site))
monomer_label += '</table>>'
sgraph.add_node(monomer_node,
label=monomer_label, shape="none", fontname="Arial",
fontsize=8)
for bi, sites in bonds.items():
node_names, port_names = zip(*sites)
sgraph.add_edge(node_names, tailport=port_names[0],
headport=port_names[1], label=str(bi))
return graph.string()
usage = """
Usage: python -m pysb.tools.render_species mymodel.py > mymodel.dot
Renders the species from a model into the "dot" graph format which can be
visualized with Graphviz.
To create a PDF from the .dot file, use the Graphviz tools in the following
command pipeline:
ccomps -x mymodel.dot | dot | gvpack -m0 | neato -n2 -T pdf -o mymodel.pdf
You can also change the "dot" command to "circo" or "sfdp" for a different type
of layout. Note that you can pipe the output of render_species straight into a
Graphviz command pipeline without creating an intermediate .dot file, which is
especially helpful if you are making continuous changes to the model and need to
visualize your changes repeatedly:
python -m pysb.tools.render_species mymodel.py | ccomps -x | dot |
gvpack -m0 | neato -n2 -T pdf -o mymodel.pdf
Note that some PDF viewers will auto-reload a changed PDF, so you may not even
need to manually reopen it every time you rerun the tool.
"""
usage = usage[1:] # strip leading newline
if __name__ == '__main__':
# sanity checks on filename
if len(sys.argv) <= 1:
print usage,
exit()
model_filename = sys.argv[1]
if not os.path.exists(model_filename):
raise Exception("File '%s' doesn't exist" % model_filename)
if not re.search(r'\.py$', model_filename):
raise Exception("File '%s' is not a .py file" % model_filename)
sys.path.insert(0, os.path.dirname(model_filename))
model_name = re.sub(r'\.py$', '', os.path.basename(model_filename))
# import it
try:
# FIXME if the model has the same name as some other "real" module which we use,
# there will be trouble (use the imp package and import as some safe name?)
model_module = __import__(model_name)
except StandardError as e:
print "Error in model script:\n"
raise
# grab the 'model' variable from the module
try:
model = model_module.__dict__['model']
except KeyError:
raise Exception("File '%s' isn't a model file" % model_filename)
print run(model)
|
bsd-2-clause
| 2,940,613,078,015,021,600
| 42.735849
| 153
| 0.59189
| false
| 3.627543
| false
| false
| false
|
xiangarpm/arpym_template
|
arpym_template/estimation/flexible_probabilities.py
|
1
|
4668
|
# -*- coding: utf-8 -*-
"""
For details, see
`Section 3.1 <https://www.arpm.co/lab/redirect.php?permalink=setting-flexible-probabilities>`_.
"""
from collections import namedtuple
import numpy as np
class FlexibleProbabilities(object):
"""Flexible Probabilities
"""
def __init__(self, data):
self.x = data
self.p = np.ones(len(data))/len(data)
def shape(self):
"""Shape of the data
"""
return self.x.shape
def mean(self):
"""Sample mean with flexible probabilities
"""
return np.dot(self.p, self.x)
def cov(self):
"""Sample covariance with flexible probabilities
"""
x_ = self.x - np.mean(self.x, axis=0)
return np.dot(np.multiply(np.transpose(x_), self.p), x_)
def equal_weight(self):
"""Equally weighted probabilities
"""
self.p = np.ones(len(self.x))/len(self.x)
def exponential_decay(self, tau):
"""Exponentail decay probabilities
"""
t_ = len(self.x)
self.p = np.exp(-np.log(2)/tau*(t_-np.arange(0, t_)))
self.p = self.p / np.sum(self.p)
def smooth_kernel(self, z=None, z_star=None, h=None, gamma=2):
"""Smooth kernel probabilities
"""
if z is None:
z = self.x[:, 0]
if z_star is None:
z_star = np.mean(z)
if h is None:
h = np.std(z)
self.p = np.exp(-(np.abs(z - z_star)/h)**gamma)
self.p = self.p / np.sum(self.p)
def effective_scenarios(self, Type=None):
"""This def computes the Effective Number of Scenarios of Flexible
Probabilities via different types of defs
For details on the function, please see
|ex_effective_scenarios| |code_effective_scenarios|
Note:
The exponential of the entropy is set as default, otherwise specify
``Type.ExpEntropy.on = true`` to use the exponential of the entropy
or specify ``Type.GenExpEntropy.on = true`` and supply the scalar
``Type.ExpEntropy.g`` to use the generalized exponential of the
entropy.
Args:
Type (tuple): type of def: ``ExpEntropy``, ``GenExpEntropy``
Returns:
ens (double): Effective Number of Scenarios
.. |ex_effective_scenarios| image:: icon_ex_inline.png
:scale: 20 %
:target: https://www.arpm.co/lab/redirect.php?permalink=EBEffectNbScenFun
.. |code_effective_scenarios| image:: icon-code-1.png
:scale: 20 %
:target: https://www.arpm.co/lab/redirect.php?code=EffectiveScenarios
"""
if Type is None:
Type = namedtuple('type', ['Entropy'])
Type.Entropy = 'Exp'
if Type.Entropy != 'Exp':
Type.Entropy = 'GenExp'
# Code
p_ = self.p
if Type.Entropy == 'Exp':
p_[p_ == 0] = 10 ** (-250) # avoid log(0) in ens computation
ens = np.exp(-p_@np.log(p_.T))
else:
ens = np.sum(p_ ** Type.g) ** (-1 / (Type.g - 1))
return ens
def diff_length_mlfp(fp, nu, threshold, smartinverse=0, maxiter=10**5):
"""Maximum-likelihood with flexible probabilities for different-length
series
For details on the function, please see
|ex_diff_length_mlfp| |code_diff_length_mlfp|
Note:
We suppose the missing values, if any, are at the beginning.
(the farthest observations in the past could be missing).
We reshuffle the series in a nested pattern, such that the series with
the longer history comes first and the one with the shorter history
comes last.
Args:
        fp (FlexibleProbabilities): observations with flexible probabilities
nu (double): degrees of freedom for the multivariate Student
t-distribution
threshold (double): convergence thresholds
smartinverse (double, optional): additional parameter: set it to 1 to
use LRD smart inverse in the regression process
maxiter (int, optional): maximum number of iterations inside
``MaxLikFPTReg``
Returns:
mu (numpy.ndarray): DLFP estimate of the location parameter
sig2 (numpy.ndarray): DLFP estimate of the dispersion parameter
.. |ex_diff_length_mlfp| image:: icon_ex_inline.png
:scale: 20 %
:target: https://www.arpm.co/lab/redirect.php?permalink=DiffLengthRout
.. |code_diff_length_mlfp| image:: icon-code-1.png
:scale: 20 %
:target: https://www.arpm.co/lab/redirect.php?codeplay=DiffLengthMLFP
"""
return None
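# Usage sketch (illustrative only, not part of the original module): a typical
# flexible-probabilities workflow with the class above, assuming a (t x n)
# array of observations.
#
#   x = np.random.randn(1000, 2)
#   fp = FlexibleProbabilities(x)
#   fp.exponential_decay(tau=250)       # half-life of 250 observations
#   mu, sigma2 = fp.mean(), fp.cov()    # FP sample mean and covariance
#   ens = fp.effective_scenarios()      # exp(-sum_t p_t * log(p_t))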
|
bsd-2-clause
| 3,235,306,941,653,997,600
| 32.106383
| 95
| 0.592759
| false
| 3.785888
| false
| false
| false
|
Azulinho/sunflower-file-manager-with-tmsu-tagging-support
|
application/plugins/tmsu_column/plugin.py
|
1
|
1904
|
import gtk
from plugins.file_list.plugin import Column, FileList
from plugin_base.column_extension import ColumnExtension
from subprocess import check_output
def register_plugin(application):
"""Register plugin class with application"""
application.register_column_extension(FileList, TagsColumn)
class BaseColumn(ColumnExtension):
"""Base class for extending owner and group for item list"""
def __init__(self, parent, store):
ColumnExtension.__init__(self, parent, store)
self._parent = parent
# create column object
self._create_column()
def _create_column(self):
"""Create column"""
self._cell_renderer = gtk.CellRendererText()
self._parent.set_default_font_size(self._get_column_name(), 8)
self._column = gtk.TreeViewColumn(self._get_column_title())
self._column.pack_start(self._cell_renderer, True)
self._column.set_data('name', self._get_column_name())
def _get_column_name(self):
"""Returns column name"""
return None
def _get_column_title(self):
"""Returns column title"""
return None
def __set_cell_data(self, column, cell, store, selected_iter, data=None):
"""Set column value"""
pass
class TagsColumn(BaseColumn):
"""Adds support for displaying tags in item list"""
def __set_cell_data(self, column, cell, store, selected_iter, data=None):
"""Set column value"""
is_parent = store.get_value(selected_iter, Column.IS_PARENT_DIR)
value = (store.get_value(selected_iter, Column.TAGS), '')[is_parent]
cell.set_property('text', value)
def _create_column(self):
"""Configure column"""
BaseColumn._create_column(self)
self._column.set_cell_data_func(self._cell_renderer, self.__set_cell_data)
def _get_column_name(self):
"""Returns column name"""
return 'tags'
def _get_column_title(self):
"""Returns column title"""
return _('Tags')
def get_sort_column(self):
"""Return sort column"""
return Column.TAGS
|
gpl-3.0
| -4,224,020,129,806,103,600
| 25.816901
| 76
| 0.707458
| false
| 3.294118
| false
| false
| false
|
miquelcampos/GEAR_mc
|
gear/xsi/rig/component/eyelid_01/guide.py
|
1
|
7407
|
'''
This file is part of GEAR_mc.
GEAR_mc is a fork of Jeremie Passerin's GEAR project.
GEAR is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with this program. If not, see <http://www.gnu.org/licenses/lgpl.html>.
Author: Jeremie Passerin geerem@hotmail.com www.jeremiepasserin.com
Fork Author: Miquel Campos hello@miqueltd.com www.miqueltd.com
Date: 2013 / 08 / 16
'''
## @package gear.xsi.rig.component.eyelid_01.guide
# @author Miquel Campos
#
##########################################################
# GLOBAL
##########################################################
# gear
from gear.xsi import xsi, c, XSIMath
from gear.xsi.rig.component.guide import ComponentGuide
import gear.xsi.applyop as aop
# guide info
AUTHOR = "Miquel Campos "
URL = "http://www.miqueltd.com"
EMAIL = "hello@miqueltd.com"
VERSION = [1,0,0]
TYPE = "eyelid_01"
NAME = "eyelid"
DESCRIPTION = "eyelids rig"
##########################################################
# CLASS
##########################################################
class Guide(ComponentGuide):
compType = TYPE
compName = NAME
description = DESCRIPTION
author = AUTHOR
url = URL
email = EMAIL
version = VERSION
# =====================================================
##
# @param self
def postInit(self):
self.pick_transform = ["root", "#_loc"]
self.save_transform = ["root", "upVector", "direction", "#_loc"]
self.save_blade = ["blade"]
self.addMinMax("#_loc", 1, -1)
# =====================================================
## Add more object to the object definition list.
# @param self
def addObjects(self):
self.root = self.addRoot()
self.locs = self.addLocMulti("#_loc", self.root, False)
vTemp = XSIMath.CreateVector3(self.root.Kinematics.Global.PosX.Value , self.root.Kinematics.Global.PosY.Value +2, self.root.Kinematics.Global.PosZ.Value )
self.upVector = self.addLoc("upVector", self.root, vTemp )
vTemp = XSIMath.CreateVector3(self.root.Kinematics.Global.PosX.Value , self.root.Kinematics.Global.PosY.Value , self.root.Kinematics.Global.PosZ.Value +2 )
self.direction = self.addLoc("direction", self.root, vTemp )
centers = [self.direction, self.root, self.upVector]
self.dispcrv = self.addDispCurve("crvUp", centers)
self.blade = self.addBlade("blade", self.root, self.upVector)
centers = []
centers.extend(self.locs)
self.dispcrv = self.addDispCurve("crv", centers)
# =====================================================
## Add more parameter to the parameter definition list.
# @param self
def addParameters(self):
# eye corners controlers
self.pCornerA = self.addParam("cornerARef", c.siInt4, None, 0, None)
self.pCornerAArray = self.addParam("cornerARefArray", c.siString, "")
self.pCornerB = self.addParam("cornerBRef", c.siInt4, None, 0, None)
self.pCornerBArray = self.addParam("cornerBRefArray", c.siString, "")
# =====================================================
## Add layout for new parameters.
# @param self
def addLayout(self):
# --------------------------------------------------
# Items
cornerAItemsCode = "cornerARefItems = []" +"\r\n"+\
"if PPG."+self.pCornerAArray.scriptName+".Value:" +"\r\n"+\
" a = PPG."+self.pCornerAArray.scriptName+".Value.split(',')" +"\r\n"+\
" for i, v in enumerate(a):" +"\r\n"+\
" cornerARefItems.append(a[i])" +"\r\n"+\
" cornerARefItems.append(i)" +"\r\n"+\
"item.UIItems = cornerARefItems" +"\r\n"
cornerBItemsCode = "cornerBRefItems = []" +"\r\n"+\
"if PPG."+self.pCornerBArray.scriptName+".Value:" +"\r\n"+\
" a = PPG."+self.pCornerBArray.scriptName+".Value.split(',')" +"\r\n"+\
" for i, v in enumerate(a):" +"\r\n"+\
" cornerBRefItems.append(a[i])" +"\r\n"+\
" cornerBRefItems.append(i)" +"\r\n"+\
"item.UIItems = cornerBRefItems" +"\r\n"
# --------------------------------------------------
# Layout
tab = self.layout.addTab("Options")
# IK/Upv References
group = tab.addGroup("Eyelids controls")
row = group.addRow()
item = row.addEnumControl(self.pCornerA.scriptName, [], "Corner control A", c.siControlCombo)
item.setCodeAfter(cornerAItemsCode)
row.addButton("PickCornerARef", "Pick New")
row.addButton("DeleteCornerARef", "Delete")
row = group.addRow()
item = row.addEnumControl(self.pCornerB.scriptName, [], "Corner control B", c.siControlCombo)
item.setCodeAfter(cornerBItemsCode)
row.addButton("PickCornerBRef", "Pick New")
row.addButton("DeleteCornerBRef", "Delete")
# =====================================================
## Add logic for new layout.
# @param self
def addLogic(self):
self.logic.addGlobalCode("from gear.xsi.rig.component import logic\r\nreload(logic)")
self.logic.addOnClicked("PickCornerARef",
"prop = PPG.Inspected(0)\r\n" +
"logic.pickReferences(prop, '"+self.pCornerAArray.scriptName+"', '"+self.pCornerA.scriptName+"')\r\n" +
"PPG.Refresh() \r\n")
self.logic.addOnClicked("DeleteCornerARef",
"prop = PPG.Inspected(0)\r\n" +
"logic.deleteReference(prop, '"+self.pCornerAArray.scriptName+"', '"+self.pCornerA.scriptName+"')\r\n" +
"PPG.Refresh() \r\n")
self.logic.addOnClicked("PickCornerBRef",
"prop = PPG.Inspected(0)\r\n" +
"logic.pickReferences(prop, '"+self.pCornerBArray.scriptName+"', '"+self.pCornerB.scriptName+"')\r\n" +
"PPG.Refresh() \r\n")
self.logic.addOnClicked("DeleteCornerBRef",
"prop = PPG.Inspected(0)\r\n" +
"logic.deleteReference(prop, '"+self.pCornerBArray.scriptName+"', '"+self.pCornerB.scriptName+"')\r\n" +
"PPG.Refresh() \r\n")
|
lgpl-3.0
| -5,199,398,780,847,984,000
| 39.26087
| 164
| 0.509788
| false
| 3.754181
| false
| false
| false
|
esrille/replace-with-kanji-by-tutcode
|
mazegaki/kigou.py
|
1
|
1585
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# Copyright 2017 Esrille Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# List the words that use symbols or Greek letters.
import re
import sys
re_kigou = re.compile(r"[〇〻\u0370-\u03FF¬°∃∧◇∨≪∪∩〓△▲▽▼■∀≒◆◇≫※□⇔≡⇒∈⊆⊇⊂⊃○●◎〒∵√]")
re_kana = re.compile(r"[ぁ-んァ-ヶー]")
re_non_regular_yomi = re.compile(r"[^ぁ-んァ-ヶー]")
def is_inflectable(kana):
    return kana[-1] == "―"
#
# main
#
if __name__ == "__main__":
for line in sys.stdin:
l = line.split(" ", 1)
kana = l[0]
if re_non_regular_yomi.search(kana):
continue;
kanji = l[1].strip(" \n/").split("/")
for cand in kanji[:]:
if not re_kigou.search(cand):
kanji.remove(cand)
continue
if re_kana.search(cand):
kanji.remove(cand)
continue
if kanji:
print(kana, " /", '/'.join(kanji), "/", sep='')
|
apache-2.0
| 5,985,953,077,179,379,000
| 27.66
| 79
| 0.59037
| false
| 2.479239
| false
| false
| false
|
peastman/cbang
|
config/rpm/__init__.py
|
1
|
4971
|
import os
import shutil
from SCons.Script import *
from SCons.Action import CommandAction
def replace_dash(s):
return s.replace('-', '_')
def write_spec_text_section(f, env, name, var):
if var in env:
f.write('%%%s\n%s\n\n' % (name, env.get(var).strip()))
def write_spec_script(f, env, name, var):
if var in env:
script = env.get(var)
input = None
try:
input = open(script, 'r')
contents = input.read().strip()
finally:
if input is not None: input.close()
f.write('%%%s\n%s\n\n' % (name, contents))
def install_files(f, env, key, build_dir, path, prefix = None, perms = None,
dperms = 0755):
if perms is None: perms = 0644
if key in env:
target = build_dir + path
# Copy
env.CopyToPackage(env.get(key), target, perms, dperms)
# Write files list
for src, dst, mode in env.ResolvePackageFileMap(env.get(key), target):
if prefix is not None: f.write(prefix + ' ')
f.write(dst[len(build_dir):] + '\n')
def build_function(target, source, env):
name = env.get('package_name_lower')
# Create package build dir
build_dir = 'build/%s-RPM' % name
if os.path.exists(build_dir): shutil.rmtree(build_dir)
os.makedirs(build_dir)
# Create the SPEC file
spec_file = 'build/%s.spec' % name
f = None
try:
f = open(spec_file, 'w')
# Create the preamble
write_var = env.WriteVariable
write_var(env, f, 'Summary', 'summary')
write_var(env, f, 'Name', 'package_name_lower', None, replace_dash)
write_var(env, f, 'Version', 'version', None, replace_dash)
write_var(env, f, 'Release', 'package_build', '1', replace_dash)
write_var(env, f, 'License', 'rpm_license')
write_var(env, f, 'Group', 'rpm_group')
write_var(env, f, 'URL', 'url')
write_var(env, f, 'Vendor', 'vendor')
write_var(env, f, 'Packager', 'maintainer')
write_var(env, f, 'Icon', 'icon')
write_var(env, f, 'Prefix', 'prefix')
#write_var(env, f, 'BuildArch', 'package_arch', env.GetPackageArch())
write_var(env, f, 'Provides', 'rpm_provides', multi = True)
write_var(env, f, 'Conflicts', 'rpm_conflicts', multi = True)
write_var(env, f, 'Obsoletes', 'rpm_obsoletes', multi = True)
write_var(env, f, 'BuildRequires', 'rpm_build_requires', multi = True)
write_var(env, f, 'Requires(pre)', 'rpm_pre_requires', multi = True)
write_var(env, f, 'Requires', 'rpm_requires', multi = True)
write_var(env, f, 'Requires(postun)', 'rpm_postun_requires',
multi = True)
# Description
write_spec_text_section(f, env, 'description', 'description')
# Scripts
for script in ['prep', 'build', 'install', 'clean', 'pre', 'post',
'preun', 'postun', 'verifyscript']:
write_spec_script(f, env, script, 'rpm_' + script)
# Files
if 'rpm_filelist' in env:
f.write('%%files -f %s\n' % env.get('rpm_filelist'))
else: f.write('%files\n')
f.write('%defattr(- root root)\n')
for files in [
['documents', '/usr/share/doc/' + name, '%doc', None],
['programs', '/usr/bin', '%attr(0775 root root)', 0755],
['scripts', '/usr/bin', '%attr(0775 root root)', 0755],
['desktop_menu', '/usr/share/applications', None, None],
['init_d', '/etc/init.d', '%config %attr(0775 root root)', None],
['config', '/etc/' + name, '%config', None],
['icons', '/usr/share/pixmaps', None, None],
['platform_independent', '/usr/share/' + name, None, None],
]:
install_files(f, env, files[0], build_dir, files[1], files[2],
files[3])
# ChangeLog
write_spec_text_section(f, env, 'changelog', 'rpm_changelog')
finally:
if f is not None: f.close()
# Create directories needed by rpmbuild
for dir in ['BUILD', 'BUILDROOT', 'RPMS', 'SOURCES', 'SPECS', 'SRPMS']:
dir = 'build/' + dir
if not os.path.exists(dir): os.makedirs(dir)
# Build the package
build_dir = os.path.realpath(build_dir)
cmd = 'rpmbuild -bb --buildroot %s --define "_topdir %s/build" ' \
'--target %s %s' % (
build_dir, os.getcwd(), env.GetPackageArch(), spec_file)
CommandAction(cmd).execute(target, [build_dir], env)
# Move the package
target = str(target[0])
path = 'build/RPMS/' + env.GetPackageArch() + '/' + target
shutil.move(path, target)
def generate(env):
bld = Builder(action = build_function,
source_factory = SCons.Node.FS.Entry,
source_scanner = SCons.Defaults.DirScanner)
env.Append(BUILDERS = {'RPM' : bld})
return True
def exists():
return 1
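# Usage sketch (hypothetical SConstruct snippet, not part of this module):
# after generate(env) has registered the builder, a package target could be
# declared roughly like this, with construction variables matching the keys
# read by build_function() above (other cbang config tools normally set most
# of them):
#
#   env['package_name_lower'] = 'mytool'
#   env['version'] = '1.0'
#   env['summary'] = 'My example tool'
#   env['programs'] = ['build/mytool']
#   env.RPM('mytool.rpm')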
|
lgpl-2.1
| -2,002,260,641,959,615,500
| 32.816327
| 78
| 0.554416
| false
| 3.388548
| false
| false
| false
|
kaos-addict/weborf
|
python_cgi_weborf/cgi.py
|
1
|
8232
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
'''
Weborf
Copyright (C) 2009 Salvo "LtWorf" Tomaselli
Weborf is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
@author Salvo "LtWorf" Tomaselli <tiposchi@tiscali.it>
This package provides useful functions for cgi scripts
'''
import sys
import os
def pyinfo():
'''Shows information page'''
print "<h1>Weborf Python CGI Module</h1>"
print "<p>Version 0.2</p>"
print "<p>Written by Salvo 'LtWorf' Tomaselli <tiposchi@tiscali.it></p>"
i_vars=("GET","POST","SERVER","SESSION","COOKIE","FILES")
for var in i_vars:
v=eval(var)
if isinstance(v,list):
l=True
else: #Dict
l=False
print "<H2>%s</H2>" % var
print "<table border=1>"
for j in v:
if l:
print "<tr><td>%s</td></tr>" % (j)
else:
print "<tr><td>%s</td><td><code>%s</code></td></tr>" % (j,v[j])
print "</table>"
print "<p><h2>Weborf</h2></p><p>This program comes with ABSOLUTELY NO WARRANTY.<br>This is free software, and you are welcome to redistribute it<br>under certain conditions.<br>For details see the GPLv3 Licese.</p>"
def __post_escape(val):
    '''Post fields use certain escapes. This function returns the original string.
This function is for internal use, not meant for use by others'''
val=val.replace("+"," ") #Replaces all + with a space
i=val.find("%") #% is the char for an exadecimal escape
while i!=-1: #If there is a % in the non parsed part of the string
        s=val[i+1] + val[i+2] #Extract the hexadecimal code
if s!="37":
#Replaces all the escapes in the string
val=val.replace("%"+s,chr(int(s,16)))
else:
'''Replaces only once because this char is a % so there would be %
that aren't escapes in the non parsed part of the string'''
val=val.replace("%"+s,chr(int(s,16)),1)
i=val.find("%",i+1)
return val
def __read_post():
'''Reads POST data.
This function is for internal use.'''
#Reading POST Data
if 'CONTENT_LENGTH' not in os.environ:
return None
RAW=sys.stdin.read(int(os.getenv('CONTENT_LENGTH')))
if os.getenv('CONTENT_TYPE')=='application/x-www-form-urlencoded':
for i in RAW.split("&"):
v=i.split("=")
POST[__post_escape(v[0])]=__post_escape(v[1])
elif os.getenv('CONTENT_TYPE').startswith('multipart/form-data'):
#Finding boundary
for i in os.getenv('CONTENT_TYPE').split("; "):
if i.strip().startswith("boundary"):
boundary=i.split("=")[1]
files=RAW.split(boundary)
for i in files:
j=i.split("\r\n\r\n")
if len(j)==1:
continue
dic={}
dic['content']=j[1][:-2]
fields=j[0].split("\r\n")
for k in fields:
a=k.split(": ",1)
if len(a)==2:
dic[a[0]]=a[1]
elif len(a[0])!=0:
dic[a[0]]=None
for k in dic['Content-Disposition'].split("; "):
d=k.split("=",1)
if len(d)>1:
dic[d[0]]=d[1].replace("\"","")
else:
dic[d[0]]=None
FILES.append(dic)
return RAW
def redirect(location):
'''Sends to the client the request to redirect to another page.
It will work only if headers aren't sent yet.
It will make the script terminate immediately and redirect.'''
os.write(1,"Status: 303\r\nLocation: "+location+"\r\n\r\n") #Writes location header
sys.exit(0) #Redirects
def savesession():
'''Saves the session to the file.
Before terminating the script, this function has to be executed to ensure that the session is saved
'''
import csv
    if 'PHPSESSID' not in COOKIE or COOKIE['PHPSESSID'] is None:
return #No session to save
#Opens the file with the session
fp=file(TMPDIR+"/"+COOKIE['PHPSESSID'],"w")
writer=csv.writer(fp)
#Converting dictionary into 2 level array for csv module
a=[]
for i in SESSION:
a.append((i,SESSION[i]))
writer.writerows(a)
fp.close()
def session_start():
'''Inits the session vars'''
if 'PHPSESSID' not in COOKIE or COOKIE['PHPSESSID']==None: #No session, creating a new one
import random
import md5
#Creating session's id with random numbers and multiple hashes
r=random.Random()
a=md5.md5(os.getenv("SCRIPT_FILENAME")).hexdigest()+md5.md5(str(r.random())).hexdigest()
for i in range(10):
a=md5.md5(a).hexdigest()+md5.md5(str(r.random())).hexdigest()
s_id= "weborf-%s-%s" % (str(os.getpid()), a)
setcookie('PHPSESSID',s_id)
COOKIE['PHPSESSID']=s_id
else:#Session exists, loading data
import time
try:
#If session expired after inactivity
if (os.stat(TMPDIR+"/"+COOKIE['PHPSESSID'])[7] + SESSIONEXPIRE) < time.time():
#Deletes old session file, just to try to avoid to fill the disk
os.unlink(TMPDIR+"/"+COOKIE['PHPSESSID'])
#Creating an empty session
COOKIE['PHPSESSID']=None
session_start()
return
import csv
fp=file(TMPDIR+"/"+COOKIE['PHPSESSID'])
reader=csv.reader(fp) #Creating a csv reader
for i in reader.__iter__(): #Iterating rows
SESSION[i[0]]=i[1]
except:
#Start sessions with a new session id
COOKIE['PHPSESSID']=None
session_start()
def setcookie(name,value,expires=None):
'''Sets a cookie, by default it will be a session cookie.
Expires is the time in seconds to wait to make the cookie expire'''
if expires!=None:
s= "Set-Cookie: %s=%s; Max-Age=%s\r\n" % (str(name),str(value),str(expires))
else:
s= "Set-Cookie: %s=%s\r\n" % (str(name),str(value))
sys.stdout.write(s)
COOKIE[str(name)]=str(value)
def finalize_headers(content="text/html"):
'''This function finalizes headers. After calling this function the script can output its data.
If Content-Type of the page is not text/html, it must be specified as parameter here.'''
sys.stdout.write("Content-Type: %s\r\n\r\n"%content)
def __get_array(sep,query):
'''Returns dictionary containing all the data passed via GET'''
dic={}
if query==None:
return dic
for p in query.split(sep):
i=p.split("=",1)
if len(i)!=1:
dic[i[0]]=i[1]
elif len(i[0])!=0:
dic[i[0]]=None
return dic
def __auth_fields():
'''If there is authentication, gets username and password'''
#Deconding auth field
v=os.getenv("HTTP_AUTHORIZATION")
if v!=None:
import base64
q=v.split(" ")
os.environ['AUTH_TYPE']=q[0]
auth=base64.b64decode(q[1]).split(":",1)
os.environ['AUTH_USER']=auth[0]
os.environ['AUTH_PW']=auth[1]
#Loading configuration from file or setting default
try:
execfile("/etc/weborf/pywrapper.conf")
except:
TMPDIR="/tmp"
SESSIONEXPIRE=600
#chdir_to_file(os.getenv("SCRIPT_FILENAME"))
__auth_fields()
#Changing the order of those lines can be dangerous
COOKIE=__get_array('; ',os.getenv("HTTP_COOKIE"))
GET=__get_array('&',os.getenv("QUERY_STRING"))
SESSION={}
POST={}
FILES=[]
RAW=__read_post()
SERVER=os.environ
#Executes file
#execfile(os.getenv("SCRIPT_FILENAME"))
#savesession()
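# Usage sketch (illustrative only): a script run through this wrapper (or
# importing it) can rely on the GET/POST/COOKIE/SESSION dictionaries and the
# helpers defined above, for example:
#
#   session_start()                              # load or create the session
#   SESSION['hits'] = int(SESSION.get('hits', 0)) + 1
#   finalize_headers()                           # Content-Type + blank line
#   print "Hello %s" % GET.get('name', 'world')
#   savesession()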
|
gpl-3.0
| -1,482,226,055,591,729,700
| 32.737705
| 219
| 0.58467
| false
| 3.542169
| false
| false
| false
|
cvsuser-chromium/chromium
|
chrome/common/extensions/docs/server2/caching_file_system.py
|
1
|
4951
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import posixpath
import sys
from file_system import FileSystem, StatInfo, FileNotFoundError
from future import Future
from object_store_creator import ObjectStoreCreator
class _AsyncUncachedFuture(object):
def __init__(self,
uncached_read_futures,
stats_for_uncached,
current_results,
file_system,
object_store):
self._uncached_read_futures = uncached_read_futures
self._stats_for_uncached = stats_for_uncached
self._current_results = current_results
self._file_system = file_system
self._object_store = object_store
def Get(self):
new_results = self._uncached_read_futures.Get()
# Update the cached data in the object store. This is a path -> (read,
# version) mapping.
self._object_store.SetMulti(dict(
(path, (new_result, self._stats_for_uncached[path].version))
for path, new_result in new_results.iteritems()))
new_results.update(self._current_results)
return new_results
class CachingFileSystem(FileSystem):
'''FileSystem which implements a caching layer on top of |file_system|. It's
smart, using Stat() to decided whether to skip Read()ing from |file_system|,
and only Stat()ing directories never files.
'''
def __init__(self, file_system, object_store_creator):
self._file_system = file_system
def create_object_store(category, **optargs):
return object_store_creator.Create(
CachingFileSystem,
category='%s/%s' % (file_system.GetIdentity(), category),
**optargs)
self._stat_object_store = create_object_store('stat')
# The read caches can start populated (start_empty=False) because file
# updates are picked up by the stat, so it doesn't need the force-refresh
# which starting empty is designed for. Without this optimisation, cron
# runs are extra slow.
self._read_object_store = create_object_store('read', start_empty=False)
self._read_binary_object_store = create_object_store('read-binary',
start_empty=False)
def Refresh(self):
return self._file_system.Refresh()
def Stat(self, path):
'''Stats the directory given, or if a file is given, stats the file's parent
directory to get info about the file.
'''
# Always stat the parent directory, since it will have the stat of the child
# anyway, and this gives us an entire directory's stat info at once.
dir_path, file_path = posixpath.split(path)
if dir_path and not dir_path.endswith('/'):
dir_path += '/'
# ... and we only ever need to cache the dir stat, too.
dir_stat = self._stat_object_store.Get(dir_path).Get()
if dir_stat is None:
dir_stat = self._file_system.Stat(dir_path)
assert dir_stat is not None # should raise a FileNotFoundError
self._stat_object_store.Set(dir_path, dir_stat)
if path == dir_path:
stat_info = dir_stat
else:
file_version = dir_stat.child_versions.get(file_path)
if file_version is None:
raise FileNotFoundError('No stat found for %s in %s' % (path, dir_path))
stat_info = StatInfo(file_version)
return stat_info
def Read(self, paths, binary=False):
'''Reads a list of files. If a file is in memcache and it is not out of
date, it is returned. Otherwise, the file is retrieved from the file system.
'''
read_object_store = (self._read_binary_object_store if binary else
self._read_object_store)
read_values = read_object_store.GetMulti(paths).Get()
stat_values = self._stat_object_store.GetMulti(paths).Get()
results = {} # maps path to read value
uncached = {} # maps path to stat value
for path in paths:
stat_value = stat_values.get(path)
if stat_value is None:
# TODO(cduvall): do a concurrent Stat with the missing stat values.
try:
stat_value = self.Stat(path)
except:
return Future(exc_info=sys.exc_info())
read_value = read_values.get(path)
if read_value is None:
uncached[path] = stat_value
continue
read_data, read_version = read_value
if stat_value.version != read_version:
uncached[path] = stat_value
continue
results[path] = read_data
if not uncached:
return Future(value=results)
return Future(delegate=_AsyncUncachedFuture(
self._file_system.Read(uncached.keys(), binary=binary),
uncached,
results,
self,
read_object_store))
def GetIdentity(self):
return self._file_system.GetIdentity()
def __repr__(self):
return '<%s of %s>' % (type(self).__name__,
type(self._file_system).__name__)
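# Usage sketch (illustrative only; the construction of the wrapped file system
# and ObjectStoreCreator is assumed to happen elsewhere): the caching layer
# simply wraps another FileSystem, after which Read()/Stat() consult the
# object-store caches transparently.
#
#   caching_fs = CachingFileSystem(some_file_system, object_store_creator)
#   data = caching_fs.Read(['docs/index.html']).Get()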
|
bsd-3-clause
| -7,957,290,888,908,834,000
| 37.084615
| 80
| 0.646738
| false
| 3.861934
| false
| false
| false
|
zhaochl/python-utils
|
tar_file_ftp/tar_file.py
|
1
|
1991
|
#!/usr/bin/env python
# coding=utf-8
from file_util import *
from pdb import *
import commands
import urllib2
#output = os.popen('ls')
#print output.read()
#print '----------------------------'
#(status, output) = commands.getstatusoutput('ls')
#print status, output
def execute_cmd(cmd):
_result={}
(status, output) = commands.getstatusoutput(cmd)
_result['status'] = status
_result['output'] = output
return _result
def gen_ftp_sh(file_name):
_content = """
ftp -n <<- EOF
open timeplan.cn
user name password
cd /path/
bin
put {}
bye
EOF
""".format(file_name)
return _content
def gen_test_dir(dir_name):
_content="""
if [ -d {} ];then
echo "exist"
exit
else
mkdir {}
fi
""".format(dir_name,dir_name)
return _content
def main():
name_list = read_file_line('list')
content = '#!/bin/bash\n'
content_file=''
next_dir_index = 0
for index,name in enumerate(name_list):
if len(name)==1:
continue
name = name.encode('utf8','ignore')
dir_name = '_tmp_'+str(next_dir_index)
content_file +='cp /path/'+name +' '+dir_name+'/\n'
tar_name = dir_name+'.tar.gz'
if index%100==0:
f_name = '_bash_/bash_'+str(index)+'.sh'
#content+='mkdir '+dir_name+'\n'
content+=gen_test_dir(dir_name)
content+=content_file
content+="tar -zcvf "+ tar_name+' '+dir_name+'\n'
content+= gen_ftp_sh(tar_name)
content+='rm -rf '+tar_name+'\n'
content+='rm -rf '+dir_name+'\n'
content +="echo 'run at' `date +'%Y/%m/%d %H:%M:%S'`,file:"+tar_name+'\n'
content_file=''
next_dir_index = (index+100)/100
write_file(f_name,content)
content = '#!/bin/bash\n'
#if index>=2:
# break
print 'ok'
if __name__=='__main__':
#result = execute_cmd('ls')
#print result['output']
main()
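# Usage note (assumed from the code above): the script expects a file named
# 'list' with one file name per line; it emits batch scripts under _bash_/
# (one per 100 files), each of which copies the files into a temporary
# directory, tars it, uploads the archive with the ftp heredoc from
# gen_ftp_sh() (host and credentials there are placeholders) and cleans up.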
|
apache-2.0
| 8,161,350,097,538,408,000
| 24.525641
| 85
| 0.530889
| false
| 3.237398
| false
| false
| false
|
tensorflow/profiler
|
plugin/tensorboard_plugin_profile/convert/trace_events_json_test.py
|
1
|
4311
|
# -*- coding: utf-8 -*-
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests the Trace -> catapult JSON conversion."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import tensorflow as tf
from google.protobuf import text_format
from tensorboard_plugin_profile.convert import trace_events_json
from tensorboard_plugin_profile.protobuf import trace_events_pb2
class TraceEventsJsonStreamTest(tf.test.TestCase):
def convert(self, proto_text):
proto = trace_events_pb2.Trace()
text_format.Parse(proto_text, proto)
return json.loads(''.join(trace_events_json.TraceEventsJsonStream(proto)))
def testJsonConversion(self):
self.assertEqual(
self.convert("""
devices { key: 2 value {
name: 'D2'
device_id: 2
resources { key: 2 value {
resource_id: 2
name: 'R2.2'
} }
} }
devices { key: 1 value {
name: 'D1'
device_id: 1
resources { key: 2 value {
resource_id: 1
name: 'R1.2'
} }
} }
trace_events {
device_id: 1
resource_id: 2
name: "E1.2.1"
timestamp_ps: 100000
duration_ps: 10000
args { key: "label" value: "E1.2.1" }
args { key: "extra" value: "extra info" }
}
trace_events {
device_id: 2
resource_id: 2
name: "E2.2.1"
timestamp_ps: 105000
}
"""),
dict(
displayTimeUnit='ns',
metadata={'highres-ticks': True},
traceEvents=[
dict(
ph='M',
pid=1,
name='process_name',
args=dict(name='D1')),
dict(
ph='M',
pid=1,
name='process_sort_index',
args=dict(sort_index=1)),
dict(
ph='M',
pid=1,
tid=2,
name='thread_name',
args=dict(name='R1.2')),
dict(
ph='M',
pid=1,
tid=2,
name='thread_sort_index',
args=dict(sort_index=2)),
dict(
ph='M',
pid=2,
name='process_name',
args=dict(name='D2')),
dict(
ph='M',
pid=2,
name='process_sort_index',
args=dict(sort_index=2)),
dict(
ph='M',
pid=2,
tid=2,
name='thread_name',
args=dict(name='R2.2')),
dict(
ph='M',
pid=2,
tid=2,
name='thread_sort_index',
args=dict(sort_index=2)),
dict(
ph='X',
pid=1,
tid=2,
name='E1.2.1',
ts=0.1,
dur=0.01,
args=dict(label='E1.2.1', extra='extra info')),
dict(ph='i', pid=2, tid=2, name='E2.2.1', ts=0.105, s='t'),
{},
]))
if __name__ == '__main__':
tf.test.main()
|
apache-2.0
| 1,974,153,693,499,353,600
| 30.933333
| 80
| 0.429135
| false
| 4.453512
| true
| false
| false
|
dpawlows/MGITM
|
srcPython/gitm_3d_test.py
|
1
|
1981
|
#!/usr/bin/env python
'''
Open a GITM 3D file and create a plot similar to the example given by Aaron.
Note that as pybats.gitm is more developed, a plot like this should be made
using syntax like,
>>>a=gitm.GitmBin('filename')
>>>a.add_alt_slice(0, 'Rho', add_cbar=True)
That's how most pybats stuff works right now.
'''
# Import shit. I needed a lot of shit this time.
import numpy as np
from spacepy.pybats import gitm
import matplotlib.pyplot as plt
from matplotlib.cm import get_cmap
from matplotlib.ticker import ScalarFormatter, FormatStrFormatter
# Open file.
a=gitm.GitmBin('./3DALL_t061213_000000.bin')
# Make contour of rho at lowest altitude (index 0).
# Convert lat lon from rad to degrees.
p=180.0/np.pi
f=plt.figure() #make a fig.
ax=f.add_subplot(111) #make an ax.
# Create the contour for an altitude slice and call it 'cnt' (no jokes, please.)
# The '61' is the number of contours; you could use a vector of values to set
# levels manually if you wish. get_cmap accepts any of the color map names
# from the colormap demo pic from the Matplotlib gallery; adding '_r'
# reverses the colormap.
cnt=ax.contourf(a['Longitude'][:,:,0]*p,
p*a['Latitude'][:,:,0],
a['Rho'][:,:,0], 61, cmap=get_cmap('Spectral_r'))
# Configure axis.
ax.set_xlabel('Longitude')
ax.set_ylabel('Latitude')
ax.set_title(r'$\rho$ at Altitude=%5.2f$km$' % (a['Altitude'][0,0,0]/1000.0))
f.suptitle('File=%s'%(a.attrs['file']))
# Add a colorbar and set the tick format to exponential notation.
cb=plt.colorbar(cnt)
cb.formatter=FormatStrFormatter('%7.2E')
cb.update_ticks()
# Add the quivers.
ax.quiver(a['Longitude'][:,:,0]*p, p*a['Latitude'][:,:,0],
a['V!Dn!N (east)'][:,:,0],a['V!Dn!N (north)'][:,:,0])
# Draw to screen.
if plt.isinteractive():
plt.draw() #In interactive mode, you just "draw".
else:
# W/o interactive mode, "show" stops the user from typing more
# at the terminal until plots are drawn.
plt.show()
|
mit
| -5,919,587,534,596,864,000
| 32.576271
| 80
| 0.67996
| false
| 2.970015
| false
| false
| false
|
superfluidity/RDCL3D
|
code/toscaparser/elements/statefulentitytype.py
|
1
|
4045
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from toscaparser.common.exception import ExceptionCollector
from toscaparser.common.exception import InvalidTypeError
from toscaparser.elements.attribute_definition import AttributeDef
from toscaparser.elements.entity_type import EntityType
from toscaparser.elements.property_definition import PropertyDef
from toscaparser.unsupportedtype import UnsupportedType
class StatefulEntityType(EntityType):
'''Class representing TOSCA states.'''
interfaces_node_lifecycle_operations = ['create',
'configure', 'start',
'stop', 'delete']
interfaces_relationship_configure_operations = ['post_configure_source',
'post_configure_target',
'add_target',
'remove_target']
def __init__(self, entitytype, prefix, custom_def=None):
entire_entitytype = entitytype
if UnsupportedType.validate_type(entire_entitytype):
self.defs = None
else:
if entitytype.startswith(self.TOSCA + ":"):
entitytype = entitytype[(len(self.TOSCA) + 1):]
entire_entitytype = prefix + entitytype
if not entitytype.startswith(self.TOSCA):
entire_entitytype = prefix + entitytype
if entire_entitytype in list(self.TOSCA_DEF.keys()):
self.defs = self.TOSCA_DEF[entire_entitytype]
entitytype = entire_entitytype
elif custom_def and entitytype in list(custom_def.keys()):
self.defs = custom_def[entitytype]
else:
self.defs = None
ExceptionCollector.appendException(
InvalidTypeError(what=entitytype))
self.type = entitytype
def get_properties_def_objects(self):
'''Return a list of property definition objects.'''
properties = []
props = self.get_definition(self.PROPERTIES)
if props:
for prop, schema in props.items():
properties.append(PropertyDef(prop, None, schema))
return properties
def get_properties_def(self):
'''Return a dictionary of property definition name-object pairs.'''
return {prop.name: prop
for prop in self.get_properties_def_objects()}
def get_property_def_value(self, name):
'''Return the property definition associated with a given name.'''
props_def = self.get_properties_def()
if props_def and name in props_def.keys():
return props_def[name].value
def get_attributes_def_objects(self):
'''Return a list of attribute definition objects.'''
attrs = self.get_value(self.ATTRIBUTES, parent=True)
if attrs:
return [AttributeDef(attr, None, schema)
for attr, schema in attrs.items()]
return []
def get_attributes_def(self):
'''Return a dictionary of attribute definition name-object pairs.'''
return {attr.name: attr
for attr in self.get_attributes_def_objects()}
def get_attribute_def_value(self, name):
'''Return the attribute definition associated with a given name.'''
attrs_def = self.get_attributes_def()
if attrs_def and name in attrs_def.keys():
return attrs_def[name].value
|
apache-2.0
| 2,767,152,885,875,572,700
| 43.450549
| 78
| 0.616564
| false
| 4.565463
| false
| false
| false
|
lfloeer/hiprofile
|
lineprofile/utils.py
|
1
|
4109
|
import numpy as np
import itertools as it
def sample_prior(n_sampler, fitter, thermal_noise=0.023, thermal_noise_std=0.01):
"""Given a fitter object and the number of samplers, sample the prior
distribution of the fit parameters for use as the initial positions for
the walkers.
There are two exceptions:
1) The outlier fraction is only sampled on the
interval (fraction_min, fraction_min + 1), i.e. only in the lowest decade
allowed by the prior distribution.
2) The initial values for the inlier standard deviation are drawn from a
gaussian distribution determined by the parameters `thermal_noise` and
`thermal_noise_std`.
"""
def sample_components():
"""Get samples from prior on line profile"""
for component_idx in range(fitter.n_disks):
yield np.random.uniform(fitter.fint_min, fitter.fint_max, n_sampler)
yield np.random.normal(fitter.v_center_mean[component_idx],
fitter.v_center_std[component_idx],
n_sampler)
yield np.random.gamma(fitter.v_rot_k,
fitter.v_rot_theta,
n_sampler)
yield fitter.turbulence_min + np.random.gamma(fitter.turbulence_k,
fitter.turbulence_theta,
n_sampler)
yield np.random.beta(fitter.fsolid_p, fitter.fsolid_q, n_sampler)
yield 2 * np.random.beta(fitter.asym_p, fitter.asym_q, n_sampler) - 1.0
def sample_gaussians():
"""Get samples from prior on gaussians"""
for component_idx in range(fitter.n_disks, fitter.n_disks + fitter.n_gaussians):
yield np.random.uniform(fitter.fint_min, fitter.fint_max, n_sampler)
yield np.random.normal(fitter.v_center_mean[component_idx],
fitter.v_center_std[component_idx],
n_sampler)
yield np.random.uniform(fitter.gauss_disp_min, fitter.gauss_disp_max, n_sampler)
def sample_baseline():
"""Get samples from prior on baseline"""
for _ in range(fitter.n_baseline):
yield np.random.normal(0, 0.1, n_sampler)
def sample_likelihood():
"""Get samples from prior on posterior parameters"""
yield np.random.uniform(fitter.fraction_min, fitter.fraction_min + 1, n_sampler)
std_in_values = np.clip(
np.random.normal(thermal_noise, thermal_noise_std, n_sampler),
1e-6, 1e6
)
std_in_values = np.log10(std_in_values)
yield np.clip(std_in_values, fitter.std_in_min, fitter.std_in_max)
yield np.random.normal(0., fitter.mu_out_std, n_sampler)
yield np.random.uniform(fitter.std_out_min, fitter.std_out_max, n_sampler)
prior_it = it.chain(sample_components(), sample_gaussians(), sample_baseline(), sample_likelihood())
return np.array([samples for samples in prior_it]).T.copy()
def resample_position(position, n_walkers, n_dim, fitter, ball_size=1e-2):
"""Use rejection sampling to resample the walker positions"""
scale_factors = np.ones(n_dim)
scale_factors[3:6 * fitter.n_disks:6] = 10
scale_factors[2:6 * fitter.n_disks:6] = 100
scale_factors[1:6 * fitter.n_disks:6] = 10
scale_factors *= ball_size
new_positions = np.array([position + scale_factors * np.random.randn(n_dim)
for _ in xrange(n_walkers)])
valid = np.array([np.isfinite(fitter.ln_prior(p))
for p in new_positions])
for _ in xrange(20):
n_invalid = np.sum(~valid)
if n_invalid == 0:
break
new_positions[~valid] = np.array([position + ball_size * np.random.randn(n_dim)
for _ in xrange(n_invalid)])
valid[~valid] = np.array([np.isfinite(fitter.ln_prior(p))
for p in new_positions[~valid]])
return new_positions
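# Usage sketch (illustrative only): with an emcee-style ensemble sampler these
# helpers typically provide the initial walker positions; the `fitter` object
# and its attributes are assumed to come from the rest of the package.
#
#   p0 = sample_prior(n_walkers, fitter)                  # (n_walkers, n_dim)
#   p0 = resample_position(p0[0], n_walkers, p0.shape[1], fitter)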
|
mit
| -1,081,512,766,663,581,800
| 46.77907
| 104
| 0.590655
| false
| 3.64273
| false
| false
| false
|
JPinSPACE/AdventOfCode
|
day07/02_override_wire/solution.py
|
1
|
1717
|
""" Solution to the second puzzle of Day 7 on adventofcode.com
"""
import os
PARTS = {}
CACHE = {}
def compute(value):
""" Recursion is dumb.
"""
if value in CACHE:
return CACHE[value]
if value.isdigit():
return int(value)
value = PARTS[value]
if 'NOT' in value:
value_a = value.split(' ')[1]
return ~ compute(value_a)
try:
(value_a, operation, value_b) = value.split(' ')
computed_a = compute(value_a)
CACHE[value_a] = computed_a
computed_b = compute(value_b)
CACHE[value_b] = computed_b
if operation == 'AND':
computed = compute(value_a) & compute(value_b)
elif operation == 'OR':
computed = compute(value_a) | compute(value_b)
elif operation == 'LSHIFT':
computed = compute(value_a) << compute(value_b)
elif operation == 'RSHIFT':
computed = compute(value_a) >> compute(value_b)
else:
print "Topaz lied!"
return computed
except ValueError:
return compute(value)
def main():
""" Read in circuit instructions and assemble them!
"""
# pylint: disable=W0603
global CACHE
basedir = os.path.dirname(os.path.realpath(__file__))
file_path = os.path.join(basedir, 'input')
with open(file_path, 'r') as input_file:
for line in input_file:
line = line.strip()
(operation, name) = line.split(' -> ')
PARTS[name] = operation
signal_a = compute('a')
CACHE = {}
PARTS['b'] = str(signal_a)
solution = compute('a')
print solution
assert solution == 14710
if __name__ == '__main__':
main()
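# Input format sketch (assumed, matching the parsing in main()/compute()):
# each line wires a constant signal or a bitwise expression to a name, e.g.
#
#   123 -> x
#   x AND y -> d
#   x LSHIFT 2 -> f
#   NOT x -> h
#
# Part two overrides wire 'b' with the signal first computed for 'a'
# (PARTS['b'] = str(signal_a)) and recomputes 'a' with a cleared cache.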
|
mit
| 4,521,620,091,914,956,300
| 21.298701
| 62
| 0.550379
| false
| 3.660981
| false
| false
| false
|
onshape-public/onshape-clients
|
python/onshape_client/oas/models/bt_translation_request_info.py
|
1
|
6823
|
# coding: utf-8
"""
Onshape REST API
The Onshape REST API consumed by all clients. # noqa: E501
The version of the OpenAPI document: 1.113
Contact: api-support@onshape.zendesk.com
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
import sys # noqa: F401
import six # noqa: F401
import nulltype # noqa: F401
from onshape_client.oas.model_utils import ( # noqa: F401
ModelComposed,
ModelNormal,
ModelSimple,
date,
datetime,
file_type,
int,
none_type,
str,
validate_get_composed_info,
)
class BTTranslationRequestInfo(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
("request_state",): {"ACTIVE": "ACTIVE", "DONE": "DONE", "FAILED": "FAILED",},
}
validations = {}
additional_properties_type = None
@staticmethod
def openapi_types():
"""
This must be a class method so a model may have properties that are
of type self, this ensures that we don't create a cyclic import
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
"document_id": (str,), # noqa: E501
"failure_reason": (str,), # noqa: E501
"href": (str,), # noqa: E501
"id": (str,), # noqa: E501
"name": (str,), # noqa: E501
"request_element_id": (str,), # noqa: E501
"request_state": (str,), # noqa: E501
"result_document_id": (str,), # noqa: E501
"result_element_ids": ([str],), # noqa: E501
"result_external_data_ids": ([str],), # noqa: E501
"result_workspace_id": (str,), # noqa: E501
"version_id": (str,), # noqa: E501
"view_ref": (str,), # noqa: E501
"workspace_id": (str,), # noqa: E501
}
@staticmethod
def discriminator():
return None
attribute_map = {
"document_id": "documentId", # noqa: E501
"failure_reason": "failureReason", # noqa: E501
"href": "href", # noqa: E501
"id": "id", # noqa: E501
"name": "name", # noqa: E501
"request_element_id": "requestElementId", # noqa: E501
"request_state": "requestState", # noqa: E501
"result_document_id": "resultDocumentId", # noqa: E501
"result_element_ids": "resultElementIds", # noqa: E501
"result_external_data_ids": "resultExternalDataIds", # noqa: E501
"result_workspace_id": "resultWorkspaceId", # noqa: E501
"version_id": "versionId", # noqa: E501
"view_ref": "viewRef", # noqa: E501
"workspace_id": "workspaceId", # noqa: E501
}
@staticmethod
def _composed_schemas():
return None
required_properties = set(
[
"_data_store",
"_check_type",
"_from_server",
"_path_to_item",
"_configuration",
]
)
def __init__(
self,
_check_type=True,
_from_server=False,
_path_to_item=(),
_configuration=None,
**kwargs
): # noqa: E501
"""bt_translation_request_info.BTTranslationRequestInfo - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_from_server (bool): True if the data is from the server
False if the data is from the client (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
document_id (str): [optional] # noqa: E501
failure_reason (str): [optional] # noqa: E501
href (str): [optional] # noqa: E501
id (str): [optional] # noqa: E501
name (str): [optional] # noqa: E501
request_element_id (str): [optional] # noqa: E501
request_state (str): [optional] # noqa: E501
result_document_id (str): [optional] # noqa: E501
result_element_ids ([str]): [optional] # noqa: E501
result_external_data_ids ([str]): [optional] # noqa: E501
result_workspace_id (str): [optional] # noqa: E501
version_id (str): [optional] # noqa: E501
view_ref (str): [optional] # noqa: E501
workspace_id (str): [optional] # noqa: E501
"""
self._data_store = {}
self._check_type = _check_type
self._from_server = _from_server
self._path_to_item = _path_to_item
self._configuration = _configuration
for var_name, var_value in six.iteritems(kwargs):
if (
var_name not in self.attribute_map
and self._configuration is not None
and self._configuration.discard_unknown_keys
and self.additional_properties_type is None
):
# discard variable.
continue
setattr(self, var_name, var_value)
|
mit
| -7,788,842,874,007,090,000
| 36.081522
| 92
| 0.556793
| false
| 4.044458
| true
| false
| false
|
damoxc/ganeti
|
lib/opcodes.py
|
1
|
68014
|
#
#
# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
"""OpCodes module
This module implements the data structures which define the cluster
operations - the so-called opcodes.
Every operation which modifies the cluster state is expressed via
opcodes.
"""
# these are practically structures, so disable the message about too
# few public methods:
# pylint: disable=R0903
import logging
import re
import ipaddr
from ganeti import constants
from ganeti import errors
from ganeti import ht
from ganeti import objects
from ganeti import outils
# Common opcode attributes
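# Each parameter definition below is a 4-tuple of
# (name, default value, type check, documentation); ht.NoDefault marks a
# required parameter and ht.NoType disables type checking. A hypothetical
# parameter could therefore be declared as:
#   ("retries", 3, ht.TNonNegativeInt, "Number of retries")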
#: output fields for a query operation
_POutputFields = ("output_fields", ht.NoDefault, ht.TListOf(ht.TNonEmptyString),
"Selected output fields")
#: the shutdown timeout
_PShutdownTimeout = \
("shutdown_timeout", constants.DEFAULT_SHUTDOWN_TIMEOUT, ht.TNonNegativeInt,
"How long to wait for instance to shut down")
#: the force parameter
_PForce = ("force", False, ht.TBool, "Whether to force the operation")
#: a required instance name (for single-instance LUs)
_PInstanceName = ("instance_name", ht.NoDefault, ht.TNonEmptyString,
"Instance name")
#: Whether to ignore offline nodes
_PIgnoreOfflineNodes = ("ignore_offline_nodes", False, ht.TBool,
"Whether to ignore offline nodes")
#: a required node name (for single-node LUs)
_PNodeName = ("node_name", ht.NoDefault, ht.TNonEmptyString, "Node name")
#: a required node group name (for single-group LUs)
_PGroupName = ("group_name", ht.NoDefault, ht.TNonEmptyString, "Group name")
#: Migration type (live/non-live)
_PMigrationMode = ("mode", None,
ht.TMaybe(ht.TElemOf(constants.HT_MIGRATION_MODES)),
"Migration mode")
#: Obsolete 'live' migration mode (boolean)
_PMigrationLive = ("live", None, ht.TMaybeBool,
"Legacy setting for live migration, do not use")
#: Tag type
_PTagKind = ("kind", ht.NoDefault, ht.TElemOf(constants.VALID_TAG_TYPES),
"Tag kind")
#: List of tag strings
_PTags = ("tags", ht.NoDefault, ht.TListOf(ht.TNonEmptyString),
"List of tag names")
_PForceVariant = ("force_variant", False, ht.TBool,
"Whether to force an unknown OS variant")
_PWaitForSync = ("wait_for_sync", True, ht.TBool,
"Whether to wait for the disk to synchronize")
_PWaitForSyncFalse = ("wait_for_sync", False, ht.TBool,
"Whether to wait for the disk to synchronize"
" (defaults to false)")
_PIgnoreConsistency = ("ignore_consistency", False, ht.TBool,
"Whether to ignore disk consistency")
_PStorageName = ("name", ht.NoDefault, ht.TMaybeString, "Storage name")
_PUseLocking = ("use_locking", False, ht.TBool,
"Whether to use synchronization")
_PNameCheck = ("name_check", True, ht.TBool, "Whether to check name")
_PNodeGroupAllocPolicy = \
("alloc_policy", None,
ht.TMaybe(ht.TElemOf(constants.VALID_ALLOC_POLICIES)),
"Instance allocation policy")
_PGroupNodeParams = ("ndparams", None, ht.TMaybeDict,
"Default node parameters for group")
_PQueryWhat = ("what", ht.NoDefault, ht.TElemOf(constants.QR_VIA_OP),
"Resource(s) to query for")
_PEarlyRelease = ("early_release", False, ht.TBool,
"Whether to release locks as soon as possible")
_PIpCheckDoc = "Whether to ensure instance's IP address is inactive"
#: Do not remember instance state changes
_PNoRemember = ("no_remember", False, ht.TBool,
"Do not remember the state change")
#: Target node for instance migration/failover
_PMigrationTargetNode = ("target_node", None, ht.TMaybeString,
"Target node for shared-storage instances")
_PStartupPaused = ("startup_paused", False, ht.TBool,
"Pause instance at startup")
_PVerbose = ("verbose", False, ht.TBool, "Verbose mode")
# Parameters for cluster verification
_PDebugSimulateErrors = ("debug_simulate_errors", False, ht.TBool,
"Whether to simulate errors (useful for debugging)")
_PErrorCodes = ("error_codes", False, ht.TBool, "Error codes")
_PSkipChecks = ("skip_checks", ht.EmptyList,
ht.TListOf(ht.TElemOf(constants.VERIFY_OPTIONAL_CHECKS)),
"Which checks to skip")
_PIgnoreErrors = ("ignore_errors", ht.EmptyList,
ht.TListOf(ht.TElemOf(constants.CV_ALL_ECODES_STRINGS)),
"List of error codes that should be treated as warnings")
# Disk parameters
_PDiskParams = \
("diskparams", None,
ht.TMaybe(ht.TDictOf(ht.TElemOf(constants.DISK_TEMPLATES), ht.TDict)),
"Disk templates' parameter defaults")
# Parameters for node resource model
_PHvState = ("hv_state", None, ht.TMaybeDict, "Set hypervisor states")
_PDiskState = ("disk_state", None, ht.TMaybeDict, "Set disk states")
#: Opportunistic locking
_POpportunisticLocking = \
("opportunistic_locking", False, ht.TBool,
("Whether to employ opportunistic locking for nodes, meaning nodes"
" already locked by another opcode won't be considered for instance"
" allocation (only when an iallocator is used)"))
_PIgnoreIpolicy = ("ignore_ipolicy", False, ht.TBool,
"Whether to ignore ipolicy violations")
# Allow runtime changes while migrating
_PAllowRuntimeChgs = ("allow_runtime_changes", True, ht.TBool,
"Allow runtime changes (eg. memory ballooning)")
#: IAllocator field builder
_PIAllocFromDesc = lambda desc: ("iallocator", None, ht.TMaybeString, desc)
#: a required network name
_PNetworkName = ("network_name", ht.NoDefault, ht.TNonEmptyString,
"Set network name")
_PTargetGroups = \
("target_groups", None, ht.TMaybeListOf(ht.TNonEmptyString),
"Destination group names or UUIDs (defaults to \"all but current group\")")
#: OP_ID conversion regular expression
_OPID_RE = re.compile("([a-z])([A-Z])")
#: Utility function for L{OpClusterSetParams}
_TestClusterOsListItem = \
ht.TAnd(ht.TIsLength(2), ht.TItems([
ht.TElemOf(constants.DDMS_VALUES),
ht.TNonEmptyString,
]))
_TestClusterOsList = ht.TMaybeListOf(_TestClusterOsListItem)
# TODO: Generate check from constants.INIC_PARAMS_TYPES
#: Utility function for testing NIC definitions
_TestNicDef = \
ht.Comment("NIC parameters")(ht.TDictOf(ht.TElemOf(constants.INIC_PARAMS),
ht.TMaybeString))
_TSetParamsResultItemItems = [
ht.Comment("name of changed parameter")(ht.TNonEmptyString),
ht.Comment("new value")(ht.TAny),
]
_TSetParamsResult = \
ht.TListOf(ht.TAnd(ht.TIsLength(len(_TSetParamsResultItemItems)),
ht.TItems(_TSetParamsResultItemItems)))
# In the disks option we can provide arbitrary parameters too, which
# we may not be able to validate at this level, so we just check the
# format of the dict here and the checks concerning IDISK_PARAMS will
# happen at the LU level
_TDiskParams = \
ht.Comment("Disk parameters")(ht.TDictOf(ht.TNonEmptyString,
ht.TOr(ht.TNonEmptyString, ht.TInt)))
_TQueryRow = \
ht.TListOf(ht.TAnd(ht.TIsLength(2),
ht.TItems([ht.TElemOf(constants.RS_ALL),
ht.TAny])))
_TQueryResult = ht.TListOf(_TQueryRow)
_TOldQueryRow = ht.TListOf(ht.TAny)
_TOldQueryResult = ht.TListOf(_TOldQueryRow)
_SUMMARY_PREFIX = {
"CLUSTER_": "C_",
"GROUP_": "G_",
"NODE_": "N_",
"INSTANCE_": "I_",
}
#: Attribute name for dependencies
DEPEND_ATTR = "depends"
#: Attribute name for comment
COMMENT_ATTR = "comment"
def _NameToId(name):
"""Convert an opcode class name to an OP_ID.
@type name: string
@param name: the class name, as OpXxxYyy
@rtype: string
@return: the name in the OP_XXXX_YYYY format
"""
if not name.startswith("Op"):
return None
# Note: (?<=[a-z])(?=[A-Z]) would be ideal, since it wouldn't
# consume any input, and hence we would just have all the elements
# in the list, one by one; but it seems that split doesn't work on
# non-consuming input, hence we have to process the input string a
# bit
name = _OPID_RE.sub(r"\1,\2", name)
elems = name.split(",")
return "_".join(n.upper() for n in elems)
def _GenerateObjectTypeCheck(obj, fields_types):
"""Helper to generate type checks for objects.
@param obj: The object to generate type checks
@param fields_types: The fields and their types as a dict
@return: A ht type check function
"""
assert set(obj.GetAllSlots()) == set(fields_types.keys()), \
"%s != %s" % (set(obj.GetAllSlots()), set(fields_types.keys()))
return ht.TStrictDict(True, True, fields_types)
_TQueryFieldDef = \
_GenerateObjectTypeCheck(objects.QueryFieldDefinition, {
"name": ht.TNonEmptyString,
"title": ht.TNonEmptyString,
"kind": ht.TElemOf(constants.QFT_ALL),
"doc": ht.TNonEmptyString,
})
def RequireFileStorage():
"""Checks that file storage is enabled.
While it doesn't really fit into this module, L{utils} was deemed too large
of a dependency to be imported for just one or two functions.
@raise errors.OpPrereqError: when file storage is disabled
"""
if not constants.ENABLE_FILE_STORAGE:
raise errors.OpPrereqError("File storage disabled at configure time",
errors.ECODE_INVAL)
def RequireSharedFileStorage():
"""Checks that shared file storage is enabled.
While it doesn't really fit into this module, L{utils} was deemed too large
of a dependency to be imported for just one or two functions.
@raise errors.OpPrereqError: when shared file storage is disabled
"""
if not constants.ENABLE_SHARED_FILE_STORAGE:
raise errors.OpPrereqError("Shared file storage disabled at"
" configure time", errors.ECODE_INVAL)
@ht.WithDesc("CheckFileStorage")
def _CheckFileStorage(value):
"""Ensures file storage is enabled if used.
"""
if value == constants.DT_FILE:
RequireFileStorage()
elif value == constants.DT_SHARED_FILE:
RequireSharedFileStorage()
return True
def _BuildDiskTemplateCheck(accept_none):
"""Builds check for disk template.
@type accept_none: bool
@param accept_none: whether to accept None as a correct value
@rtype: callable
"""
template_check = ht.TElemOf(constants.DISK_TEMPLATES)
if accept_none:
template_check = ht.TMaybe(template_check)
return ht.TAnd(template_check, _CheckFileStorage)
def _CheckStorageType(storage_type):
"""Ensure a given storage type is valid.
"""
if storage_type not in constants.VALID_STORAGE_TYPES:
raise errors.OpPrereqError("Unknown storage type: %s" % storage_type,
errors.ECODE_INVAL)
if storage_type == constants.ST_FILE:
# TODO: What about shared file storage?
RequireFileStorage()
return True
#: Storage type parameter
_PStorageType = ("storage_type", ht.NoDefault, _CheckStorageType,
"Storage type")
@ht.WithDesc("IPv4 network")
def _CheckCIDRNetNotation(value):
"""Ensure a given CIDR notation type is valid.
"""
try:
ipaddr.IPv4Network(value)
except ipaddr.AddressValueError:
return False
return True
@ht.WithDesc("IPv4 address")
def _CheckCIDRAddrNotation(value):
"""Ensure a given CIDR notation type is valid.
"""
try:
ipaddr.IPv4Address(value)
except ipaddr.AddressValueError:
return False
return True
@ht.WithDesc("IPv6 address")
def _CheckCIDR6AddrNotation(value):
"""Ensure a given CIDR notation type is valid.
"""
try:
ipaddr.IPv6Address(value)
except ipaddr.AddressValueError:
return False
return True
@ht.WithDesc("IPv6 network")
def _CheckCIDR6NetNotation(value):
"""Ensure a given CIDR notation type is valid.
"""
try:
ipaddr.IPv6Network(value)
except ipaddr.AddressValueError:
return False
return True
_TIpAddress4 = ht.TAnd(ht.TString, _CheckCIDRAddrNotation)
_TIpAddress6 = ht.TAnd(ht.TString, _CheckCIDR6AddrNotation)
_TIpNetwork4 = ht.TAnd(ht.TString, _CheckCIDRNetNotation)
_TIpNetwork6 = ht.TAnd(ht.TString, _CheckCIDR6NetNotation)
_TMaybeAddr4List = ht.TMaybe(ht.TListOf(_TIpAddress4))
class _AutoOpParamSlots(outils.AutoSlots):
"""Meta class for opcode definitions.
"""
def __new__(mcs, name, bases, attrs):
"""Called when a class should be created.
@param mcs: The meta class
@param name: Name of created class
@param bases: Base classes
@type attrs: dict
@param attrs: Class attributes
"""
assert "OP_ID" not in attrs, "Class '%s' defining OP_ID" % name
slots = mcs._GetSlots(attrs)
assert "OP_DSC_FIELD" not in attrs or attrs["OP_DSC_FIELD"] in slots, \
"Class '%s' uses unknown field in OP_DSC_FIELD" % name
assert ("OP_DSC_FORMATTER" not in attrs or
callable(attrs["OP_DSC_FORMATTER"])), \
("Class '%s' uses non-callable in OP_DSC_FORMATTER (%s)" %
(name, type(attrs["OP_DSC_FORMATTER"])))
attrs["OP_ID"] = _NameToId(name)
return outils.AutoSlots.__new__(mcs, name, bases, attrs)
@classmethod
def _GetSlots(mcs, attrs):
"""Build the slots out of OP_PARAMS.
"""
# Always set OP_PARAMS to avoid duplicates in BaseOpCode.GetAllParams
params = attrs.setdefault("OP_PARAMS", [])
# Use parameter names as slots
return [pname for (pname, _, _, _) in params]
class BaseOpCode(outils.ValidatedSlots):
"""A simple serializable object.
This object serves as a parent class for OpCode without any custom
field handling.
"""
# pylint: disable=E1101
# as OP_ID is dynamically defined
__metaclass__ = _AutoOpParamSlots
def __getstate__(self):
"""Generic serializer.
This method just returns the contents of the instance as a
dictionary.
@rtype: C{dict}
@return: the instance attributes and their values
"""
state = {}
for name in self.GetAllSlots():
if hasattr(self, name):
state[name] = getattr(self, name)
return state
def __setstate__(self, state):
"""Generic unserializer.
This method just restores from the serialized state the attributes
of the current instance.
@param state: the serialized opcode data
@type state: C{dict}
"""
if not isinstance(state, dict):
raise ValueError("Invalid data to __setstate__: expected dict, got %s" %
type(state))
for name in self.GetAllSlots():
if name not in state and hasattr(self, name):
delattr(self, name)
for name in state:
setattr(self, name, state[name])
@classmethod
def GetAllParams(cls):
"""Compute list of all parameters for an opcode.
"""
slots = []
for parent in cls.__mro__:
slots.extend(getattr(parent, "OP_PARAMS", []))
return slots
def Validate(self, set_defaults): # pylint: disable=W0221
"""Validate opcode parameters, optionally setting default values.
@type set_defaults: bool
@param set_defaults: Whether to set default values
@raise errors.OpPrereqError: When a parameter value doesn't match
requirements
"""
for (attr_name, default, test, _) in self.GetAllParams():
assert test == ht.NoType or callable(test)
if not hasattr(self, attr_name):
if default == ht.NoDefault:
raise errors.OpPrereqError("Required parameter '%s.%s' missing" %
(self.OP_ID, attr_name),
errors.ECODE_INVAL)
elif set_defaults:
if callable(default):
dval = default()
else:
dval = default
setattr(self, attr_name, dval)
if test == ht.NoType:
# no tests here
continue
if set_defaults or hasattr(self, attr_name):
attr_val = getattr(self, attr_name)
if not test(attr_val):
logging.error("OpCode %s, parameter %s, has invalid type %s/value"
" '%s' expecting type %s",
self.OP_ID, attr_name, type(attr_val), attr_val, test)
raise errors.OpPrereqError("Parameter '%s.%s' fails validation" %
(self.OP_ID, attr_name),
errors.ECODE_INVAL)
def _BuildJobDepCheck(relative):
"""Builds check for job dependencies (L{DEPEND_ATTR}).
@type relative: bool
@param relative: Whether to accept relative job IDs (negative)
@rtype: callable
"""
if relative:
job_id = ht.TOr(ht.TJobId, ht.TRelativeJobId)
else:
job_id = ht.TJobId
job_dep = \
ht.TAnd(ht.TOr(ht.TList, ht.TTuple),
ht.TIsLength(2),
ht.TItems([job_id,
ht.TListOf(ht.TElemOf(constants.JOBS_FINALIZED))]))
return ht.TMaybeListOf(job_dep)
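# Each job dependency is a two-item sequence of (job ID, list of finalized job
# statuses to wait for). An illustrative value, assuming relative IDs are
# allowed and constants.JOB_STATUS_SUCCESS is the "success" status:
#   [[-1, [constants.JOB_STATUS_SUCCESS]]]
# i.e. wait for the job submitted immediately before this one (in the same
# SubmitManyJobs call) to finish successfully.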
TNoRelativeJobDependencies = _BuildJobDepCheck(False)
#: List of submission status and job ID as returned by C{SubmitManyJobs}
_TJobIdListItem = \
ht.TAnd(ht.TIsLength(2),
ht.TItems([ht.Comment("success")(ht.TBool),
ht.Comment("Job ID if successful, error message"
" otherwise")(ht.TOr(ht.TString,
ht.TJobId))]))
TJobIdList = ht.TListOf(_TJobIdListItem)
#: Result containing only list of submitted jobs
TJobIdListOnly = ht.TStrictDict(True, True, {
constants.JOB_IDS_KEY: ht.Comment("List of submitted jobs")(TJobIdList),
})
class OpCode(BaseOpCode):
"""Abstract OpCode.
This is the root of the actual OpCode hierarchy. All classes derived
from this class get their OP_ID generated automatically from the class name.
@cvar OP_ID: The ID of this opcode. This should be unique amongst all
children of this class.
@cvar OP_DSC_FIELD: The name of a field whose value will be included in the
string returned by Summary(); see the docstring of that
method for details.
@cvar OP_DSC_FORMATTER: A callable that should format the OP_DSC_FIELD; if
not present, then the field will be simply converted
to string
@cvar OP_PARAMS: List of opcode attributes, the default values they should
get if not already defined, and types they must match.
@cvar OP_RESULT: Callable to verify opcode result
@cvar WITH_LU: Boolean that specifies whether this should be included in
mcpu's dispatch table
@ivar dry_run: Whether the LU should be run in dry-run mode, i.e. just
the check steps
@ivar priority: Opcode priority for queue
"""
# pylint: disable=E1101
# as OP_ID is dynamically defined
WITH_LU = True
OP_PARAMS = [
("dry_run", None, ht.TMaybeBool, "Run checks only, don't execute"),
("debug_level", None, ht.TMaybe(ht.TNonNegativeInt), "Debug level"),
("priority", constants.OP_PRIO_DEFAULT,
ht.TElemOf(constants.OP_PRIO_SUBMIT_VALID), "Opcode priority"),
(DEPEND_ATTR, None, _BuildJobDepCheck(True),
"Job dependencies; if used through ``SubmitManyJobs`` relative (negative)"
" job IDs can be used; see :doc:`design document <design-chained-jobs>`"
" for details"),
(COMMENT_ATTR, None, ht.TMaybeString,
"Comment describing the purpose of the opcode"),
]
OP_RESULT = None
def __getstate__(self):
"""Specialized getstate for opcodes.
This method adds to the state dictionary the OP_ID of the class,
so that on unload we can identify the correct class for
instantiating the opcode.
@rtype: C{dict}
@return: the state as a dictionary
"""
data = BaseOpCode.__getstate__(self)
data["OP_ID"] = self.OP_ID
return data
@classmethod
def LoadOpCode(cls, data):
"""Generic load opcode method.
The method identifies the correct opcode class from the dict-form
by looking for an OP_ID key; if this is not found, or its value is
not available in this module as a child of this class, we fail.
@type data: C{dict}
@param data: the serialized opcode
"""
if not isinstance(data, dict):
raise ValueError("Invalid data to LoadOpCode (%s)" % type(data))
if "OP_ID" not in data:
raise ValueError("Invalid data to LoadOpcode, missing OP_ID")
op_id = data["OP_ID"]
op_class = None
if op_id in OP_MAPPING:
op_class = OP_MAPPING[op_id]
else:
raise ValueError("Invalid data to LoadOpCode: OP_ID %s unsupported" %
op_id)
op = op_class()
new_data = data.copy()
del new_data["OP_ID"]
op.__setstate__(new_data)
return op
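# Illustrative round-trip (assuming the OpTagsSearch class defined below):
# LoadOpCode({"OP_ID": "OP_TAGS_SEARCH", "pattern": "^web"}) returns an
# OpTagsSearch instance whose ``pattern`` attribute is "^web".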
def Summary(self):
"""Generates a summary description of this opcode.
The summary is the value of the OP_ID attribute (without the "OP_"
prefix), plus the value of the OP_DSC_FIELD attribute, if one was
defined; this field should allow to easily identify the operation
(for an instance creation job, e.g., it would be the instance
name).
"""
assert self.OP_ID is not None and len(self.OP_ID) > 3
# all OP_ID start with OP_, we remove that
txt = self.OP_ID[3:]
field_name = getattr(self, "OP_DSC_FIELD", None)
if field_name:
field_value = getattr(self, field_name, None)
field_formatter = getattr(self, "OP_DSC_FORMATTER", None)
if callable(field_formatter):
field_value = field_formatter(field_value)
elif isinstance(field_value, (list, tuple)):
field_value = ",".join(str(i) for i in field_value)
txt = "%s(%s)" % (txt, field_value)
return txt
def TinySummary(self):
"""Generates a compact summary description of the opcode.
"""
assert self.OP_ID.startswith("OP_")
text = self.OP_ID[3:]
for (prefix, supplement) in _SUMMARY_PREFIX.items():
if text.startswith(prefix):
return supplement + text[len(prefix):]
return text
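# For example, an OpInstanceCreate with instance_name "web1.example.com" has
# Summary() == "INSTANCE_CREATE(web1.example.com)" and
# TinySummary() == "I_CREATE".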
# cluster opcodes
class OpClusterPostInit(OpCode):
"""Post cluster initialization.
This opcode does not touch the cluster at all. Its purpose is to run hooks
after the cluster has been initialized.
"""
OP_RESULT = ht.TBool
class OpClusterDestroy(OpCode):
"""Destroy the cluster.
This opcode has no other parameters. All the state is irreversibly
lost after the execution of this opcode.
"""
OP_RESULT = ht.TNonEmptyString
class OpClusterQuery(OpCode):
"""Query cluster information."""
OP_RESULT = ht.TDictOf(ht.TNonEmptyString, ht.TAny)
class OpClusterVerify(OpCode):
"""Submits all jobs necessary to verify the cluster.
"""
OP_PARAMS = [
_PDebugSimulateErrors,
_PErrorCodes,
_PSkipChecks,
_PIgnoreErrors,
_PVerbose,
("group_name", None, ht.TMaybeString, "Group to verify"),
]
OP_RESULT = TJobIdListOnly
class OpClusterVerifyConfig(OpCode):
"""Verify the cluster config.
"""
OP_PARAMS = [
_PDebugSimulateErrors,
_PErrorCodes,
_PIgnoreErrors,
_PVerbose,
]
OP_RESULT = ht.TBool
class OpClusterVerifyGroup(OpCode):
"""Run verify on a node group from the cluster.
@type skip_checks: C{list}
@ivar skip_checks: steps to be skipped from the verify process; this
needs to be a subset of
L{constants.VERIFY_OPTIONAL_CHECKS}; currently
only L{constants.VERIFY_NPLUSONE_MEM} can be passed
"""
OP_DSC_FIELD = "group_name"
OP_PARAMS = [
_PGroupName,
_PDebugSimulateErrors,
_PErrorCodes,
_PSkipChecks,
_PIgnoreErrors,
_PVerbose,
]
OP_RESULT = ht.TBool
class OpClusterVerifyDisks(OpCode):
"""Verify the cluster disks.
"""
OP_RESULT = TJobIdListOnly
class OpGroupVerifyDisks(OpCode):
"""Verifies the status of all disks in a node group.
Result: a tuple of three elements:
- dict of node names with issues (values: error msg)
- list of instances with degraded disks (that should be activated)
- dict of instances with missing logical volumes (values: (node, vol)
pairs with details about the missing volumes)
In normal operation, all lists should be empty. A non-empty list of
instances with degraded disks is still OK (the errors were fixed), but a
non-empty node dict means some node is down, and probably there are
unfixable DRBD errors.
Note that only instances that are drbd-based are taken into
consideration. This might need to be revisited in the future.
"""
OP_DSC_FIELD = "group_name"
OP_PARAMS = [
_PGroupName,
]
OP_RESULT = \
ht.TAnd(ht.TIsLength(3),
ht.TItems([ht.TDictOf(ht.TString, ht.TString),
ht.TListOf(ht.TString),
ht.TDictOf(ht.TString,
ht.TListOf(ht.TListOf(ht.TString)))]))
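# An illustrative (hypothetical) result matching the structure above:
#   ({"node2.example.com": "volume group missing"},
#    ["instance1.example.com"],
#    {"instance2.example.com": [["node3.example.com", "lv-disk0"]]})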
class OpClusterRepairDiskSizes(OpCode):
"""Verify the disk sizes of the instances and fixes configuration
mimatches.
Parameters: optional instances list, in case we want to restrict the
checks to only a subset of the instances.
Result: a list of tuples, (instance, disk, new-size) for changed
configurations.
In normal operation, the list should be empty.
@type instances: list
@ivar instances: the list of instances to check, or empty for all instances
"""
OP_PARAMS = [
("instances", ht.EmptyList, ht.TListOf(ht.TNonEmptyString), None),
]
OP_RESULT = ht.TListOf(ht.TAnd(ht.TIsLength(3),
ht.TItems([ht.TNonEmptyString,
ht.TNonNegativeInt,
ht.TNonNegativeInt])))
class OpClusterConfigQuery(OpCode):
"""Query cluster configuration values."""
OP_PARAMS = [
_POutputFields,
]
OP_RESULT = ht.TListOf(ht.TAny)
class OpClusterRename(OpCode):
"""Rename the cluster.
@type name: C{str}
@ivar name: The new name of the cluster. The name and/or the master IP
address will be changed to match the new name and its IP
address.
"""
OP_DSC_FIELD = "name"
OP_PARAMS = [
("name", ht.NoDefault, ht.TNonEmptyString, None),
]
OP_RESULT = ht.TNonEmptyString
class OpClusterSetParams(OpCode):
"""Change the parameters of the cluster.
@type vg_name: C{str} or C{None}
@ivar vg_name: The new volume group name or None to disable LVM usage.
"""
OP_PARAMS = [
_PHvState,
_PDiskState,
("vg_name", None, ht.TMaybe(ht.TString), "Volume group name"),
("enabled_hypervisors", None,
ht.TMaybe(ht.TAnd(ht.TListOf(ht.TElemOf(constants.HYPER_TYPES)),
ht.TTrue)),
"List of enabled hypervisors"),
("hvparams", None,
ht.TMaybe(ht.TDictOf(ht.TNonEmptyString, ht.TDict)),
"Cluster-wide hypervisor parameter defaults, hypervisor-dependent"),
("beparams", None, ht.TMaybeDict,
"Cluster-wide backend parameter defaults"),
("os_hvp", None, ht.TMaybe(ht.TDictOf(ht.TNonEmptyString, ht.TDict)),
"Cluster-wide per-OS hypervisor parameter defaults"),
("osparams", None,
ht.TMaybe(ht.TDictOf(ht.TNonEmptyString, ht.TDict)),
"Cluster-wide OS parameter defaults"),
_PDiskParams,
("candidate_pool_size", None, ht.TMaybe(ht.TPositiveInt),
"Master candidate pool size"),
("uid_pool", None, ht.NoType,
"Set UID pool, must be list of lists describing UID ranges (two items,"
" start and end inclusive)"),
("add_uids", None, ht.NoType,
"Extend UID pool, must be list of lists describing UID ranges (two"
" items, start and end inclusive) to be added"),
("remove_uids", None, ht.NoType,
"Shrink UID pool, must be list of lists describing UID ranges (two"
" items, start and end inclusive) to be removed"),
("maintain_node_health", None, ht.TMaybeBool,
"Whether to automatically maintain node health"),
("prealloc_wipe_disks", None, ht.TMaybeBool,
"Whether to wipe disks before allocating them to instances"),
("nicparams", None, ht.TMaybeDict, "Cluster-wide NIC parameter defaults"),
("ndparams", None, ht.TMaybeDict, "Cluster-wide node parameter defaults"),
("ipolicy", None, ht.TMaybeDict,
"Cluster-wide :ref:`instance policy <rapi-ipolicy>` specs"),
("drbd_helper", None, ht.TMaybe(ht.TString), "DRBD helper program"),
("default_iallocator", None, ht.TMaybe(ht.TString),
"Default iallocator for cluster"),
("master_netdev", None, ht.TMaybe(ht.TString),
"Master network device"),
("master_netmask", None, ht.TMaybe(ht.TNonNegativeInt),
"Netmask of the master IP"),
("reserved_lvs", None, ht.TMaybeListOf(ht.TNonEmptyString),
"List of reserved LVs"),
("hidden_os", None, _TestClusterOsList,
"Modify list of hidden operating systems: each modification must have"
" two items, the operation and the OS name; the operation can be"
" ``%s`` or ``%s``" % (constants.DDM_ADD, constants.DDM_REMOVE)),
("blacklisted_os", None, _TestClusterOsList,
"Modify list of blacklisted operating systems: each modification must"
" have two items, the operation and the OS name; the operation can be"
" ``%s`` or ``%s``" % (constants.DDM_ADD, constants.DDM_REMOVE)),
("use_external_mip_script", None, ht.TMaybeBool,
"Whether to use an external master IP address setup script"),
]
OP_RESULT = ht.TNone
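# An illustrative (hypothetical) value for the ``hidden_os`` or
# ``blacklisted_os`` modification lists above, assuming "debian-image" and
# "old-image" are installed OS names:
#   [(constants.DDM_ADD, "debian-image"), (constants.DDM_REMOVE, "old-image")]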
class OpClusterRedistConf(OpCode):
"""Force a full push of the cluster configuration.
"""
OP_RESULT = ht.TNone
class OpClusterActivateMasterIp(OpCode):
"""Activate the master IP on the master node.
"""
OP_RESULT = ht.TNone
class OpClusterDeactivateMasterIp(OpCode):
"""Deactivate the master IP on the master node.
"""
OP_RESULT = ht.TNone
class OpQuery(OpCode):
"""Query for resources/items.
@ivar what: Resources to query for, must be one of L{constants.QR_VIA_OP}
@ivar fields: List of fields to retrieve
@ivar qfilter: Query filter
"""
OP_DSC_FIELD = "what"
OP_PARAMS = [
_PQueryWhat,
_PUseLocking,
("fields", ht.NoDefault, ht.TListOf(ht.TNonEmptyString),
"Requested fields"),
("qfilter", None, ht.TMaybe(ht.TList),
"Query filter"),
]
OP_RESULT = \
_GenerateObjectTypeCheck(objects.QueryResponse, {
"fields": ht.TListOf(_TQueryFieldDef),
"data": _TQueryResult,
})
class OpQueryFields(OpCode):
"""Query for available resource/item fields.
@ivar what: Resources to query for, must be one of L{constants.QR_VIA_OP}
@ivar fields: List of fields to retrieve
"""
OP_DSC_FIELD = "what"
OP_PARAMS = [
_PQueryWhat,
("fields", None, ht.TMaybeListOf(ht.TNonEmptyString),
"Requested fields; if not given, all are returned"),
]
OP_RESULT = \
_GenerateObjectTypeCheck(objects.QueryFieldsResponse, {
"fields": ht.TListOf(_TQueryFieldDef),
})
class OpOobCommand(OpCode):
"""Interact with OOB."""
OP_PARAMS = [
("node_names", ht.EmptyList, ht.TListOf(ht.TNonEmptyString),
"List of nodes to run the OOB command against"),
("command", ht.NoDefault, ht.TElemOf(constants.OOB_COMMANDS),
"OOB command to be run"),
("timeout", constants.OOB_TIMEOUT, ht.TInt,
"Timeout before the OOB helper will be terminated"),
("ignore_status", False, ht.TBool,
"Ignores the node offline status for power off"),
("power_delay", constants.OOB_POWER_DELAY, ht.TNonNegativeFloat,
"Time in seconds to wait between powering on nodes"),
]
# Fixme: Make it more specific with all the special cases in LUOobCommand
OP_RESULT = _TQueryResult
class OpRestrictedCommand(OpCode):
"""Runs a restricted command on node(s).
"""
OP_PARAMS = [
_PUseLocking,
("nodes", ht.NoDefault, ht.TListOf(ht.TNonEmptyString),
"Nodes on which the command should be run (at least one)"),
("command", ht.NoDefault, ht.TNonEmptyString,
"Command name (no parameters)"),
]
_RESULT_ITEMS = [
ht.Comment("success")(ht.TBool),
ht.Comment("output or error message")(ht.TString),
]
OP_RESULT = \
ht.TListOf(ht.TAnd(ht.TIsLength(len(_RESULT_ITEMS)),
ht.TItems(_RESULT_ITEMS)))
# node opcodes
class OpNodeRemove(OpCode):
"""Remove a node.
@type node_name: C{str}
@ivar node_name: The name of the node to remove. If the node still has
instances on it, the operation will fail.
"""
OP_DSC_FIELD = "node_name"
OP_PARAMS = [
_PNodeName,
]
OP_RESULT = ht.TNone
class OpNodeAdd(OpCode):
"""Add a node to the cluster.
@type node_name: C{str}
@ivar node_name: The name of the node to add. This can be a short name,
but it will be expanded to the FQDN.
@type primary_ip: IP address
@ivar primary_ip: The primary IP of the node. This will be ignored when the
opcode is submitted, but will be filled during the node
add (so it will be visible in the job query).
@type secondary_ip: IP address
@ivar secondary_ip: The secondary IP of the node. This needs to be passed
if the cluster has been initialized in 'dual-network'
mode, otherwise it must not be given.
@type readd: C{bool}
@ivar readd: Whether to re-add an existing node to the cluster. If
this is not passed, then the operation will abort if the node
name is already in the cluster; use this parameter to 'repair'
a node that had its configuration broken, or was reinstalled
without removal from the cluster.
@type group: C{str}
@ivar group: The node group to which this node will belong.
@type vm_capable: C{bool}
@ivar vm_capable: The vm_capable node attribute
@type master_capable: C{bool}
@ivar master_capable: The master_capable node attribute
"""
OP_DSC_FIELD = "node_name"
OP_PARAMS = [
_PNodeName,
_PHvState,
_PDiskState,
("primary_ip", None, ht.NoType, "Primary IP address"),
("secondary_ip", None, ht.TMaybeString, "Secondary IP address"),
("readd", False, ht.TBool, "Whether node is re-added to cluster"),
("group", None, ht.TMaybeString, "Initial node group"),
("master_capable", None, ht.TMaybeBool,
"Whether node can become master or master candidate"),
("vm_capable", None, ht.TMaybeBool,
"Whether node can host instances"),
("ndparams", None, ht.TMaybeDict, "Node parameters"),
]
OP_RESULT = ht.TNone
class OpNodeQuery(OpCode):
"""Compute the list of nodes."""
OP_PARAMS = [
_POutputFields,
_PUseLocking,
("names", ht.EmptyList, ht.TListOf(ht.TNonEmptyString),
"Empty list to query all nodes, node names otherwise"),
]
OP_RESULT = _TOldQueryResult
class OpNodeQueryvols(OpCode):
"""Get list of volumes on node."""
OP_PARAMS = [
_POutputFields,
("nodes", ht.EmptyList, ht.TListOf(ht.TNonEmptyString),
"Empty list to query all nodes, node names otherwise"),
]
OP_RESULT = ht.TListOf(ht.TAny)
class OpNodeQueryStorage(OpCode):
"""Get information on storage for node(s)."""
OP_PARAMS = [
_POutputFields,
_PStorageType,
("nodes", ht.EmptyList, ht.TListOf(ht.TNonEmptyString), "List of nodes"),
("name", None, ht.TMaybeString, "Storage name"),
]
OP_RESULT = _TOldQueryResult
class OpNodeModifyStorage(OpCode):
"""Modifies the properies of a storage unit"""
OP_DSC_FIELD = "node_name"
OP_PARAMS = [
_PNodeName,
_PStorageType,
_PStorageName,
("changes", ht.NoDefault, ht.TDict, "Requested changes"),
]
OP_RESULT = ht.TNone
class OpRepairNodeStorage(OpCode):
"""Repairs the volume group on a node."""
OP_DSC_FIELD = "node_name"
OP_PARAMS = [
_PNodeName,
_PStorageType,
_PStorageName,
_PIgnoreConsistency,
]
OP_RESULT = ht.TNone
class OpNodeSetParams(OpCode):
"""Change the parameters of a node."""
OP_DSC_FIELD = "node_name"
OP_PARAMS = [
_PNodeName,
_PForce,
_PHvState,
_PDiskState,
("master_candidate", None, ht.TMaybeBool,
"Whether the node should become a master candidate"),
("offline", None, ht.TMaybeBool,
"Whether the node should be marked as offline"),
("drained", None, ht.TMaybeBool,
"Whether the node should be marked as drained"),
("auto_promote", False, ht.TBool,
"Whether node(s) should be promoted to master candidate if necessary"),
("master_capable", None, ht.TMaybeBool,
"Denote whether node can become master or master candidate"),
("vm_capable", None, ht.TMaybeBool,
"Denote whether node can host instances"),
("secondary_ip", None, ht.TMaybeString,
"Change node's secondary IP address"),
("ndparams", None, ht.TMaybeDict, "Set node parameters"),
("powered", None, ht.TMaybeBool,
"Whether the node should be marked as powered"),
]
OP_RESULT = _TSetParamsResult
class OpNodePowercycle(OpCode):
"""Tries to powercycle a node."""
OP_DSC_FIELD = "node_name"
OP_PARAMS = [
_PNodeName,
_PForce,
]
OP_RESULT = ht.TMaybeString
class OpNodeMigrate(OpCode):
"""Migrate all instances from a node."""
OP_DSC_FIELD = "node_name"
OP_PARAMS = [
_PNodeName,
_PMigrationMode,
_PMigrationLive,
_PMigrationTargetNode,
_PAllowRuntimeChgs,
_PIgnoreIpolicy,
_PIAllocFromDesc("Iallocator for deciding the target node"
" for shared-storage instances"),
]
OP_RESULT = TJobIdListOnly
class OpNodeEvacuate(OpCode):
"""Evacuate instances off a number of nodes."""
OP_DSC_FIELD = "node_name"
OP_PARAMS = [
_PEarlyRelease,
_PNodeName,
("remote_node", None, ht.TMaybeString, "New secondary node"),
_PIAllocFromDesc("Iallocator for computing solution"),
("mode", ht.NoDefault, ht.TElemOf(constants.NODE_EVAC_MODES),
"Node evacuation mode"),
]
OP_RESULT = TJobIdListOnly
# instance opcodes
class OpInstanceCreate(OpCode):
"""Create an instance.
@ivar instance_name: Instance name
@ivar mode: Instance creation mode (one of L{constants.INSTANCE_CREATE_MODES})
@ivar source_handshake: Signed handshake from source (remote import only)
@ivar source_x509_ca: Source X509 CA in PEM format (remote import only)
@ivar source_instance_name: Previous name of instance (remote import only)
@ivar source_shutdown_timeout: Shutdown timeout used for source instance
(remote import only)
"""
OP_DSC_FIELD = "instance_name"
OP_PARAMS = [
_PInstanceName,
_PForceVariant,
_PWaitForSync,
_PNameCheck,
_PIgnoreIpolicy,
_POpportunisticLocking,
("beparams", ht.EmptyDict, ht.TDict, "Backend parameters for instance"),
("disks", ht.NoDefault, ht.TListOf(_TDiskParams),
"Disk descriptions, for example ``[{\"%s\": 100}, {\"%s\": 5}]``;"
" each disk definition must contain a ``%s`` value and"
" can contain an optional ``%s`` value denoting the disk access mode"
" (%s)" %
(constants.IDISK_SIZE, constants.IDISK_SIZE, constants.IDISK_SIZE,
constants.IDISK_MODE,
" or ".join("``%s``" % i for i in sorted(constants.DISK_ACCESS_SET)))),
("disk_template", ht.NoDefault, _BuildDiskTemplateCheck(True),
"Disk template"),
("file_driver", None, ht.TMaybe(ht.TElemOf(constants.FILE_DRIVER)),
"Driver for file-backed disks"),
("file_storage_dir", None, ht.TMaybeString,
"Directory for storing file-backed disks"),
("hvparams", ht.EmptyDict, ht.TDict,
"Hypervisor parameters for instance, hypervisor-dependent"),
("hypervisor", None, ht.TMaybeString, "Hypervisor"),
_PIAllocFromDesc("Iallocator for deciding which node(s) to use"),
("identify_defaults", False, ht.TBool,
"Reset instance parameters to default if equal"),
("ip_check", True, ht.TBool, _PIpCheckDoc),
("conflicts_check", True, ht.TBool, "Check for conflicting IPs"),
("mode", ht.NoDefault, ht.TElemOf(constants.INSTANCE_CREATE_MODES),
"Instance creation mode"),
("nics", ht.NoDefault, ht.TListOf(_TestNicDef),
"List of NIC (network interface) definitions, for example"
" ``[{}, {}, {\"%s\": \"198.51.100.4\"}]``; each NIC definition can"
" contain the optional values %s" %
(constants.INIC_IP,
", ".join("``%s``" % i for i in sorted(constants.INIC_PARAMS)))),
("no_install", None, ht.TMaybeBool,
"Do not install the OS (will disable automatic start)"),
("osparams", ht.EmptyDict, ht.TDict, "OS parameters for instance"),
("os_type", None, ht.TMaybeString, "Operating system"),
("pnode", None, ht.TMaybeString, "Primary node"),
("snode", None, ht.TMaybeString, "Secondary node"),
("source_handshake", None, ht.TMaybe(ht.TList),
"Signed handshake from source (remote import only)"),
("source_instance_name", None, ht.TMaybeString,
"Source instance name (remote import only)"),
("source_shutdown_timeout", constants.DEFAULT_SHUTDOWN_TIMEOUT,
ht.TNonNegativeInt,
"How long source instance was given to shut down (remote import only)"),
("source_x509_ca", None, ht.TMaybeString,
"Source X509 CA in PEM format (remote import only)"),
("src_node", None, ht.TMaybeString, "Source node for import"),
("src_path", None, ht.TMaybeString, "Source directory for import"),
("start", True, ht.TBool, "Whether to start instance after creation"),
("tags", ht.EmptyList, ht.TListOf(ht.TNonEmptyString), "Instance tags"),
]
OP_RESULT = ht.Comment("instance nodes")(ht.TListOf(ht.TNonEmptyString))
class OpInstanceMultiAlloc(OpCode):
"""Allocates multiple instances.
"""
OP_PARAMS = [
_POpportunisticLocking,
_PIAllocFromDesc("Iallocator used to allocate all the instances"),
("instances", ht.EmptyList, ht.TListOf(ht.TInstanceOf(OpInstanceCreate)),
"List of instance create opcodes describing the instances to allocate"),
]
_JOB_LIST = ht.Comment("List of submitted jobs")(TJobIdList)
ALLOCATABLE_KEY = "allocatable"
FAILED_KEY = "allocatable"
OP_RESULT = ht.TStrictDict(True, True, {
constants.JOB_IDS_KEY: _JOB_LIST,
ALLOCATABLE_KEY: ht.TListOf(ht.TNonEmptyString),
FAILED_KEY: ht.TListOf(ht.TNonEmptyString),
})
def __getstate__(self):
"""Generic serializer.
"""
state = OpCode.__getstate__(self)
if hasattr(self, "instances"):
# pylint: disable=E1101
state["instances"] = [inst.__getstate__() for inst in self.instances]
return state
def __setstate__(self, state):
"""Generic unserializer.
This method just restores from the serialized state the attributes
of the current instance.
@param state: the serialized opcode data
@type state: C{dict}
"""
if not isinstance(state, dict):
raise ValueError("Invalid data to __setstate__: expected dict, got %s" %
type(state))
if "instances" in state:
state["instances"] = map(OpCode.LoadOpCode, state["instances"])
return OpCode.__setstate__(self, state)
def Validate(self, set_defaults):
"""Validates this opcode.
We do this recursively.
"""
OpCode.Validate(self, set_defaults)
for inst in self.instances: # pylint: disable=E1101
inst.Validate(set_defaults)
class OpInstanceReinstall(OpCode):
"""Reinstall an instance's OS."""
OP_DSC_FIELD = "instance_name"
OP_PARAMS = [
_PInstanceName,
_PForceVariant,
("os_type", None, ht.TMaybeString, "Instance operating system"),
("osparams", None, ht.TMaybeDict, "Temporary OS parameters"),
]
OP_RESULT = ht.TNone
class OpInstanceRemove(OpCode):
"""Remove an instance."""
OP_DSC_FIELD = "instance_name"
OP_PARAMS = [
_PInstanceName,
_PShutdownTimeout,
("ignore_failures", False, ht.TBool,
"Whether to ignore failures during removal"),
]
OP_RESULT = ht.TNone
class OpInstanceRename(OpCode):
"""Rename an instance."""
OP_PARAMS = [
_PInstanceName,
_PNameCheck,
("new_name", ht.NoDefault, ht.TNonEmptyString, "New instance name"),
("ip_check", False, ht.TBool, _PIpCheckDoc),
]
OP_RESULT = ht.Comment("New instance name")(ht.TNonEmptyString)
class OpInstanceStartup(OpCode):
"""Startup an instance."""
OP_DSC_FIELD = "instance_name"
OP_PARAMS = [
_PInstanceName,
_PForce,
_PIgnoreOfflineNodes,
("hvparams", ht.EmptyDict, ht.TDict,
"Temporary hypervisor parameters, hypervisor-dependent"),
("beparams", ht.EmptyDict, ht.TDict, "Temporary backend parameters"),
_PNoRemember,
_PStartupPaused,
]
OP_RESULT = ht.TNone
class OpInstanceShutdown(OpCode):
"""Shutdown an instance."""
OP_DSC_FIELD = "instance_name"
OP_PARAMS = [
_PInstanceName,
_PForce,
_PIgnoreOfflineNodes,
("timeout", constants.DEFAULT_SHUTDOWN_TIMEOUT, ht.TNonNegativeInt,
"How long to wait for instance to shut down"),
_PNoRemember,
]
OP_RESULT = ht.TNone
class OpInstanceReboot(OpCode):
"""Reboot an instance."""
OP_DSC_FIELD = "instance_name"
OP_PARAMS = [
_PInstanceName,
_PShutdownTimeout,
("ignore_secondaries", False, ht.TBool,
"Whether to start the instance even if secondary disks are failing"),
("reboot_type", ht.NoDefault, ht.TElemOf(constants.REBOOT_TYPES),
"How to reboot instance"),
("reason", (constants.INSTANCE_REASON_SOURCE_UNKNOWN, None),
ht.TAnd(ht.TIsLength(2),
ht.TItems([
ht.TElemOf(constants.INSTANCE_REASON_SOURCES),
ht.TMaybeString,
])),
"The reason why the reboot is happening"),
]
OP_RESULT = ht.TNone
class OpInstanceReplaceDisks(OpCode):
"""Replace the disks of an instance."""
OP_DSC_FIELD = "instance_name"
OP_PARAMS = [
_PInstanceName,
_PEarlyRelease,
_PIgnoreIpolicy,
("mode", ht.NoDefault, ht.TElemOf(constants.REPLACE_MODES),
"Replacement mode"),
("disks", ht.EmptyList, ht.TListOf(ht.TNonNegativeInt),
"Disk indexes"),
("remote_node", None, ht.TMaybeString, "New secondary node"),
_PIAllocFromDesc("Iallocator for deciding new secondary node"),
]
OP_RESULT = ht.TNone
class OpInstanceFailover(OpCode):
"""Failover an instance."""
OP_DSC_FIELD = "instance_name"
OP_PARAMS = [
_PInstanceName,
_PShutdownTimeout,
_PIgnoreConsistency,
_PMigrationTargetNode,
_PIgnoreIpolicy,
_PIAllocFromDesc("Iallocator for deciding the target node for"
" shared-storage instances"),
]
OP_RESULT = ht.TNone
class OpInstanceMigrate(OpCode):
"""Migrate an instance.
This migrates an instance (without shutting it down) to its secondary
node.
@ivar instance_name: the name of the instance
@ivar mode: the migration mode (live, non-live or None for auto)
"""
OP_DSC_FIELD = "instance_name"
OP_PARAMS = [
_PInstanceName,
_PMigrationMode,
_PMigrationLive,
_PMigrationTargetNode,
_PAllowRuntimeChgs,
_PIgnoreIpolicy,
("cleanup", False, ht.TBool,
"Whether a previously failed migration should be cleaned up"),
_PIAllocFromDesc("Iallocator for deciding the target node for"
" shared-storage instances"),
("allow_failover", False, ht.TBool,
"Whether we can fallback to failover if migration is not possible"),
]
OP_RESULT = ht.TNone
class OpInstanceMove(OpCode):
"""Move an instance.
This moves an instance (shutting it down and copying its data) to an
arbitrary node.
@ivar instance_name: the name of the instance
@ivar target_node: the destination node
"""
OP_DSC_FIELD = "instance_name"
OP_PARAMS = [
_PInstanceName,
_PShutdownTimeout,
_PIgnoreIpolicy,
("target_node", ht.NoDefault, ht.TNonEmptyString, "Target node"),
_PIgnoreConsistency,
]
OP_RESULT = ht.TNone
class OpInstanceConsole(OpCode):
"""Connect to an instance's console."""
OP_DSC_FIELD = "instance_name"
OP_PARAMS = [
_PInstanceName,
]
OP_RESULT = ht.TDict
class OpInstanceActivateDisks(OpCode):
"""Activate an instance's disks."""
OP_DSC_FIELD = "instance_name"
OP_PARAMS = [
_PInstanceName,
("ignore_size", False, ht.TBool, "Whether to ignore recorded size"),
_PWaitForSyncFalse,
]
OP_RESULT = ht.TListOf(ht.TAnd(ht.TIsLength(3),
ht.TItems([ht.TNonEmptyString,
ht.TNonEmptyString,
ht.TNonEmptyString])))
class OpInstanceDeactivateDisks(OpCode):
"""Deactivate an instance's disks."""
OP_DSC_FIELD = "instance_name"
OP_PARAMS = [
_PInstanceName,
_PForce,
]
OP_RESULT = ht.TNone
class OpInstanceRecreateDisks(OpCode):
"""Recreate an instance's disks."""
_TDiskChanges = \
ht.TAnd(ht.TIsLength(2),
ht.TItems([ht.Comment("Disk index")(ht.TNonNegativeInt),
ht.Comment("Parameters")(_TDiskParams)]))
OP_DSC_FIELD = "instance_name"
OP_PARAMS = [
_PInstanceName,
("disks", ht.EmptyList,
ht.TOr(ht.TListOf(ht.TNonNegativeInt), ht.TListOf(_TDiskChanges)),
"List of disk indexes (deprecated) or a list of tuples containing a disk"
" index and a possibly empty dictionary with disk parameter changes"),
("nodes", ht.EmptyList, ht.TListOf(ht.TNonEmptyString),
"New instance nodes, if relocation is desired"),
_PIAllocFromDesc("Iallocator for deciding new nodes"),
]
OP_RESULT = ht.TNone
class OpInstanceQuery(OpCode):
"""Compute the list of instances."""
OP_PARAMS = [
_POutputFields,
_PUseLocking,
("names", ht.EmptyList, ht.TListOf(ht.TNonEmptyString),
"Empty list to query all instances, instance names otherwise"),
]
OP_RESULT = _TOldQueryResult
class OpInstanceQueryData(OpCode):
"""Compute the run-time status of instances."""
OP_PARAMS = [
_PUseLocking,
("instances", ht.EmptyList, ht.TListOf(ht.TNonEmptyString),
"Instance names"),
("static", False, ht.TBool,
"Whether to only return configuration data without querying"
" nodes"),
]
OP_RESULT = ht.TDictOf(ht.TNonEmptyString, ht.TDict)
def _TestInstSetParamsModList(fn):
"""Generates a check for modification lists.
"""
# Old format
# TODO: Remove in version 2.8 including support in LUInstanceSetParams
old_mod_item_fn = \
ht.TAnd(ht.TIsLength(2), ht.TItems([
ht.TOr(ht.TElemOf(constants.DDMS_VALUES), ht.TNonNegativeInt),
fn,
]))
# New format, supporting adding/removing disks/NICs at arbitrary indices
mod_item_fn = \
ht.TAnd(ht.TIsLength(3), ht.TItems([
ht.TElemOf(constants.DDMS_VALUES_WITH_MODIFY),
ht.Comment("Disk index, can be negative, e.g. -1 for last disk")(ht.TInt),
fn,
]))
return ht.TOr(ht.Comment("Recommended")(ht.TListOf(mod_item_fn)),
ht.Comment("Deprecated")(ht.TListOf(old_mod_item_fn)))
class OpInstanceSetParams(OpCode):
"""Change the parameters of an instance.
"""
TestNicModifications = _TestInstSetParamsModList(_TestNicDef)
TestDiskModifications = _TestInstSetParamsModList(_TDiskParams)
OP_DSC_FIELD = "instance_name"
OP_PARAMS = [
_PInstanceName,
_PForce,
_PForceVariant,
_PIgnoreIpolicy,
("nics", ht.EmptyList, TestNicModifications,
"List of NIC changes: each item is of the form ``(op, index, settings)``,"
" ``op`` is one of ``%s``, ``%s`` or ``%s``, ``index`` can be either -1"
" to refer to the last position, or a zero-based index number; a"
" deprecated version of this parameter used the form ``(op, settings)``,"
" where ``op`` can be ``%s`` to add a new NIC with the specified"
" settings, ``%s`` to remove the last NIC or a number to modify the"
" settings of the NIC with that index" %
(constants.DDM_ADD, constants.DDM_MODIFY, constants.DDM_REMOVE,
constants.DDM_ADD, constants.DDM_REMOVE)),
("disks", ht.EmptyList, TestDiskModifications,
"List of disk changes; see ``nics``"),
("beparams", ht.EmptyDict, ht.TDict, "Per-instance backend parameters"),
("runtime_mem", None, ht.TMaybePositiveInt, "New runtime memory"),
("hvparams", ht.EmptyDict, ht.TDict,
"Per-instance hypervisor parameters, hypervisor-dependent"),
("disk_template", None, ht.TMaybe(_BuildDiskTemplateCheck(False)),
"Disk template for instance"),
("remote_node", None, ht.TMaybeString,
"Secondary node (used when changing disk template)"),
("os_name", None, ht.TMaybeString,
"Change the instance's OS without reinstalling the instance"),
("osparams", None, ht.TMaybeDict, "Per-instance OS parameters"),
("wait_for_sync", True, ht.TBool,
"Whether to wait for the disk to synchronize, when changing template"),
("offline", None, ht.TMaybeBool, "Whether to mark instance as offline"),
("conflicts_check", True, ht.TBool, "Check for conflicting IPs"),
]
OP_RESULT = _TSetParamsResult
class OpInstanceGrowDisk(OpCode):
"""Grow a disk of an instance."""
OP_DSC_FIELD = "instance_name"
OP_PARAMS = [
_PInstanceName,
_PWaitForSync,
("disk", ht.NoDefault, ht.TInt, "Disk index"),
("amount", ht.NoDefault, ht.TNonNegativeInt,
"Amount of disk space to add (megabytes)"),
("absolute", False, ht.TBool,
"Whether the amount parameter is an absolute target or a relative one"),
]
OP_RESULT = ht.TNone
class OpInstanceChangeGroup(OpCode):
"""Moves an instance to another node group."""
OP_DSC_FIELD = "instance_name"
OP_PARAMS = [
_PInstanceName,
_PEarlyRelease,
_PIAllocFromDesc("Iallocator for computing solution"),
_PTargetGroups,
]
OP_RESULT = TJobIdListOnly
# Node group opcodes
class OpGroupAdd(OpCode):
"""Add a node group to the cluster."""
OP_DSC_FIELD = "group_name"
OP_PARAMS = [
_PGroupName,
_PNodeGroupAllocPolicy,
_PGroupNodeParams,
_PDiskParams,
_PHvState,
_PDiskState,
("ipolicy", None, ht.TMaybeDict,
"Group-wide :ref:`instance policy <rapi-ipolicy>` specs"),
]
OP_RESULT = ht.TNone
class OpGroupAssignNodes(OpCode):
"""Assign nodes to a node group."""
OP_DSC_FIELD = "group_name"
OP_PARAMS = [
_PGroupName,
_PForce,
("nodes", ht.NoDefault, ht.TListOf(ht.TNonEmptyString),
"List of nodes to assign"),
]
OP_RESULT = ht.TNone
class OpGroupQuery(OpCode):
"""Compute the list of node groups."""
OP_PARAMS = [
_POutputFields,
("names", ht.EmptyList, ht.TListOf(ht.TNonEmptyString),
"Empty list to query all groups, group names otherwise"),
]
OP_RESULT = _TOldQueryResult
class OpGroupSetParams(OpCode):
"""Change the parameters of a node group."""
OP_DSC_FIELD = "group_name"
OP_PARAMS = [
_PGroupName,
_PNodeGroupAllocPolicy,
_PGroupNodeParams,
_PDiskParams,
_PHvState,
_PDiskState,
("ipolicy", None, ht.TMaybeDict, "Group-wide instance policy specs"),
]
OP_RESULT = _TSetParamsResult
class OpGroupRemove(OpCode):
"""Remove a node group from the cluster."""
OP_DSC_FIELD = "group_name"
OP_PARAMS = [
_PGroupName,
]
OP_RESULT = ht.TNone
class OpGroupRename(OpCode):
"""Rename a node group in the cluster."""
OP_PARAMS = [
_PGroupName,
("new_name", ht.NoDefault, ht.TNonEmptyString, "New group name"),
]
OP_RESULT = ht.Comment("New group name")(ht.TNonEmptyString)
class OpGroupEvacuate(OpCode):
"""Evacuate a node group in the cluster."""
OP_DSC_FIELD = "group_name"
OP_PARAMS = [
_PGroupName,
_PEarlyRelease,
_PIAllocFromDesc("Iallocator for computing solution"),
_PTargetGroups,
]
OP_RESULT = TJobIdListOnly
# OS opcodes
class OpOsDiagnose(OpCode):
"""Compute the list of guest operating systems."""
OP_PARAMS = [
_POutputFields,
("names", ht.EmptyList, ht.TListOf(ht.TNonEmptyString),
"Which operating systems to diagnose"),
]
OP_RESULT = _TOldQueryResult
# ExtStorage opcodes
class OpExtStorageDiagnose(OpCode):
"""Compute the list of external storage providers."""
OP_PARAMS = [
_POutputFields,
("names", ht.EmptyList, ht.TListOf(ht.TNonEmptyString),
"Which ExtStorage Provider to diagnose"),
]
OP_RESULT = _TOldQueryResult
# Exports opcodes
class OpBackupQuery(OpCode):
"""Compute the list of exported images."""
OP_PARAMS = [
_PUseLocking,
("nodes", ht.EmptyList, ht.TListOf(ht.TNonEmptyString),
"Empty list to query all nodes, node names otherwise"),
]
OP_RESULT = ht.TDictOf(ht.TNonEmptyString,
ht.TOr(ht.Comment("False on error")(ht.TBool),
ht.TListOf(ht.TNonEmptyString)))
class OpBackupPrepare(OpCode):
"""Prepares an instance export.
@ivar instance_name: Instance name
@ivar mode: Export mode (one of L{constants.EXPORT_MODES})
"""
OP_DSC_FIELD = "instance_name"
OP_PARAMS = [
_PInstanceName,
("mode", ht.NoDefault, ht.TElemOf(constants.EXPORT_MODES),
"Export mode"),
]
OP_RESULT = ht.TMaybeDict
class OpBackupExport(OpCode):
"""Export an instance.
For local exports, the export destination is the node name. For
remote exports, the export destination is a list of tuples, each
consisting of hostname/IP address, port, magic, HMAC and HMAC
salt. The HMAC is calculated using the cluster domain secret over
the value "${index}:${hostname}:${port}". The destination X509 CA
must be a signed certificate.
@ivar mode: Export mode (one of L{constants.EXPORT_MODES})
@ivar target_node: Export destination
@ivar x509_key_name: X509 key to use (remote export only)
@ivar destination_x509_ca: Destination X509 CA in PEM format (remote export
only)
"""
OP_DSC_FIELD = "instance_name"
OP_PARAMS = [
_PInstanceName,
_PShutdownTimeout,
# TODO: Rename target_node as it changes meaning for different export modes
# (e.g. "destination")
("target_node", ht.NoDefault, ht.TOr(ht.TNonEmptyString, ht.TList),
"Destination information, depends on export mode"),
("shutdown", True, ht.TBool, "Whether to shutdown instance before export"),
("remove_instance", False, ht.TBool,
"Whether to remove instance after export"),
("ignore_remove_failures", False, ht.TBool,
"Whether to ignore failures while removing instances"),
("mode", constants.EXPORT_MODE_LOCAL, ht.TElemOf(constants.EXPORT_MODES),
"Export mode"),
("x509_key_name", None, ht.TMaybe(ht.TList),
"Name of X509 key (remote export only)"),
("destination_x509_ca", None, ht.TMaybeString,
"Destination X509 CA (remote export only)"),
]
OP_RESULT = \
ht.TAnd(ht.TIsLength(2), ht.TItems([
ht.Comment("Finalizing status")(ht.TBool),
ht.Comment("Status for every exported disk")(ht.TListOf(ht.TBool)),
]))
class OpBackupRemove(OpCode):
"""Remove an instance's export."""
OP_DSC_FIELD = "instance_name"
OP_PARAMS = [
_PInstanceName,
]
OP_RESULT = ht.TNone
# Tags opcodes
class OpTagsGet(OpCode):
"""Returns the tags of the given object."""
OP_DSC_FIELD = "name"
OP_PARAMS = [
_PTagKind,
# Not using _PUseLocking as the default is different for historical reasons
("use_locking", True, ht.TBool, "Whether to use synchronization"),
# Name is only meaningful for nodes and instances
("name", ht.NoDefault, ht.TMaybeString,
"Name of object to retrieve tags from"),
]
OP_RESULT = ht.TListOf(ht.TNonEmptyString)
class OpTagsSearch(OpCode):
"""Searches the tags in the cluster for a given pattern."""
OP_DSC_FIELD = "pattern"
OP_PARAMS = [
("pattern", ht.NoDefault, ht.TNonEmptyString,
"Search pattern (regular expression)"),
]
OP_RESULT = ht.TListOf(ht.TAnd(ht.TIsLength(2), ht.TItems([
ht.TNonEmptyString,
ht.TNonEmptyString,
])))
class OpTagsSet(OpCode):
"""Add a list of tags on a given object."""
OP_PARAMS = [
_PTagKind,
_PTags,
# Name is only meaningful for groups, nodes and instances
("name", ht.NoDefault, ht.TMaybeString,
"Name of object where tag(s) should be added"),
]
OP_RESULT = ht.TNone
class OpTagsDel(OpCode):
"""Remove a list of tags from a given object."""
OP_PARAMS = [
_PTagKind,
_PTags,
# Name is only meaningful for groups, nodes and instances
("name", ht.NoDefault, ht.TMaybeString,
"Name of object where tag(s) should be deleted"),
]
OP_RESULT = ht.TNone
# Test opcodes
class OpTestDelay(OpCode):
"""Sleeps for a configured amount of time.
This is used just for debugging and testing.
Parameters:
- duration: the time to sleep, in seconds
- on_master: if true, sleep on the master
- on_nodes: list of nodes in which to sleep
If the on_master parameter is true, it will execute a sleep on the
master (before any node sleep).
If the on_nodes list is not empty, it will sleep on those nodes
(after the sleep on the master, if that is enabled).
As an additional feature, the case of duration < 0 will be reported
as an execution error, so this opcode can be used as a failure
generator. The case of duration == 0 will not be treated specially.
"""
OP_DSC_FIELD = "duration"
OP_PARAMS = [
("duration", ht.NoDefault, ht.TNumber, None),
("on_master", True, ht.TBool, None),
("on_nodes", ht.EmptyList, ht.TListOf(ht.TNonEmptyString), None),
("repeat", 0, ht.TNonNegativeInt, None),
]
def OP_DSC_FORMATTER(self, value): # pylint: disable=C0103,R0201
"""Custom formatter for duration.
"""
try:
v = float(value)
except TypeError:
v = value
return str(v)
class OpTestAllocator(OpCode):
"""Allocator framework testing.
This opcode has two modes:
- gather and return allocator input for a given mode (allocate new
or replace secondary) and a given instance definition (direction
'in')
- run a selected allocator for a given operation (as above) and
return the allocator output (direction 'out')
"""
OP_DSC_FIELD = "iallocator"
OP_PARAMS = [
("direction", ht.NoDefault,
ht.TElemOf(constants.VALID_IALLOCATOR_DIRECTIONS), None),
("mode", ht.NoDefault, ht.TElemOf(constants.VALID_IALLOCATOR_MODES), None),
("name", ht.NoDefault, ht.TNonEmptyString, None),
("nics", ht.NoDefault,
ht.TMaybeListOf(ht.TDictOf(ht.TElemOf([constants.INIC_MAC,
constants.INIC_IP,
"bridge"]),
ht.TMaybeString)),
None),
("disks", ht.NoDefault, ht.TMaybe(ht.TList), None),
("hypervisor", None, ht.TMaybeString, None),
_PIAllocFromDesc(None),
("tags", ht.EmptyList, ht.TListOf(ht.TNonEmptyString), None),
("memory", None, ht.TMaybe(ht.TNonNegativeInt), None),
("vcpus", None, ht.TMaybe(ht.TNonNegativeInt), None),
("os", None, ht.TMaybeString, None),
("disk_template", None, ht.TMaybeString, None),
("instances", None, ht.TMaybeListOf(ht.TNonEmptyString), None),
("evac_mode", None,
ht.TMaybe(ht.TElemOf(constants.IALLOCATOR_NEVAC_MODES)), None),
("target_groups", None, ht.TMaybeListOf(ht.TNonEmptyString), None),
("spindle_use", 1, ht.TNonNegativeInt, None),
("count", 1, ht.TNonNegativeInt, None),
]
class OpTestJqueue(OpCode):
"""Utility opcode to test some aspects of the job queue.
"""
OP_PARAMS = [
("notify_waitlock", False, ht.TBool, None),
("notify_exec", False, ht.TBool, None),
("log_messages", ht.EmptyList, ht.TListOf(ht.TString), None),
("fail", False, ht.TBool, None),
]
class OpTestDummy(OpCode):
"""Utility opcode used by unittests.
"""
OP_PARAMS = [
("result", ht.NoDefault, ht.NoType, None),
("messages", ht.NoDefault, ht.NoType, None),
("fail", ht.NoDefault, ht.NoType, None),
("submit_jobs", None, ht.NoType, None),
]
WITH_LU = False
# Network opcodes
# Add a new network in the cluster
class OpNetworkAdd(OpCode):
"""Add an IP network to the cluster."""
OP_DSC_FIELD = "network_name"
OP_PARAMS = [
_PNetworkName,
("network", ht.NoDefault, _TIpNetwork4, "IPv4 subnet"),
("gateway", None, ht.TMaybe(_TIpAddress4), "IPv4 gateway"),
("network6", None, ht.TMaybe(_TIpNetwork6), "IPv6 subnet"),
("gateway6", None, ht.TMaybe(_TIpAddress6), "IPv6 gateway"),
("mac_prefix", None, ht.TMaybeString,
"MAC address prefix that overrides cluster one"),
("add_reserved_ips", None, _TMaybeAddr4List,
"Which IP addresses to reserve"),
("conflicts_check", True, ht.TBool,
"Whether to check for conflicting IP addresses"),
("tags", ht.EmptyList, ht.TListOf(ht.TNonEmptyString), "Network tags"),
]
OP_RESULT = ht.TNone
class OpNetworkRemove(OpCode):
"""Remove an existing network from the cluster.
Must not be connected to any nodegroup.
"""
OP_DSC_FIELD = "network_name"
OP_PARAMS = [
_PNetworkName,
_PForce,
]
OP_RESULT = ht.TNone
class OpNetworkSetParams(OpCode):
"""Modify Network's parameters except for IPv4 subnet"""
OP_DSC_FIELD = "network_name"
OP_PARAMS = [
_PNetworkName,
("gateway", None, ht.TMaybeValueNone(_TIpAddress4), "IPv4 gateway"),
("network6", None, ht.TMaybeValueNone(_TIpNetwork6), "IPv6 subnet"),
("gateway6", None, ht.TMaybeValueNone(_TIpAddress6), "IPv6 gateway"),
("mac_prefix", None, ht.TMaybeValueNone(ht.TString),
"MAC address prefix that overrides cluster one"),
("add_reserved_ips", None, _TMaybeAddr4List,
"Which external IP addresses to reserve"),
("remove_reserved_ips", None, _TMaybeAddr4List,
"Which external IP addresses to release"),
]
OP_RESULT = ht.TNone
class OpNetworkConnect(OpCode):
"""Connect a Network to a specific Nodegroup with the defined netparams
(mode, link). Nics in this Network will inherit those params.
  Produces an error if a NIC that is not already assigned to a network has an
  IP contained in this Network, unless --no-conflicts-check is passed.
"""
OP_DSC_FIELD = "network_name"
OP_PARAMS = [
_PGroupName,
_PNetworkName,
("network_mode", ht.NoDefault, ht.TElemOf(constants.NIC_VALID_MODES),
"Connectivity mode"),
("network_link", ht.NoDefault, ht.TString, "Connectivity link"),
("conflicts_check", True, ht.TBool, "Whether to check for conflicting IPs"),
]
OP_RESULT = ht.TNone
class OpNetworkDisconnect(OpCode):
"""Disconnect a Network from a Nodegroup. Produce errors if NICs are
present in the Network unless --no-conficts-check option is passed.
"""
OP_DSC_FIELD = "network_name"
OP_PARAMS = [
_PGroupName,
_PNetworkName,
]
OP_RESULT = ht.TNone
class OpNetworkQuery(OpCode):
"""Compute the list of networks."""
OP_PARAMS = [
_POutputFields,
_PUseLocking,
("names", ht.EmptyList, ht.TListOf(ht.TNonEmptyString),
"Empty list to query all groups, group names otherwise"),
]
OP_RESULT = _TOldQueryResult
def _GetOpList():
"""Returns list of all defined opcodes.
Does not eliminate duplicates by C{OP_ID}.
"""
return [v for v in globals().values()
if (isinstance(v, type) and issubclass(v, OpCode) and
hasattr(v, "OP_ID") and v is not OpCode)]
OP_MAPPING = dict((v.OP_ID, v) for v in _GetOpList())
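# Illustrative sketch (hypothetical): OP_MAPPING lets a consumer resolve the
# opcode class for a serialized job entry. The dictionary keys shown below are
# assumptions for the example; real IDs are derived automatically from the
# class names.
#
#   op_cls = OP_MAPPING.get(serialized["OP_ID"])
#   if op_cls is not None:
#     op = op_cls(**serialized_params)  # fields later checked against OP_PARAMS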
|
gpl-2.0
| 7,356,497,005,016,279,000
| 30.285189
| 80
| 0.657835
| false
| 3.517298
| false
| false
| false
|
ronggong/jingjuSingingPhraseMatching
|
phoneticSimilarity/phonemeDurationStat.py
|
1
|
5978
|
'''
* Copyright (C) 2017 Music Technology Group - Universitat Pompeu Fabra
*
* This file is part of jingjuSingingPhraseMatching
*
* pypYIN is free software: you can redistribute it and/or modify it under
* the terms of the GNU Affero General Public License as published by the Free
* Software Foundation (FSF), either version 3 of the License, or (at your
* option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
* FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
* details.
*
* You should have received a copy of the Affero GNU General Public License
* version 3 along with this program. If not, see http://www.gnu.org/licenses/
*
* If you have any problem about this python version code, please contact: Rong Gong
* rong.gong@upf.edu
*
*
* If you want to refer this code, please use this article:
*
'''
from general.trainTestSeparation import getRecordingNamesSimi
from general.textgridParser import syllableTextgridExtraction
import matplotlib.pyplot as plt
from scipy.misc import factorial
from scipy.optimize import curve_fit
from scipy.stats import gamma,expon
from general.filePath import *
from general.parameters import *
from general.phonemeMap import dic_pho_map
import json
import numpy as np
import os
def phoDurCollection(recordings):
'''
collect durations of pho into dictionary
:param recordings:
:return:
'''
dict_duration_pho = {}
for recording in recordings:
nestedPhonemeLists, numSyllables, numPhonemes \
= syllableTextgridExtraction(textgrid_path,recording,syllableTierName,phonemeTierName)
for pho in nestedPhonemeLists:
for p in pho[1]:
dur_pho = p[1] - p[0]
sampa_pho = dic_pho_map[p[2]]
if sampa_pho not in dict_duration_pho.keys():
dict_duration_pho[sampa_pho] = [dur_pho]
else:
dict_duration_pho[sampa_pho].append(dur_pho)
return dict_duration_pho
def poisson(k, lamb):
return (lamb**k/factorial(k)) * np.exp(-lamb)
def durPhoDistribution(array_durPho,sampa_pho,plot=False):
'''
pho durations histogram
:param array_durPho:
:return:
'''
# plt.figure(figsize=(10, 6))
# integer bin edges
offset_bin = 0.005
bins = np.arange(0, max(array_durPho)+2, 2*offset_bin) - offset_bin
# histogram
entries, bin_edges, patches = plt.hist(array_durPho, bins=bins, normed=True, fc=(0, 0, 1, 0.7),label='pho: '+sampa_pho+' duration histogram')
# centroid duration
bin_centres = bin_edges-offset_bin
bin_centres = bin_centres[:-1]
centroid = np.sum(bin_centres*entries)/np.sum(entries)
##-- fit with poisson distribution
# bin_middles = 0.5*(bin_edges[1:] + bin_edges[:-1])
#
# parameters, cov_matrix = curve_fit(poisson, bin_middles, entries)
#
# x = np.linspace(0, max(array_durPho), 1000)
# x = np.arange(0,max(array_durPho),hopsize_t)
#
# p = poisson(x, *parameters)
##-- fit with gamma distribution
# discard some outlier durations by applying 2 standard deviations interval
mean_array_durPho=np.mean(array_durPho)
std_array_durPho=np.std(array_durPho)
index_keep = np.where(array_durPho<mean_array_durPho+2*std_array_durPho)
array_durPho_keep = array_durPho[index_keep]
# discard some duration in histogram to make the fitting reasonable
if class_name == 'laosheng':
if sampa_pho == 'in':
array_durPho_keep = array_durPho_keep[np.where(array_durPho_keep<2.5)]
elif sampa_pho == '@n':
array_durPho_keep = array_durPho_keep[np.where(array_durPho_keep<3)]
elif sampa_pho == 'eI^':
array_durPho_keep = array_durPho_keep[np.where(array_durPho_keep<1.5)]
elif sampa_pho == 'EnEn':
array_durPho_keep = array_durPho_keep[np.where(array_durPho_keep<2.0)]
elif sampa_pho == 'UN':
array_durPho_keep = array_durPho_keep[np.where(array_durPho_keep<2.5)]
    # the step is hopsize_t_phoneticSimilarity, i.e. one distribution value per frame
    # the maximum length is 8 times the longest kept duration
x = np.arange(0, 8*max(array_durPho_keep),hopsize_t_phoneticSimilarity)
param = gamma.fit(array_durPho_keep,floc = 0)
y = gamma.pdf(x, *param)
# y = expon.pdf(x)
if plot:
# possion fitting curve
# plt.plot(x,p,'r',linewidth=2,label='Poisson distribution fitting curve')
# gamma fitting curve
# plt.plot(x, y, 'r-', lw=2, alpha=0.6, label='gamma pdf')
plt.axvline(centroid, linewidth = 3, color = 'r', label = 'centroid frequency')
plt.legend(fontsize=18)
plt.xlabel('Pho duration distribution ',fontsize=18)
plt.ylabel('Probability',fontsize=18)
plt.axis('tight')
plt.tight_layout()
plt.show()
y /= np.sum(y)
return y.tolist(),centroid
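# Illustrative sketch (hypothetical, for exposition): the same gamma-fitting
# steps as in durPhoDistribution, applied to synthetic data. The shape/scale
# values used to generate the fake durations are made up for the example.
def _demoGammaFit():
    durations = np.random.gamma(shape=2.0, scale=0.15, size=500)  # fake phoneme durations (seconds)
    x = np.arange(0, 8 * max(durations), hopsize_t_phoneticSimilarity)
    param = gamma.fit(durations, floc=0)  # fix the location at 0, estimate shape and scale
    y = gamma.pdf(x, *param)
    return y / np.sum(y)  # normalize so the per-frame probabilities sum to 1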
if __name__ == '__main__':
rp = os.path.dirname(__file__)
for cn in ['danAll', 'laosheng']:
recordings_train = getRecordingNamesSimi('TRAIN',cn)
dict_duration_pho = phoDurCollection(recordings_train)
dict_centroid_dur = {}
dict_dur_dist = {}
for pho in dict_duration_pho:
durDist,centroid_dur = durPhoDistribution(np.array(dict_duration_pho[pho]),pho,plot=False)
dict_centroid_dur[pho] = centroid_dur
dict_dur_dist[pho] = durDist # the first proba is always 0
# dump duration centroid
with open(os.path.join(rp, 'lyricsRecognizer' ,'dict_centroid_dur'+cn+'.json'),'wb') as outfile:
json.dump(dict_centroid_dur,outfile)
# the gamma occupancy duration distribution is never used
# with open('dict_dur_dist_'+class_name+'.json','wb') as outfile:
# json.dump(dict_dur_dist,outfile)
|
agpl-3.0
| -4,279,212,085,165,763,000
| 34.583333
| 145
| 0.652559
| false
| 3.319267
| false
| false
| false
|
MatKallada/nbgrader
|
nbgrader/tests/apps/base.py
|
1
|
1462
|
import os
import shutil
import pytest
import stat
from IPython.nbformat import write as write_nb
from IPython.nbformat.v4 import new_notebook
@pytest.mark.usefixtures("temp_cwd")
class BaseTestApp(object):
def _empty_notebook(self, path):
nb = new_notebook()
full_dest = os.path.join(os.getcwd(), path)
if not os.path.exists(os.path.dirname(full_dest)):
os.makedirs(os.path.dirname(full_dest))
if os.path.exists(full_dest):
os.remove(full_dest)
with open(full_dest, 'w') as f:
write_nb(nb, f, 4)
def _copy_file(self, src, dest):
full_src = os.path.join(os.path.dirname(__file__), src)
full_dest = os.path.join(os.getcwd(), dest)
if not os.path.exists(os.path.dirname(full_dest)):
os.makedirs(os.path.dirname(full_dest))
shutil.copy(full_src, full_dest)
def _make_file(self, path, contents=""):
full_dest = os.path.join(os.getcwd(), path)
if not os.path.exists(os.path.dirname(full_dest)):
os.makedirs(os.path.dirname(full_dest))
if os.path.exists(full_dest):
os.remove(full_dest)
with open(full_dest, "w") as fh:
fh.write(contents)
def _get_permissions(self, filename):
return oct(os.stat(filename).st_mode)[-3:]
def _file_contents(self, path):
with open(path, "r") as fh:
contents = fh.read()
return contents
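# Illustrative sketch (hypothetical): a test module would typically subclass
# BaseTestApp and use the helpers above, e.g.
#
#   class TestSomething(BaseTestApp):
#       def test_notebook_is_written(self):
#           self._empty_notebook("source/ps1/problem1.ipynb")
#           assert self._file_contents("source/ps1/problem1.ipynb") != ""
#
# The notebook path is a made-up example; the temp_cwd fixture keeps all of
# this inside a temporary working directory.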
|
bsd-3-clause
| 2,815,919,129,602,504,700
| 31.488889
| 63
| 0.604651
| false
| 3.330296
| false
| false
| false
|
philgyford/django-ditto
|
ditto/lastfm/urls.py
|
1
|
2255
|
from django.conf.urls import url
from . import views
app_name = "lastfm"
# The pattern for matching an Album/Artist/Track slug:
slug_chars = "[\w.,:;=@&+%()$!°’~-]+" # noqa: W605
urlpatterns = [
url(regex=r"^$", view=views.HomeView.as_view(), name="home"),
url(
regex=r"^library/$", view=views.ScrobbleListView.as_view(), name="scrobble_list"
),
url(
regex=r"^library/albums/$",
view=views.AlbumListView.as_view(),
name="album_list",
),
url(
regex=r"^library/artists/$",
view=views.ArtistListView.as_view(),
name="artist_list",
),
url(
regex=r"^library/tracks/$",
view=views.TrackListView.as_view(),
name="track_list",
),
url(
regex=r"^music/(?P<artist_slug>%s)/$" % slug_chars,
view=views.ArtistDetailView.as_view(),
name="artist_detail",
),
url(
regex=r"^music/(?P<artist_slug>%s)/\+albums/$" % slug_chars,
view=views.ArtistAlbumsView.as_view(),
name="artist_albums",
),
url(
regex=r"^music/(?P<artist_slug>%s)/(?P<album_slug>%s)/$"
% (slug_chars, slug_chars),
view=views.AlbumDetailView.as_view(),
name="album_detail",
),
url(
regex=r"^music/(?P<artist_slug>%s)/_/(?P<track_slug>%s)/$"
% (slug_chars, slug_chars),
view=views.TrackDetailView.as_view(),
name="track_detail",
),
# User pages.
url(
regex=r"^user/(?P<username>[a-z0-9]+)/$",
view=views.UserDetailView.as_view(),
name="user_detail",
),
url(
regex=r"^user/(?P<username>[a-z0-9]+)/library/$",
view=views.UserScrobbleListView.as_view(),
name="user_scrobble_list",
),
url(
regex=r"^user/(?P<username>[a-z0-9]+)/library/albums/$",
view=views.UserAlbumListView.as_view(),
name="user_album_list",
),
url(
regex=r"^user/(?P<username>[a-z0-9]+)/library/artists/$",
view=views.UserArtistListView.as_view(),
name="user_artist_list",
),
url(
regex=r"^user/(?P<username>[a-z0-9]+)/library/tracks/$",
view=views.UserTrackListView.as_view(),
name="user_track_list",
),
]
|
mit
| 1,310,448,974,129,350,100
| 26.802469
| 88
| 0.543961
| false
| 3.171831
| false
| false
| false
|
OpenTechFund/WebApp
|
opentech/apply/review/migrations/0001_initial.py
|
1
|
1068
|
# Generated by Django 2.0.2 on 2018-03-13 17:23
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('funds', '0028_update_on_delete_django2'),
]
operations = [
migrations.CreateModel(
name='Review',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('review', models.TextField()),
('author', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to=settings.AUTH_USER_MODEL)),
('submission', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='funds.ApplicationSubmission', related_name='reviews')),
],
),
migrations.AlterUniqueTogether(
name='review',
unique_together={('author', 'submission')},
),
]
|
gpl-2.0
| -8,155,020,913,151,318,000
| 33.451613
| 153
| 0.618914
| false
| 4.238095
| false
| false
| false
|
yinglanma/AI-project
|
examples/OpenAIGym/run-atari.py
|
1
|
3274
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: run-atari.py
# Author: Yuxin Wu <ppwwyyxxc@gmail.com>
import numpy as np
import tensorflow as tf
import os, sys, re, time
import random
import argparse
import six
import cv2   # needed for cv2.resize in get_player()
import gym   # needed for gym.upload in do_submit()
from tensorpack import *
from tensorpack.RL import *
IMAGE_SIZE = (84, 84)
FRAME_HISTORY = 4
CHANNEL = FRAME_HISTORY * 3
IMAGE_SHAPE3 = IMAGE_SIZE + (CHANNEL,)
NUM_ACTIONS = None
ENV_NAME = None
from common import play_one_episode
def get_player(dumpdir=None):
pl = GymEnv(ENV_NAME, dumpdir=dumpdir, auto_restart=False)
pl = MapPlayerState(pl, lambda img: cv2.resize(img, IMAGE_SIZE[::-1]))
global NUM_ACTIONS
NUM_ACTIONS = pl.get_action_space().num_actions()
pl = HistoryFramePlayer(pl, FRAME_HISTORY)
return pl
class Model(ModelDesc):
def _get_input_vars(self):
assert NUM_ACTIONS is not None
return [InputVar(tf.float32, (None,) + IMAGE_SHAPE3, 'state'),
InputVar(tf.int32, (None,), 'action'),
InputVar(tf.float32, (None,), 'futurereward') ]
def _get_NN_prediction(self, image):
image = image / 255.0
with argscope(Conv2D, nl=tf.nn.relu):
l = Conv2D('conv0', image, out_channel=32, kernel_shape=5)
l = MaxPooling('pool0', l, 2)
l = Conv2D('conv1', l, out_channel=32, kernel_shape=5)
l = MaxPooling('pool1', l, 2)
l = Conv2D('conv2', l, out_channel=64, kernel_shape=4)
l = MaxPooling('pool2', l, 2)
l = Conv2D('conv3', l, out_channel=64, kernel_shape=3)
l = FullyConnected('fc0', l, 512, nl=tf.identity)
l = PReLU('prelu', l)
policy = FullyConnected('fc-pi', l, out_dim=NUM_ACTIONS, nl=tf.identity)
return policy
def _build_graph(self, inputs):
state, action, futurereward = inputs
policy = self._get_NN_prediction(state)
self.logits = tf.nn.softmax(policy, name='logits')
def run_submission(cfg, output, nr):
player = get_player(dumpdir=output)
predfunc = get_predict_func(cfg)
for k in range(nr):
if k != 0:
player.restart_episode()
score = play_one_episode(player, predfunc)
print("Total:", score)
def do_submit(output):
gym.upload(output, api_key='xxx')
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', help='comma separated list of GPU(s) to use.') # nargs='*' in multi mode
parser.add_argument('--load', help='load model', required=True)
parser.add_argument('--env', help='environment name', required=True)
parser.add_argument('--episode', help='number of episodes to run',
type=int, default=100)
parser.add_argument('--output', help='output directory', default='gym-submit')
args = parser.parse_args()
ENV_NAME = args.env
assert ENV_NAME
logger.info("Environment Name: {}".format(ENV_NAME))
p = get_player(); del p # set NUM_ACTIONS
if args.gpu:
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
cfg = PredictConfig(
model=Model(),
session_init=SaverRestore(args.load),
input_var_names=['state'],
output_var_names=['logits'])
run_submission(cfg, args.output, args.episode)
|
apache-2.0
| -3,475,211,309,922,932,000
| 32.070707
| 105
| 0.618815
| false
| 3.320487
| false
| false
| false
|
pydcs/dcs
|
dcs/coalition.py
|
1
|
13638
|
import sys
from typing import Dict, Union, List, TYPE_CHECKING
import dcs.countries as countries
import dcs.unitgroup as unitgroup
import dcs.planes as planes
import dcs.helicopters as helicopters
import dcs.ships as ships
from dcs.unit import Vehicle, Static, Ship, FARP, SingleHeliPad
from dcs.flyingunit import Plane, Helicopter
from dcs.point import MovingPoint, StaticPoint
from dcs.country import Country
from dcs.status_message import StatusMessage, MessageType, MessageSeverity
if TYPE_CHECKING:
from . import Mission
class Coalition:
def __init__(self, name, bullseye=None):
self.name = name
self.countries = {} # type: Dict[str, Country]
self.bullseye = bullseye
self.nav_points = [] # TODO
@staticmethod
def _sort_keys(points):
keys = []
for imp_point_idx in points:
keys.append(int(imp_point_idx))
keys.sort()
return keys
@staticmethod
def _import_moving_point(mission, group: unitgroup.Group, imp_group) -> unitgroup.Group:
keys = Coalition._sort_keys(imp_group["route"]["points"])
for imp_point_idx in keys:
imp_point = imp_group["route"]["points"][imp_point_idx]
point = MovingPoint()
point.load_from_dict(imp_point, mission.translation)
group.add_point(point)
return group
@staticmethod
def _import_static_point(mission, group: unitgroup.Group, imp_group) -> unitgroup.Group:
keys = Coalition._sort_keys(imp_group["route"]["points"])
for imp_point_idx in keys:
imp_point = imp_group["route"]["points"][imp_point_idx]
point = StaticPoint()
point.load_from_dict(imp_point, mission.translation)
group.add_point(point)
return group
@staticmethod
def _park_unit_on_airport(
mission: 'Mission',
group: unitgroup.Group,
unit: Union[Plane, Helicopter]) -> List[StatusMessage]:
ret = []
if group.points[0].airdrome_id is not None and unit.parking is not None:
airport = mission.terrain.airport_by_id(group.points[0].airdrome_id)
slot = airport.parking_slot(unit.parking)
if slot is not None:
unit.set_parking(slot)
else:
msg = "Parking slot id '{i}' for unit '{u}' in group '{p}' on airport '{a}' " \
"not valid, placing on next free".format(i=unit.parking, u=unit.name,
a=airport.name, p=group.name)
print("WARN", msg, file=sys.stderr)
ret.append(StatusMessage(msg, MessageType.PARKING_SLOT_NOT_VALID, MessageSeverity.WARN))
slot = airport.free_parking_slot(unit.unit_type)
if slot is not None:
unit.set_parking(slot)
else:
msg = "No free parking slots for unit '{u}' in unit group '{p}' on airport '{a}', ignoring"\
.format(u=unit.name, a=airport.name, p=group.name)
print("ERRO", msg, file=sys.stderr)
ret.append(StatusMessage(msg, MessageType.PARKING_SLOTS_FULL, MessageSeverity.ERROR))
return ret
@staticmethod
def get_name(mission: "Mission", name: str) -> str:
        # Group and unit names are not localized in missions created with 2.7 or later.
if mission.version < 19:
return str(mission.translation.get_string(name))
else:
return name
def load_from_dict(self, mission, d) -> List[StatusMessage]:
status: List[StatusMessage] = []
for country_idx in d["country"]:
imp_country = d["country"][country_idx]
_country = countries.get_by_id(imp_country["id"])
if "vehicle" in imp_country:
for vgroup_idx in imp_country["vehicle"]["group"]:
vgroup = imp_country["vehicle"]["group"][vgroup_idx]
vg = unitgroup.VehicleGroup(vgroup["groupId"], self.get_name(mission, vgroup["name"]),
vgroup["start_time"])
vg.load_from_dict(vgroup)
mission.current_group_id = max(mission.current_group_id, vg.id)
Coalition._import_moving_point(mission, vg, vgroup)
# units
for imp_unit_idx in vgroup["units"]:
imp_unit = vgroup["units"][imp_unit_idx]
unit = Vehicle(
id=imp_unit["unitId"],
name=self.get_name(mission, imp_unit["name"]),
_type=imp_unit["type"])
unit.load_from_dict(imp_unit)
mission.current_unit_id = max(mission.current_unit_id, unit.id)
vg.add_unit(unit)
_country.add_vehicle_group(vg)
if "ship" in imp_country:
for group_idx in imp_country["ship"]["group"]:
imp_group = imp_country["ship"]["group"][group_idx]
vg = unitgroup.ShipGroup(imp_group["groupId"], self.get_name(mission, imp_group["name"]),
imp_group["start_time"])
vg.load_from_dict(imp_group)
mission.current_group_id = max(mission.current_group_id, vg.id)
Coalition._import_moving_point(mission, vg, imp_group)
# units
for imp_unit_idx in imp_group["units"]:
imp_unit = imp_group["units"][imp_unit_idx]
unit = Ship(
id=imp_unit["unitId"],
name=self.get_name(mission, imp_unit["name"]),
_type=ships.ship_map[imp_unit["type"]])
unit.load_from_dict(imp_unit)
mission.current_unit_id = max(mission.current_unit_id, unit.id)
vg.add_unit(unit)
_country.add_ship_group(vg)
if "plane" in imp_country:
for pgroup_idx in imp_country["plane"]["group"]:
pgroup = imp_country["plane"]["group"][pgroup_idx]
plane_group = unitgroup.PlaneGroup(pgroup["groupId"],
self.get_name(mission, pgroup["name"]),
pgroup["start_time"])
plane_group.load_from_dict(pgroup)
mission.current_group_id = max(mission.current_group_id, plane_group.id)
Coalition._import_moving_point(mission, plane_group, pgroup)
# units
for imp_unit_idx in pgroup["units"]:
imp_unit = pgroup["units"][imp_unit_idx]
plane = Plane(
_id=imp_unit["unitId"],
name=self.get_name(mission, imp_unit["name"]),
_type=planes.plane_map[imp_unit["type"]],
_country=_country)
plane.load_from_dict(imp_unit)
if _country.reserve_onboard_num(plane.onboard_num):
msg = "{c} Plane '{p}' already using tail number: {t}".format(
c=self.name.upper(), p=plane.name, t=plane.onboard_num)
status.append(StatusMessage(msg, MessageType.ONBOARD_NUM_DUPLICATE, MessageSeverity.WARN))
print("WARN:", msg, file=sys.stderr)
status += self._park_unit_on_airport(mission, plane_group, plane)
mission.current_unit_id = max(mission.current_unit_id, plane.id)
plane_group.add_unit(plane)
# check runway start
# if plane_group.points[0].airdrome_id is not None and plane_group.units[0].parking is None:
# airport = mission.terrain.airport_by_id(plane_group.points[0].airdrome_id)
# airport.occupy_runway(plane_group)
_country.add_plane_group(plane_group)
if "helicopter" in imp_country:
for pgroup_idx in imp_country["helicopter"]["group"]:
pgroup = imp_country["helicopter"]["group"][pgroup_idx]
helicopter_group = unitgroup.HelicopterGroup(
pgroup["groupId"],
self.get_name(mission, pgroup["name"]),
pgroup["start_time"])
helicopter_group.load_from_dict(pgroup)
mission.current_group_id = max(mission.current_group_id, helicopter_group.id)
Coalition._import_moving_point(mission, helicopter_group, pgroup)
# units
for imp_unit_idx in pgroup["units"]:
imp_unit = pgroup["units"][imp_unit_idx]
heli = Helicopter(
_id=imp_unit["unitId"],
name=self.get_name(mission, imp_unit["name"]),
_type=helicopters.helicopter_map[imp_unit["type"]],
_country=_country)
heli.load_from_dict(imp_unit)
if _country.reserve_onboard_num(heli.onboard_num):
msg = "{c} Helicopter '{h}' already using tail number: {t}".format(
c=self.name.upper(), h=heli.name, t=heli.onboard_num)
status.append(StatusMessage(msg, MessageType.ONBOARD_NUM_DUPLICATE, MessageSeverity.WARN))
print("WARN:", msg, file=sys.stderr)
status += self._park_unit_on_airport(mission, helicopter_group, heli)
mission.current_unit_id = max(mission.current_unit_id, heli.id)
helicopter_group.add_unit(heli)
# check runway start
# if helicopter_group.points[0].airdrome_id is not None and helicopter_group.units[0].parking is None:
# airport = mission.terrain.airport_by_id(helicopter_group.points[0].airdrome_id)
# airport.occupy_runway(helicopter_group)
_country.add_helicopter_group(helicopter_group)
if "static" in imp_country:
for sgroup_idx in imp_country["static"]["group"]:
sgroup = imp_country["static"]["group"][sgroup_idx]
static_group = unitgroup.StaticGroup(sgroup["groupId"],
self.get_name(mission, sgroup["name"]))
static_group.load_from_dict(sgroup)
mission.current_group_id = max(mission.current_group_id, static_group.id)
Coalition._import_static_point(mission, static_group, sgroup)
# units
for imp_unit_idx in sgroup["units"]:
imp_unit = sgroup["units"][imp_unit_idx]
if imp_unit["type"] == "FARP":
static = FARP(
unit_id=imp_unit["unitId"],
name=self.get_name(mission, imp_unit["name"]))
elif imp_unit["type"] == "SINGLE_HELIPAD":
static = SingleHeliPad(
unit_id=imp_unit["unitId"],
name=self.get_name(mission, imp_unit["name"]))
else:
static = Static(
unit_id=imp_unit["unitId"],
name=self.get_name(mission, imp_unit["name"]),
_type=imp_unit["type"])
static.load_from_dict(imp_unit)
mission.current_unit_id = max(mission.current_unit_id, static.id)
static_group.add_unit(static)
_country.add_static_group(static_group)
self.add_country(_country)
return status
def set_bullseye(self, bulls):
self.bullseye = bulls
def add_country(self, country):
self.countries[country.name] = country
return country
def remove_country(self, name):
return self.countries.pop(name)
def swap_country(self, coalition, name):
return coalition.add_country(self.remove_country(name))
def country(self, country_name: str):
return self.countries.get(country_name, None)
def country_by_id(self, _id: int):
for cn in self.countries:
c = self.countries[cn]
if c.id == _id:
return c
return None
def find_group(self, group_name, search="exact"):
for c in self.countries:
g = self.countries[c].find_group(group_name, search)
if g:
return g
return None
def dict(self):
d = {"name": self.name}
if self.bullseye:
d["bullseye"] = self.bullseye
d["country"] = {}
i = 1
for country in sorted(self.countries.keys()):
d["country"][i] = self.country(country).dict()
i += 1
d["nav_points"] = {}
return d
|
lgpl-3.0
| -5,853,055,812,100,301,000
| 45.546075
| 122
| 0.510045
| false
| 4.048085
| false
| false
| false
|
compsci-hfh/app
|
project/project/defaults.py
|
1
|
8179
|
import os
from django.contrib import messages
SETTINGS_DIR = os.path.dirname(os.path.abspath(__file__))
PROJECT_DIR = os.path.dirname(SETTINGS_DIR)
BUILDOUT_DIR = os.path.dirname(PROJECT_DIR)
VAR_DIR = os.path.join(BUILDOUT_DIR, "var")
##########################################################################
#
# Secret settings
#
##########################################################################
# If a secret_settings file isn't defined, open a new one and save a
# SECRET_KEY in it. Then import it. All passwords and other secret
# settings should be stored in secret_settings.py. NOT in settings.py
try:
from secret_settings import *
except ImportError:
print "Couldn't find secret_settings.py file. Creating a new one."
secret_path = os.path.join(SETTINGS_DIR, "secret_settings.py")
with open(secret_path, 'w') as secret_settings:
secret_key = ''.join([chr(ord(x) % 90 + 33) for x in os.urandom(40)])
secret_settings.write("SECRET_KEY = '''%s'''\n" % secret_key)
from secret_settings import *
##########################################################################
#
# Authentication settings
#
##########################################################################
# When a user successfully logs in, redirect here by default
LOGIN_REDIRECT_URL = '/'
# The address to redirect to when a user must authenticate
LOGIN_URL = '/accounts/google/login/?process=login'
ACCOUNT_SIGNUP_FORM_CLASS = 'project.profiles.forms.SignupForm'
# Require that users who are signing up provide an email address
ACCOUNT_EMAIL_REQUIRED = True
# Don't store login tokens. We don't need them.
SOCIALACCOUNT_STORE_TOKENS = False
# Try to pull username/email from provider.
SOCIALACCOUNT_AUTO_SIGNUP = False
SOCIALACCOUNT_PROVIDERS = {
'google': {
'SCOPE': ['profile', 'email'],
'AUTH_PARAMS': { 'access_type': 'online' }
},
}
AUTHENTICATION_BACKENDS = (
'allauth.account.auth_backends.AuthenticationBackend',
)
ABSOLUTE_URL_OVERRIDES = {
'auth.user': lambda u: "/profile/%s/" % u.username,
}
##########################################################################
#
# Email Settings
#
##########################################################################
# These should be added to secret_settings.py
# EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
# EMAIL_HOST = ''
# EMAIL_PORT = 587
# EMAIL_HOST_USER = ''
# EMAIL_HOST_PASSWORD = ''
# EMAIL_USE_TLS = True
# DEFAULT_FROM_EMAIL = ''
##########################################################################
#
# API settings
#
##########################################################################
REST_FRAMEWORK = {
'DEFAULT_PERMISSION_CLASSES': (
'rest_framework.permissions.IsAuthenticated',
)
}
##########################################################################
#
# Bleach settings
#
##########################################################################
import bleach
ALLOWED_HTML_TAGS = bleach.ALLOWED_TAGS + ['h1', 'h2', 'h3', 'p', 'img']
ALLOWED_HTML_ATTRS = bleach.ALLOWED_ATTRIBUTES
ALLOWED_HTML_ATTRS.update({
'img': ['src', 'alt'],
})
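# Illustrative note (hypothetical usage): these lists are the values one would
# pass to bleach when sanitizing untrusted HTML, e.g.
#
#   cleaned = bleach.clean(raw_html, tags=ALLOWED_HTML_TAGS,
#                          attributes=ALLOWED_HTML_ATTRS)
#
# where raw_html is whatever markup the user submitted.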
##########################################################################
#
# Crispy settings
#
##########################################################################
CRISPY_TEMPLATE_PACK = "bootstrap3"
##########################################################################
#
# Messages settings
#
##########################################################################
# Change the default message tags to play nice with Bootstrap
MESSAGE_TAGS = {
messages.DEBUG: 'alert-info',
messages.INFO: 'alert-info',
messages.SUCCESS: 'alert-success',
messages.WARNING: 'alert-warning',
messages.ERROR: 'alert-danger',
}
##########################################################################
#
# Database settings
#
##########################################################################
# Should be overridden by development.py or production.py
DATABASES = None
##########################################################################
#
# Location settings
#
##########################################################################
TIME_ZONE = 'America/Chicago'
LANGUAGE_CODE = 'en-us'
USE_I18N = True
USE_L10N = True
USE_TZ = True
##########################################################################
#
# Static files settings
#
##########################################################################
MEDIA_ROOT = os.path.join(VAR_DIR, "uploads")
MEDIA_URL = '/uploads/'
STATIC_ROOT = os.path.join(VAR_DIR, "static")
STATIC_URL = '/static/'
ADMIN_MEDIA_PREFIX = '/static/admin/'
STATICFILES_DIRS = (
os.path.join(PROJECT_DIR, "static"),
)
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'compressor.finders.CompressorFinder',
)
COMPRESS_PRECOMPILERS = (
('text/coffeescript', 'coffee --compile --stdio'),
('text/x-sass', 'sass {infile} {outfile}'),
('text/x-scss', 'sass --scss {infile} {outfile}'),
)
COMPRESS_ENABLED = True
##########################################################################
#
# Template settings
#
##########################################################################
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(PROJECT_DIR, "templates")],
'OPTIONS': {
'context_processors': [
# Django
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'django.template.context_processors.csrf',
'django.template.context_processors.debug',
'django.template.context_processors.media',
'django.template.context_processors.request',
'django.template.context_processors.static',
],
'loaders': [
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
'django.template.loaders.eggs.Loader',
]
},
},
]
##########################################################################
#
# Middleware settings
#
##########################################################################
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
##########################################################################
#
# URL settings
#
##########################################################################
ROOT_URLCONF = 'project.project.urls'
##########################################################################
#
# Installed apps settings
#
##########################################################################
INSTALLED_APPS = (
# Django Content types *must* be first.
'django.contrib.contenttypes',
# AllAuth
'allauth',
'allauth.account',
'allauth.socialaccount',
'allauth.socialaccount.providers.google',
# Admin Tools
'admin_tools',
'admin_tools.theming',
'admin_tools.menu',
'admin_tools.dashboard',
# Django
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.messages',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.staticfiles',
# Crispy Forms
'crispy_forms',
# Rest Framework
'rest_framework',
# Django Extensions
'django_extensions',
# Compressor
'compressor',
# H4H apps
'project.teams',
'project.profiles',
'project.submission',
# Sentry client
'raven.contrib.django.raven_compat',
)
|
mit
| 8,719,486,345,774,488,000
| 26.354515
| 77
| 0.493948
| false
| 4.511307
| false
| false
| false
|
rudhir-upretee/Sumo17_With_Netsim
|
tools/build/checkSvnProps.py
|
1
|
6351
|
#!/usr/bin/env python
"""
@file checkSvnProps.py
@author Michael Behrisch
@date 2010
@version $Id: checkSvnProps.py 13811 2013-05-01 20:31:43Z behrisch $
Checks svn property settings for all files.
SUMO, Simulation of Urban MObility; see http://sumo.sourceforge.net/
Copyright (C) 2010-2013 DLR (http://www.dlr.de/) and contributors
All rights reserved
"""
import os, subprocess, sys, xml.sax
from optparse import OptionParser
_SOURCE_EXT = [".h", ".cpp", ".py", ".pl", ".java", ".am"]
_TESTDATA_EXT = [".xml", ".prog", ".csv",
".complex", ".dfrouter", ".duarouter", ".jtrrouter",
".astar", ".chrouter", ".tcl", ".txt",
".netconvert", ".netgen", ".od2trips", ".polyconvert", ".sumo",
".meso", ".tools", ".traci", ".activitygen", ".scenario",
".sumocfg", ".netccfg", ".netgcfg"]
_VS_EXT = [".vsprops", ".sln", ".vcproj", ".bat", ".props", ".vcxproj", ".filters"]
_KEYWORDS = "HeadURL Id LastChangedBy LastChangedDate LastChangedRevision"
class PropertyReader(xml.sax.handler.ContentHandler):
"""Reads the svn properties of files as written by svn pl -v --xml"""
def __init__(self, doFix):
self._fix = doFix
self._file = ""
self._property = None
self._value = ""
self._hadEOL = False
self._hadKeywords = False
def startElement(self, name, attrs):
if name == 'target':
self._file = attrs['path']
seen.add(os.path.join(svnRoot, self._file))
if name == 'property':
self._property = attrs['name']
def characters(self, content):
if self._property:
self._value += content
def endElement(self, name):
ext = os.path.splitext(self._file)[1]
if name == 'property' and self._property == "svn:eol-style":
self._hadEOL = True
if name == 'property' and self._property == "svn:keywords":
self._hadKeywords = True
if ext in _SOURCE_EXT or ext in _TESTDATA_EXT or ext in _VS_EXT:
if name == 'property' and self._property == "svn:executable" and ext not in [".py", ".pl", ".bat"]:
print self._file, self._property, self._value
if self._fix:
subprocess.call(["svn", "pd", "svn:executable", self._file])
if name == 'property' and self._property == "svn:mime-type":
print self._file, self._property, self._value
if self._fix:
subprocess.call(["svn", "pd", "svn:mime-type", self._file])
if ext in _SOURCE_EXT or ext in _TESTDATA_EXT:
if name == 'property' and self._property == "svn:eol-style" and self._value != "LF"\
or name == "target" and not self._hadEOL:
print self._file, "svn:eol-style", self._value
if self._fix:
if os.name == "posix":
subprocess.call(["sed", "-i", r's/\r$//', self._file])
subprocess.call(["sed", "-i", r's/\r/\n/g', self._file])
subprocess.call(["svn", "ps", "svn:eol-style", "LF", self._file])
if ext in _SOURCE_EXT:
if name == 'property' and self._property == "svn:keywords" and self._value != _KEYWORDS\
or name == "target" and not self._hadKeywords:
print self._file, "svn:keywords", self._value
if self._fix:
subprocess.call(["svn", "ps", "svn:keywords", _KEYWORDS, self._file])
if ext in _VS_EXT:
if name == 'property' and self._property == "svn:eol-style" and self._value != "CRLF"\
or name == "target" and not self._hadEOL:
print self._file, "svn:eol-style", self._value
if self._fix:
subprocess.call(["svn", "ps", "svn:eol-style", "CRLF", self._file])
if name == 'property':
self._value = ""
self._property = None
if name == 'target':
self._hadEOL = False
self._hadKeywords = False
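# Illustrative note (for exposition): the XML consumed by PropertyReader is the
# output of "svn proplist -v -R --xml", which looks roughly like this (the path
# and values are examples):
#
#   <properties>
#     <target path="src/foo.cpp">
#       <property name="svn:eol-style">LF</property>
#       <property name="svn:keywords">HeadURL Id LastChangedBy ...</property>
#     </target>
#   </properties>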
optParser = OptionParser()
optParser.add_option("-v", "--verbose", action="store_true",
default=False, help="tell me what you are doing")
optParser.add_option("-f", "--fix", action="store_true",
default=False, help="fix invalid svn properties")
(options, args) = optParser.parse_args()
seen = set()
sumoRoot = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
svnRoots = [sumoRoot]
if len(args) > 0:
svnRoots = [os.path.abspath(a) for a in args]
else:
upDir = os.path.dirname(sumoRoot)
for l in subprocess.Popen(["svn", "pg", "svn:externals", upDir], stdout=subprocess.PIPE, stderr=open(os.devnull, 'w')).communicate()[0].splitlines():
if l[:5] == "sumo/":
svnRoots.append(os.path.join(upDir, l.split()[0]))
for svnRoot in svnRoots:
if options.verbose:
print "checking", svnRoot
output = subprocess.Popen(["svn", "pl", "-v", "-R", "--xml", svnRoot], stdout=subprocess.PIPE).communicate()[0]
xml.sax.parseString(output, PropertyReader(options.fix))
if options.verbose:
print "re-checking tree at", sumoRoot
for root, dirs, files in os.walk(sumoRoot):
for name in files:
fullName = os.path.join(root, name)
if fullName in seen or subprocess.call(["svn", "ls", fullName], stdout=open(os.devnull, 'w'), stderr=subprocess.STDOUT):
continue
ext = os.path.splitext(name)[1]
if ext in _SOURCE_EXT or ext in _TESTDATA_EXT or ext in _VS_EXT:
print fullName, "svn:eol-style"
if options.fix:
if ext in _VS_EXT:
subprocess.call(["svn", "ps", "svn:eol-style", "CRLF", fullName])
else:
if os.name == "posix":
subprocess.call(["sed", "-i", 's/\r$//', fullName])
subprocess.call(["svn", "ps", "svn:eol-style", "LF", fullName])
if ext in _SOURCE_EXT:
print fullName, "svn:keywords"
if options.fix:
subprocess.call(["svn", "ps", "svn:keywords", _KEYWORDS, fullName])
for ignoreDir in ['.svn', 'foreign', 'contributed']:
if ignoreDir in dirs:
dirs.remove(ignoreDir)
|
gpl-3.0
| -8,763,534,845,186,761,000
| 44.364286
| 153
| 0.551724
| false
| 3.625
| false
| false
| false
|
frew/simpleproto
|
scons-local-1.1.0/SCons/Tool/packaging/src_targz.py
|
1
|
1623
|
"""SCons.Tool.Packaging.targz
The targz SRC packager.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/packaging/src_targz.py 3603 2008/10/10 05:46:45 scons"
from SCons.Tool.packaging import putintopackageroot
def package(env, target, source, PACKAGEROOT, **kw):
bld = env['BUILDERS']['Tar']
bld.set_suffix('.tar.gz')
target, source = putintopackageroot(target, source, env, PACKAGEROOT, honor_install_location=0)
return bld(env, target, source, TARFLAGS='-zc')
|
bsd-2-clause
| 757,621,664,930,149,500
| 42.864865
| 99
| 0.754775
| false
| 3.836879
| false
| false
| false
|
splotz90/urh
|
src/urh/ui/ui_signal_frame.py
|
1
|
41337
|
# -*- coding: utf-8 -*-
#
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_SignalFrame(object):
def setupUi(self, SignalFrame):
SignalFrame.setObjectName("SignalFrame")
SignalFrame.resize(1057, 509)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Maximum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(SignalFrame.sizePolicy().hasHeightForWidth())
SignalFrame.setSizePolicy(sizePolicy)
SignalFrame.setMinimumSize(QtCore.QSize(0, 0))
SignalFrame.setMaximumSize(QtCore.QSize(16777215, 16777215))
SignalFrame.setSizeIncrement(QtCore.QSize(0, 0))
SignalFrame.setBaseSize(QtCore.QSize(0, 0))
SignalFrame.setMouseTracking(False)
SignalFrame.setAcceptDrops(True)
SignalFrame.setAutoFillBackground(False)
SignalFrame.setStyleSheet("")
SignalFrame.setFrameShape(QtWidgets.QFrame.NoFrame)
SignalFrame.setFrameShadow(QtWidgets.QFrame.Raised)
SignalFrame.setLineWidth(1)
self.horizontalLayout = QtWidgets.QHBoxLayout(SignalFrame)
self.horizontalLayout.setObjectName("horizontalLayout")
self.gridLayout_2 = QtWidgets.QGridLayout()
self.gridLayout_2.setSizeConstraint(QtWidgets.QLayout.SetFixedSize)
self.gridLayout_2.setObjectName("gridLayout_2")
spacerItem = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.gridLayout_2.addItem(spacerItem, 12, 0, 1, 1)
self.horizontalLayout_5 = QtWidgets.QHBoxLayout()
self.horizontalLayout_5.setSpacing(7)
self.horizontalLayout_5.setObjectName("horizontalLayout_5")
self.cbModulationType = QtWidgets.QComboBox(SignalFrame)
self.cbModulationType.setObjectName("cbModulationType")
self.cbModulationType.addItem("")
self.cbModulationType.addItem("")
self.cbModulationType.addItem("")
self.horizontalLayout_5.addWidget(self.cbModulationType)
self.btnAdvancedModulationSettings = QtWidgets.QToolButton(SignalFrame)
icon = QtGui.QIcon.fromTheme("configure")
self.btnAdvancedModulationSettings.setIcon(icon)
self.btnAdvancedModulationSettings.setIconSize(QtCore.QSize(16, 16))
self.btnAdvancedModulationSettings.setObjectName("btnAdvancedModulationSettings")
self.horizontalLayout_5.addWidget(self.btnAdvancedModulationSettings)
self.gridLayout_2.addLayout(self.horizontalLayout_5, 9, 1, 1, 1)
self.labelModulation = QtWidgets.QLabel(SignalFrame)
self.labelModulation.setObjectName("labelModulation")
self.gridLayout_2.addWidget(self.labelModulation, 9, 0, 1, 1)
self.chkBoxSyncSelection = QtWidgets.QCheckBox(SignalFrame)
self.chkBoxSyncSelection.setChecked(True)
self.chkBoxSyncSelection.setObjectName("chkBoxSyncSelection")
self.gridLayout_2.addWidget(self.chkBoxSyncSelection, 22, 0, 1, 1)
self.sliderSpectrogramMin = QtWidgets.QSlider(SignalFrame)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.sliderSpectrogramMin.sizePolicy().hasHeightForWidth())
self.sliderSpectrogramMin.setSizePolicy(sizePolicy)
self.sliderSpectrogramMin.setMinimum(-150)
self.sliderSpectrogramMin.setMaximum(10)
self.sliderSpectrogramMin.setOrientation(QtCore.Qt.Horizontal)
self.sliderSpectrogramMin.setObjectName("sliderSpectrogramMin")
self.gridLayout_2.addWidget(self.sliderSpectrogramMin, 19, 1, 1, 1)
self.spinBoxNoiseTreshold = QtWidgets.QDoubleSpinBox(SignalFrame)
self.spinBoxNoiseTreshold.setDecimals(4)
self.spinBoxNoiseTreshold.setMaximum(1.0)
self.spinBoxNoiseTreshold.setSingleStep(0.0001)
self.spinBoxNoiseTreshold.setObjectName("spinBoxNoiseTreshold")
self.gridLayout_2.addWidget(self.spinBoxNoiseTreshold, 2, 1, 1, 1)
self.chkBoxShowProtocol = QtWidgets.QCheckBox(SignalFrame)
self.chkBoxShowProtocol.setObjectName("chkBoxShowProtocol")
self.gridLayout_2.addWidget(self.chkBoxShowProtocol, 21, 0, 1, 1)
self.labelNoise = QtWidgets.QLabel(SignalFrame)
self.labelNoise.setObjectName("labelNoise")
self.gridLayout_2.addWidget(self.labelNoise, 2, 0, 1, 1)
self.lineEditSignalName = QtWidgets.QLineEdit(SignalFrame)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.lineEditSignalName.sizePolicy().hasHeightForWidth())
self.lineEditSignalName.setSizePolicy(sizePolicy)
self.lineEditSignalName.setMinimumSize(QtCore.QSize(214, 0))
self.lineEditSignalName.setMaximumSize(QtCore.QSize(16777215, 16777215))
self.lineEditSignalName.setAcceptDrops(False)
self.lineEditSignalName.setObjectName("lineEditSignalName")
self.gridLayout_2.addWidget(self.lineEditSignalName, 1, 0, 1, 2)
self.cbProtoView = QtWidgets.QComboBox(SignalFrame)
self.cbProtoView.setObjectName("cbProtoView")
self.cbProtoView.addItem("")
self.cbProtoView.addItem("")
self.cbProtoView.addItem("")
self.gridLayout_2.addWidget(self.cbProtoView, 21, 1, 1, 1)
self.lInfoLenText = QtWidgets.QLabel(SignalFrame)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.lInfoLenText.sizePolicy().hasHeightForWidth())
self.lInfoLenText.setSizePolicy(sizePolicy)
self.lInfoLenText.setTextInteractionFlags(QtCore.Qt.LinksAccessibleByMouse)
self.lInfoLenText.setObjectName("lInfoLenText")
self.gridLayout_2.addWidget(self.lInfoLenText, 4, 0, 1, 1)
self.spinBoxInfoLen = QtWidgets.QSpinBox(SignalFrame)
self.spinBoxInfoLen.setMinimumSize(QtCore.QSize(100, 0))
self.spinBoxInfoLen.setMinimum(1)
self.spinBoxInfoLen.setMaximum(999999999)
self.spinBoxInfoLen.setObjectName("spinBoxInfoLen")
self.gridLayout_2.addWidget(self.spinBoxInfoLen, 4, 1, 1, 1)
self.spinBoxTolerance = QtWidgets.QSpinBox(SignalFrame)
self.spinBoxTolerance.setMinimumSize(QtCore.QSize(100, 0))
self.spinBoxTolerance.setMaximumSize(QtCore.QSize(16777215, 16777215))
self.spinBoxTolerance.setMaximum(9999)
self.spinBoxTolerance.setObjectName("spinBoxTolerance")
self.gridLayout_2.addWidget(self.spinBoxTolerance, 7, 1, 1, 1)
self.lErrorTolerance = QtWidgets.QLabel(SignalFrame)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.lErrorTolerance.sizePolicy().hasHeightForWidth())
self.lErrorTolerance.setSizePolicy(sizePolicy)
self.lErrorTolerance.setMinimumSize(QtCore.QSize(0, 0))
self.lErrorTolerance.setMaximumSize(QtCore.QSize(16777215, 16777215))
self.lErrorTolerance.setObjectName("lErrorTolerance")
self.gridLayout_2.addWidget(self.lErrorTolerance, 7, 0, 1, 1)
self.lSignalViewText = QtWidgets.QLabel(SignalFrame)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.lSignalViewText.sizePolicy().hasHeightForWidth())
self.lSignalViewText.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setUnderline(False)
self.lSignalViewText.setFont(font)
self.lSignalViewText.setObjectName("lSignalViewText")
self.gridLayout_2.addWidget(self.lSignalViewText, 15, 0, 1, 1)
self.line = QtWidgets.QFrame(SignalFrame)
self.line.setFrameShape(QtWidgets.QFrame.HLine)
self.line.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line.setObjectName("line")
self.gridLayout_2.addWidget(self.line, 13, 0, 1, 2)
self.lCenterOffset = QtWidgets.QLabel(SignalFrame)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.lCenterOffset.sizePolicy().hasHeightForWidth())
self.lCenterOffset.setSizePolicy(sizePolicy)
self.lCenterOffset.setMinimumSize(QtCore.QSize(0, 0))
self.lCenterOffset.setMaximumSize(QtCore.QSize(16777215, 16777215))
self.lCenterOffset.setWhatsThis("")
self.lCenterOffset.setObjectName("lCenterOffset")
self.gridLayout_2.addWidget(self.lCenterOffset, 3, 0, 1, 1)
self.spinBoxCenterOffset = QtWidgets.QDoubleSpinBox(SignalFrame)
self.spinBoxCenterOffset.setMinimumSize(QtCore.QSize(100, 0))
self.spinBoxCenterOffset.setMaximumSize(QtCore.QSize(16777215, 16777215))
self.spinBoxCenterOffset.setDecimals(4)
self.spinBoxCenterOffset.setMinimum(-3.15)
self.spinBoxCenterOffset.setMaximum(6.28)
self.spinBoxCenterOffset.setSingleStep(0.0001)
self.spinBoxCenterOffset.setObjectName("spinBoxCenterOffset")
self.gridLayout_2.addWidget(self.spinBoxCenterOffset, 3, 1, 1, 1)
self.btnAutoDetect = QtWidgets.QPushButton(SignalFrame)
icon = QtGui.QIcon.fromTheme("system-software-update")
self.btnAutoDetect.setIcon(icon)
self.btnAutoDetect.setIconSize(QtCore.QSize(16, 16))
self.btnAutoDetect.setCheckable(True)
self.btnAutoDetect.setChecked(True)
self.btnAutoDetect.setObjectName("btnAutoDetect")
self.gridLayout_2.addWidget(self.btnAutoDetect, 11, 0, 1, 2)
self.cbSignalView = QtWidgets.QComboBox(SignalFrame)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.cbSignalView.sizePolicy().hasHeightForWidth())
self.cbSignalView.setSizePolicy(sizePolicy)
self.cbSignalView.setObjectName("cbSignalView")
self.cbSignalView.addItem("")
self.cbSignalView.addItem("")
self.cbSignalView.addItem("")
self.gridLayout_2.addWidget(self.cbSignalView, 15, 1, 1, 1)
self.gridLayout = QtWidgets.QGridLayout()
self.gridLayout.setObjectName("gridLayout")
self.btnSaveSignal = QtWidgets.QToolButton(SignalFrame)
self.btnSaveSignal.setMinimumSize(QtCore.QSize(24, 24))
self.btnSaveSignal.setMaximumSize(QtCore.QSize(24, 24))
icon = QtGui.QIcon.fromTheme("document-save")
self.btnSaveSignal.setIcon(icon)
self.btnSaveSignal.setObjectName("btnSaveSignal")
self.gridLayout.addWidget(self.btnSaveSignal, 0, 3, 1, 1)
spacerItem1 = QtWidgets.QSpacerItem(10, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.gridLayout.addItem(spacerItem1, 0, 2, 1, 1)
self.btnCloseSignal = QtWidgets.QToolButton(SignalFrame)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.btnCloseSignal.sizePolicy().hasHeightForWidth())
self.btnCloseSignal.setSizePolicy(sizePolicy)
self.btnCloseSignal.setMinimumSize(QtCore.QSize(24, 24))
self.btnCloseSignal.setMaximumSize(QtCore.QSize(24, 24))
self.btnCloseSignal.setStyleSheet("color:red;")
icon = QtGui.QIcon.fromTheme("window-close")
self.btnCloseSignal.setIcon(icon)
self.btnCloseSignal.setObjectName("btnCloseSignal")
self.gridLayout.addWidget(self.btnCloseSignal, 0, 9, 1, 1)
self.lSignalTyp = QtWidgets.QLabel(SignalFrame)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.lSignalTyp.sizePolicy().hasHeightForWidth())
self.lSignalTyp.setSizePolicy(sizePolicy)
self.lSignalTyp.setObjectName("lSignalTyp")
self.gridLayout.addWidget(self.lSignalTyp, 0, 1, 1, 1)
self.lSignalNr = QtWidgets.QLabel(SignalFrame)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Maximum, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.lSignalNr.sizePolicy().hasHeightForWidth())
self.lSignalNr.setSizePolicy(sizePolicy)
self.lSignalNr.setWordWrap(False)
self.lSignalNr.setIndent(-1)
self.lSignalNr.setObjectName("lSignalNr")
self.gridLayout.addWidget(self.lSignalNr, 0, 0, 1, 1)
self.btnInfo = QtWidgets.QToolButton(SignalFrame)
self.btnInfo.setMinimumSize(QtCore.QSize(24, 24))
self.btnInfo.setMaximumSize(QtCore.QSize(24, 24))
icon = QtGui.QIcon.fromTheme("dialog-information")
self.btnInfo.setIcon(icon)
self.btnInfo.setObjectName("btnInfo")
self.gridLayout.addWidget(self.btnInfo, 0, 6, 1, 1)
self.btnReplay = QtWidgets.QToolButton(SignalFrame)
self.btnReplay.setMinimumSize(QtCore.QSize(24, 24))
self.btnReplay.setMaximumSize(QtCore.QSize(24, 24))
self.btnReplay.setText("")
icon = QtGui.QIcon.fromTheme("media-playback-start")
self.btnReplay.setIcon(icon)
self.btnReplay.setObjectName("btnReplay")
self.gridLayout.addWidget(self.btnReplay, 0, 5, 1, 1)
self.gridLayout_2.addLayout(self.gridLayout, 0, 0, 1, 2)
self.labelFFTWindowSize = QtWidgets.QLabel(SignalFrame)
self.labelFFTWindowSize.setObjectName("labelFFTWindowSize")
self.gridLayout_2.addWidget(self.labelFFTWindowSize, 18, 0, 1, 1)
self.sliderFFTWindowSize = QtWidgets.QSlider(SignalFrame)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.sliderFFTWindowSize.sizePolicy().hasHeightForWidth())
self.sliderFFTWindowSize.setSizePolicy(sizePolicy)
self.sliderFFTWindowSize.setMinimum(6)
self.sliderFFTWindowSize.setMaximum(15)
self.sliderFFTWindowSize.setOrientation(QtCore.Qt.Horizontal)
self.sliderFFTWindowSize.setObjectName("sliderFFTWindowSize")
self.gridLayout_2.addWidget(self.sliderFFTWindowSize, 18, 1, 1, 1)
self.sliderSpectrogramMax = QtWidgets.QSlider(SignalFrame)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.sliderSpectrogramMax.sizePolicy().hasHeightForWidth())
self.sliderSpectrogramMax.setSizePolicy(sizePolicy)
self.sliderSpectrogramMax.setMinimum(-150)
self.sliderSpectrogramMax.setMaximum(10)
self.sliderSpectrogramMax.setOrientation(QtCore.Qt.Horizontal)
self.sliderSpectrogramMax.setObjectName("sliderSpectrogramMax")
self.gridLayout_2.addWidget(self.sliderSpectrogramMax, 20, 1, 1, 1)
self.labelSpectrogramMin = QtWidgets.QLabel(SignalFrame)
self.labelSpectrogramMin.setObjectName("labelSpectrogramMin")
self.gridLayout_2.addWidget(self.labelSpectrogramMin, 19, 0, 1, 1)
self.labelSpectrogramMax = QtWidgets.QLabel(SignalFrame)
self.labelSpectrogramMax.setObjectName("labelSpectrogramMax")
self.gridLayout_2.addWidget(self.labelSpectrogramMax, 20, 0, 1, 1)
self.horizontalLayout.addLayout(self.gridLayout_2)
self.splitter = QtWidgets.QSplitter(SignalFrame)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.splitter.sizePolicy().hasHeightForWidth())
self.splitter.setSizePolicy(sizePolicy)
self.splitter.setStyleSheet("QSplitter::handle:vertical {\n"
"margin: 4px 0px;\n"
" background-color: qlineargradient(x1:0, y1:0, x2:1, y2:0, \n"
"stop:0 rgba(255, 255, 255, 0), \n"
"stop:0.5 rgba(100, 100, 100, 100), \n"
"stop:1 rgba(255, 255, 255, 0));\n"
" image: url(:/icons/data/icons/splitter_handle_horizontal.svg);\n"
"}")
self.splitter.setFrameShape(QtWidgets.QFrame.NoFrame)
self.splitter.setLineWidth(1)
self.splitter.setOrientation(QtCore.Qt.Vertical)
self.splitter.setHandleWidth(6)
self.splitter.setChildrenCollapsible(False)
self.splitter.setObjectName("splitter")
self.layoutWidget = QtWidgets.QWidget(self.splitter)
self.layoutWidget.setObjectName("layoutWidget")
self.verticalLayout = QtWidgets.QVBoxLayout(self.layoutWidget)
self.verticalLayout.setSizeConstraint(QtWidgets.QLayout.SetDefaultConstraint)
self.verticalLayout.setContentsMargins(0, 0, 0, 0)
self.verticalLayout.setObjectName("verticalLayout")
self.horizontalLayout_2 = QtWidgets.QHBoxLayout()
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self.stackedWidget = QtWidgets.QStackedWidget(self.layoutWidget)
self.stackedWidget.setLineWidth(0)
self.stackedWidget.setObjectName("stackedWidget")
self.pageSignal = QtWidgets.QWidget()
self.pageSignal.setObjectName("pageSignal")
self.horizontalLayout_6 = QtWidgets.QHBoxLayout(self.pageSignal)
self.horizontalLayout_6.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout_6.setSpacing(0)
self.horizontalLayout_6.setObjectName("horizontalLayout_6")
self.gvLegend = LegendGraphicView(self.pageSignal)
self.gvLegend.setMinimumSize(QtCore.QSize(0, 150))
self.gvLegend.setMaximumSize(QtCore.QSize(30, 16777215))
self.gvLegend.setFrameShape(QtWidgets.QFrame.NoFrame)
self.gvLegend.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
self.gvLegend.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOn)
self.gvLegend.setInteractive(False)
self.gvLegend.setResizeAnchor(QtWidgets.QGraphicsView.AnchorViewCenter)
self.gvLegend.setRubberBandSelectionMode(QtCore.Qt.ContainsItemShape)
self.gvLegend.setOptimizationFlags(QtWidgets.QGraphicsView.DontSavePainterState)
self.gvLegend.setObjectName("gvLegend")
self.horizontalLayout_6.addWidget(self.gvLegend)
self.gvSignal = EpicGraphicView(self.pageSignal)
self.gvSignal.setEnabled(True)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.gvSignal.sizePolicy().hasHeightForWidth())
self.gvSignal.setSizePolicy(sizePolicy)
self.gvSignal.setMinimumSize(QtCore.QSize(0, 150))
self.gvSignal.setMaximumSize(QtCore.QSize(16777215, 16777215))
self.gvSignal.setMouseTracking(True)
self.gvSignal.setFocusPolicy(QtCore.Qt.WheelFocus)
self.gvSignal.setContextMenuPolicy(QtCore.Qt.DefaultContextMenu)
self.gvSignal.setAutoFillBackground(False)
self.gvSignal.setStyleSheet("")
self.gvSignal.setFrameShape(QtWidgets.QFrame.NoFrame)
self.gvSignal.setFrameShadow(QtWidgets.QFrame.Raised)
self.gvSignal.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
self.gvSignal.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOn)
self.gvSignal.setInteractive(False)
self.gvSignal.setRenderHints(QtGui.QPainter.Antialiasing|QtGui.QPainter.TextAntialiasing)
self.gvSignal.setDragMode(QtWidgets.QGraphicsView.NoDrag)
self.gvSignal.setCacheMode(QtWidgets.QGraphicsView.CacheNone)
self.gvSignal.setTransformationAnchor(QtWidgets.QGraphicsView.NoAnchor)
self.gvSignal.setResizeAnchor(QtWidgets.QGraphicsView.NoAnchor)
self.gvSignal.setViewportUpdateMode(QtWidgets.QGraphicsView.MinimalViewportUpdate)
self.gvSignal.setRubberBandSelectionMode(QtCore.Qt.ContainsItemShape)
self.gvSignal.setOptimizationFlags(QtWidgets.QGraphicsView.DontClipPainter|QtWidgets.QGraphicsView.DontSavePainterState)
self.gvSignal.setObjectName("gvSignal")
self.horizontalLayout_6.addWidget(self.gvSignal)
self.stackedWidget.addWidget(self.pageSignal)
self.pageSpectrogram = QtWidgets.QWidget()
self.pageSpectrogram.setObjectName("pageSpectrogram")
self.horizontalLayout_4 = QtWidgets.QHBoxLayout(self.pageSpectrogram)
self.horizontalLayout_4.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout_4.setSpacing(0)
self.horizontalLayout_4.setObjectName("horizontalLayout_4")
self.gvSpectrogram = SpectrogramGraphicView(self.pageSpectrogram)
self.gvSpectrogram.setMouseTracking(True)
self.gvSpectrogram.setFrameShape(QtWidgets.QFrame.NoFrame)
self.gvSpectrogram.setInteractive(False)
self.gvSpectrogram.setRenderHints(QtGui.QPainter.TextAntialiasing)
self.gvSpectrogram.setCacheMode(QtWidgets.QGraphicsView.CacheNone)
self.gvSpectrogram.setTransformationAnchor(QtWidgets.QGraphicsView.NoAnchor)
self.gvSpectrogram.setViewportUpdateMode(QtWidgets.QGraphicsView.MinimalViewportUpdate)
self.gvSpectrogram.setOptimizationFlags(QtWidgets.QGraphicsView.DontClipPainter|QtWidgets.QGraphicsView.DontSavePainterState)
self.gvSpectrogram.setObjectName("gvSpectrogram")
self.horizontalLayout_4.addWidget(self.gvSpectrogram)
self.stackedWidget.addWidget(self.pageSpectrogram)
self.horizontalLayout_2.addWidget(self.stackedWidget)
self.verticalLayout_5 = QtWidgets.QVBoxLayout()
self.verticalLayout_5.setObjectName("verticalLayout_5")
self.lYScale = QtWidgets.QLabel(self.layoutWidget)
self.lYScale.setLocale(QtCore.QLocale(QtCore.QLocale.English, QtCore.QLocale.UnitedStates))
self.lYScale.setObjectName("lYScale")
self.verticalLayout_5.addWidget(self.lYScale)
self.sliderYScale = QtWidgets.QSlider(self.layoutWidget)
self.sliderYScale.setMinimum(1)
self.sliderYScale.setMaximum(100)
self.sliderYScale.setOrientation(QtCore.Qt.Vertical)
self.sliderYScale.setTickPosition(QtWidgets.QSlider.TicksBelow)
self.sliderYScale.setObjectName("sliderYScale")
self.verticalLayout_5.addWidget(self.sliderYScale)
self.horizontalLayout_2.addLayout(self.verticalLayout_5)
self.verticalLayout.addLayout(self.horizontalLayout_2)
self.horizontalLayout_3 = QtWidgets.QHBoxLayout()
self.horizontalLayout_3.setObjectName("horizontalLayout_3")
self.btnShowHideStartEnd = QtWidgets.QToolButton(self.layoutWidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.btnShowHideStartEnd.sizePolicy().hasHeightForWidth())
self.btnShowHideStartEnd.setSizePolicy(sizePolicy)
self.btnShowHideStartEnd.setAutoFillBackground(False)
self.btnShowHideStartEnd.setStyleSheet("")
icon = QtGui.QIcon.fromTheme("arrow-down-double")
self.btnShowHideStartEnd.setIcon(icon)
self.btnShowHideStartEnd.setCheckable(True)
self.btnShowHideStartEnd.setObjectName("btnShowHideStartEnd")
self.horizontalLayout_3.addWidget(self.btnShowHideStartEnd)
self.lNumSelectedSamples = QtWidgets.QLabel(self.layoutWidget)
self.lNumSelectedSamples.setObjectName("lNumSelectedSamples")
self.horizontalLayout_3.addWidget(self.lNumSelectedSamples)
self.lTextSelectedSamples = QtWidgets.QLabel(self.layoutWidget)
self.lTextSelectedSamples.setObjectName("lTextSelectedSamples")
self.horizontalLayout_3.addWidget(self.lTextSelectedSamples)
self.line_3 = QtWidgets.QFrame(self.layoutWidget)
self.line_3.setFrameShape(QtWidgets.QFrame.VLine)
self.line_3.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_3.setObjectName("line_3")
self.horizontalLayout_3.addWidget(self.line_3)
self.lDuration = QtWidgets.QLabel(self.layoutWidget)
self.lDuration.setObjectName("lDuration")
self.horizontalLayout_3.addWidget(self.lDuration)
self.line_2 = QtWidgets.QFrame(self.layoutWidget)
self.line_2.setFrameShape(QtWidgets.QFrame.VLine)
self.line_2.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_2.setObjectName("line_2")
self.horizontalLayout_3.addWidget(self.line_2)
self.labelRSSI = QtWidgets.QLabel(self.layoutWidget)
self.labelRSSI.setObjectName("labelRSSI")
self.horizontalLayout_3.addWidget(self.labelRSSI)
spacerItem2 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_3.addItem(spacerItem2)
self.btnFilter = QtWidgets.QToolButton(self.layoutWidget)
icon = QtGui.QIcon.fromTheme("view-filter")
self.btnFilter.setIcon(icon)
self.btnFilter.setPopupMode(QtWidgets.QToolButton.MenuButtonPopup)
self.btnFilter.setToolButtonStyle(QtCore.Qt.ToolButtonTextBesideIcon)
self.btnFilter.setArrowType(QtCore.Qt.NoArrow)
self.btnFilter.setObjectName("btnFilter")
self.horizontalLayout_3.addWidget(self.btnFilter)
self.verticalLayout.addLayout(self.horizontalLayout_3)
self.additionalInfos = QtWidgets.QHBoxLayout()
self.additionalInfos.setSpacing(6)
self.additionalInfos.setObjectName("additionalInfos")
self.lStart = QtWidgets.QLabel(self.layoutWidget)
self.lStart.setObjectName("lStart")
self.additionalInfos.addWidget(self.lStart)
self.spinBoxSelectionStart = QtWidgets.QSpinBox(self.layoutWidget)
self.spinBoxSelectionStart.setReadOnly(False)
self.spinBoxSelectionStart.setMaximum(99999999)
self.spinBoxSelectionStart.setObjectName("spinBoxSelectionStart")
self.additionalInfos.addWidget(self.spinBoxSelectionStart)
self.lEnd = QtWidgets.QLabel(self.layoutWidget)
self.lEnd.setObjectName("lEnd")
self.additionalInfos.addWidget(self.lEnd)
self.spinBoxSelectionEnd = QtWidgets.QSpinBox(self.layoutWidget)
self.spinBoxSelectionEnd.setMaximum(99999999)
self.spinBoxSelectionEnd.setObjectName("spinBoxSelectionEnd")
self.additionalInfos.addWidget(self.spinBoxSelectionEnd)
spacerItem3 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.additionalInfos.addItem(spacerItem3)
self.lZoomText = QtWidgets.QLabel(self.layoutWidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Minimum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.lZoomText.sizePolicy().hasHeightForWidth())
self.lZoomText.setSizePolicy(sizePolicy)
self.lZoomText.setMinimumSize(QtCore.QSize(0, 0))
self.lZoomText.setMaximumSize(QtCore.QSize(16777215, 16777215))
font = QtGui.QFont()
font.setItalic(False)
font.setUnderline(False)
self.lZoomText.setFont(font)
self.lZoomText.setTextFormat(QtCore.Qt.PlainText)
self.lZoomText.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.lZoomText.setObjectName("lZoomText")
self.additionalInfos.addWidget(self.lZoomText)
self.spinBoxXZoom = QtWidgets.QSpinBox(self.layoutWidget)
self.spinBoxXZoom.setMinimum(100)
self.spinBoxXZoom.setMaximum(999999999)
self.spinBoxXZoom.setObjectName("spinBoxXZoom")
self.additionalInfos.addWidget(self.spinBoxXZoom)
spacerItem4 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.additionalInfos.addItem(spacerItem4)
self.lSamplesInView = QtWidgets.QLabel(self.layoutWidget)
self.lSamplesInView.setObjectName("lSamplesInView")
self.additionalInfos.addWidget(self.lSamplesInView)
self.lStrich = QtWidgets.QLabel(self.layoutWidget)
self.lStrich.setObjectName("lStrich")
self.additionalInfos.addWidget(self.lStrich)
self.lSamplesTotal = QtWidgets.QLabel(self.layoutWidget)
self.lSamplesTotal.setObjectName("lSamplesTotal")
self.additionalInfos.addWidget(self.lSamplesTotal)
self.lSamplesViewText = QtWidgets.QLabel(self.layoutWidget)
self.lSamplesViewText.setObjectName("lSamplesViewText")
self.additionalInfos.addWidget(self.lSamplesViewText)
self.verticalLayout.addLayout(self.additionalInfos)
self.txtEdProto = TextEditProtocolView(self.splitter)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.MinimumExpanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.txtEdProto.sizePolicy().hasHeightForWidth())
self.txtEdProto.setSizePolicy(sizePolicy)
self.txtEdProto.setMinimumSize(QtCore.QSize(0, 80))
self.txtEdProto.setMaximumSize(QtCore.QSize(16777215, 16777215))
self.txtEdProto.setBaseSize(QtCore.QSize(0, 0))
self.txtEdProto.setContextMenuPolicy(QtCore.Qt.DefaultContextMenu)
self.txtEdProto.setAcceptDrops(False)
self.txtEdProto.setObjectName("txtEdProto")
self.horizontalLayout.addWidget(self.splitter)
self.retranslateUi(SignalFrame)
self.stackedWidget.setCurrentIndex(0)
QtCore.QMetaObject.connectSlotsByName(SignalFrame)
SignalFrame.setTabOrder(self.btnSaveSignal, self.btnInfo)
SignalFrame.setTabOrder(self.btnInfo, self.btnCloseSignal)
SignalFrame.setTabOrder(self.btnCloseSignal, self.lineEditSignalName)
SignalFrame.setTabOrder(self.lineEditSignalName, self.spinBoxNoiseTreshold)
SignalFrame.setTabOrder(self.spinBoxNoiseTreshold, self.spinBoxCenterOffset)
SignalFrame.setTabOrder(self.spinBoxCenterOffset, self.spinBoxInfoLen)
SignalFrame.setTabOrder(self.spinBoxInfoLen, self.spinBoxTolerance)
SignalFrame.setTabOrder(self.spinBoxTolerance, self.chkBoxShowProtocol)
SignalFrame.setTabOrder(self.chkBoxShowProtocol, self.cbProtoView)
SignalFrame.setTabOrder(self.cbProtoView, self.chkBoxSyncSelection)
SignalFrame.setTabOrder(self.chkBoxSyncSelection, self.txtEdProto)
SignalFrame.setTabOrder(self.txtEdProto, self.btnShowHideStartEnd)
SignalFrame.setTabOrder(self.btnShowHideStartEnd, self.spinBoxSelectionStart)
SignalFrame.setTabOrder(self.spinBoxSelectionStart, self.spinBoxSelectionEnd)
def retranslateUi(self, SignalFrame):
_translate = QtCore.QCoreApplication.translate
SignalFrame.setWindowTitle(_translate("SignalFrame", "Frame"))
self.cbModulationType.setToolTip(_translate("SignalFrame", "<html><head/><body><p>Choose signals modulation:</p><ul><li>Amplitude Shift Keying (ASK)</li><li>Frequency Shift Keying (FSK)</li><li>Phase Shift Keying (PSK)</li></ul></body></html>"))
self.cbModulationType.setItemText(0, _translate("SignalFrame", "ASK"))
self.cbModulationType.setItemText(1, _translate("SignalFrame", "FSK"))
self.cbModulationType.setItemText(2, _translate("SignalFrame", "PSK"))
self.btnAdvancedModulationSettings.setText(_translate("SignalFrame", "..."))
self.labelModulation.setText(_translate("SignalFrame", "Modulation:"))
self.chkBoxSyncSelection.setToolTip(_translate("SignalFrame", "If this is set to true, your selected protocol bits will show up in the signal view, and vice versa."))
self.chkBoxSyncSelection.setText(_translate("SignalFrame", "Sync Selection"))
self.spinBoxNoiseTreshold.setToolTip(_translate("SignalFrame", "<html><head/><body><p>Set the <span style=\" font-weight:600;\">noise magnitude</span> of your signal. You can tune this value to mute noise in your signal and reveal the true data.</p></body></html>"))
self.chkBoxShowProtocol.setToolTip(_translate("SignalFrame", "Show the extracted protocol based on the parameters InfoLen, PauseLen and ZeroTreshold (in QuadratureDemod-View).\n"
"\n"
"If you want your protocol to be better seperated, edit the PauseLen using right-click menu from a selection in SignalView or ProtocolView."))
self.chkBoxShowProtocol.setText(_translate("SignalFrame", "Show Signal as"))
self.labelNoise.setToolTip(_translate("SignalFrame", "<html><head/><body><p>Set the <span style=\" font-weight:600;\">noise magnitude</span> of your signal. You can tune this value to mute noise in your signal and reveal the true data.</p></body></html>"))
self.labelNoise.setText(_translate("SignalFrame", "Noise:"))
self.lineEditSignalName.setText(_translate("SignalFrame", "SignalName"))
self.cbProtoView.setItemText(0, _translate("SignalFrame", "Bits"))
self.cbProtoView.setItemText(1, _translate("SignalFrame", "Hex"))
self.cbProtoView.setItemText(2, _translate("SignalFrame", "ASCII"))
self.lInfoLenText.setToolTip(_translate("SignalFrame", "<html><head/><body><p>This is the length of one (raw) bit <span style=\" font-weight:600;\">in samples</span>.</p><p><br/></p><p>Tune this value using either <span style=\" font-style:italic;\">the spinbox on the right</span> or the <span style=\" font-style:italic;\">context-menu of the SignalView</span>.</p></body></html>"))
self.lInfoLenText.setText(_translate("SignalFrame", "Bit Length:"))
self.spinBoxInfoLen.setToolTip(_translate("SignalFrame", "<html><head/><body><p>This is the length of one (raw) bit <span style=\" font-weight:600;\">in samples</span>.</p><p><br/></p><p>Tune this value using either <span style=\" font-style:italic;\">the spinbox on the right</span> or the <span style=\" font-style:italic;\">context-menu of the SignalView</span>.</p></body></html>"))
self.spinBoxTolerance.setToolTip(_translate("SignalFrame", "<html><head/><body><p>This is the error tolerance for determining the <span style=\" font-weight:600;\">pulse lengths</span> in the demodulated signal.</p><p><span style=\" font-weight:400; font-style:italic;\">Example:</span> Say, we are reading a ones pulse and the tolerance value was set to 5. Then 5 errors (which must follow sequentially) are accepted.</p><p>Tune this value if you have <span style=\" font-weight:600;\">spiky data</span> after demodulation.</p></body></html>"))
self.lErrorTolerance.setToolTip(_translate("SignalFrame", "<html><head/><body><p>This is the error tolerance for determining the <span style=\" font-weight:600;\">pulse lengths</span> in the demodulated signal.</p><p><span style=\" font-weight:400; font-style:italic;\">Example:</span> Say, we are reading a ones pulse and the tolerance value was set to 5. Then 5 errors (which must follow sequentially) are accepted.</p><p>Tune this value if you have <span style=\" font-weight:600;\">spiky data</span> after demodulation.</p></body></html>"))
self.lErrorTolerance.setText(_translate("SignalFrame", "Error Tolerance:"))
self.lSignalViewText.setText(_translate("SignalFrame", "Signal View:"))
self.lCenterOffset.setToolTip(_translate("SignalFrame", "<html><head/><body><p>This is the threshold used for determining if a <span style=\" font-weight:600;\">bit is one or zero</span>. You can set it here or grab the middle of the area in <span style=\" font-style:italic;\">Quadrature Demod View.</span></p></body></html>"))
self.lCenterOffset.setText(_translate("SignalFrame", "Center:"))
self.spinBoxCenterOffset.setToolTip(_translate("SignalFrame", "<html><head/><body><p>This is the threshold used for determining if a <span style=\" font-weight:600;\">bit is one or zero</span>. You can set it here or grab the middle of the area in <span style=\" font-style:italic;\">Quadrature Demod View</span>.</p></body></html>"))
self.btnAutoDetect.setToolTip(_translate("SignalFrame", "<html><head/><body><p>Automatically detect Center and Bit Length, when you change the demodulation type. You can disable this behaviour for faster switching between demodulations.</p></body></html>"))
self.btnAutoDetect.setText(_translate("SignalFrame", "Autodetect parameters"))
        self.cbSignalView.setToolTip(_translate("SignalFrame", "<html><head/><body><p>Choose the view of your signal. Analog, Demodulated or Spectrogram.</p><p>The quadrature demodulation uses a <span style=\" font-weight:600;\">threshold of magnitude,</span> to <span style=\" font-weight:600;\">suppress noise</span>. All samples with a magnitude lower than this threshold will be eliminated (set to <span style=\" font-style:italic;\">-127</span>) after demod.</p><p>Tune this value by selecting a <span style=\" font-style:italic;\">noisy area</span> and marking it as noise using the <span style=\" font-weight:600;\">context menu</span>.</p><p>Current noise threshold is: </p></body></html>"))
self.cbSignalView.setItemText(0, _translate("SignalFrame", "Analog"))
self.cbSignalView.setItemText(1, _translate("SignalFrame", "Demodulated"))
self.cbSignalView.setItemText(2, _translate("SignalFrame", "Spectrogram"))
self.btnSaveSignal.setText(_translate("SignalFrame", "..."))
self.btnCloseSignal.setText(_translate("SignalFrame", "X"))
self.lSignalTyp.setText(_translate("SignalFrame", "<Signaltyp>"))
self.lSignalNr.setText(_translate("SignalFrame", "1:"))
self.btnInfo.setText(_translate("SignalFrame", "..."))
self.btnReplay.setToolTip(_translate("SignalFrame", "Replay signal"))
self.labelFFTWindowSize.setText(_translate("SignalFrame", "FFT Window Size:"))
self.labelSpectrogramMin.setText(_translate("SignalFrame", "Data<sub>min</sub>:"))
self.labelSpectrogramMax.setText(_translate("SignalFrame", "Data<sub>max</sub>:"))
self.lYScale.setText(_translate("SignalFrame", "Y-Scale"))
self.btnShowHideStartEnd.setText(_translate("SignalFrame", "-"))
self.lNumSelectedSamples.setToolTip(_translate("SignalFrame", "Number of currently selected samples."))
self.lNumSelectedSamples.setText(_translate("SignalFrame", "0"))
self.lTextSelectedSamples.setToolTip(_translate("SignalFrame", "Number of currently selected samples."))
self.lTextSelectedSamples.setText(_translate("SignalFrame", "selected"))
self.lDuration.setText(_translate("SignalFrame", "42 µs"))
self.labelRSSI.setText(_translate("SignalFrame", "RSSI: 0,434"))
self.btnFilter.setText(_translate("SignalFrame", "Filter (moving average)"))
self.lStart.setText(_translate("SignalFrame", "Start:"))
self.lEnd.setText(_translate("SignalFrame", "End:"))
self.lZoomText.setToolTip(_translate("SignalFrame", "<html><head/><body><p>Current (relative) Zoom. Standard is 100%, if you zoom in, this factor increases. You can directly set a value in the spinbox or use the <span style=\" font-weight:600;\">mousewheel to zoom</span>.</p></body></html>"))
self.lZoomText.setText(_translate("SignalFrame", "X-Zoom:"))
self.spinBoxXZoom.setToolTip(_translate("SignalFrame", "<html><head/><body><p>Current (relative) Zoom. Standard is 100%, if you zoom in, this factor increases. You can directly set a value in the spinbox or use the <span style=\" font-weight:600;\">mousewheel to zoom</span>.</p></body></html>"))
self.spinBoxXZoom.setSuffix(_translate("SignalFrame", "%"))
self.lSamplesInView.setText(_translate("SignalFrame", "0"))
self.lStrich.setText(_translate("SignalFrame", "/"))
self.lSamplesTotal.setText(_translate("SignalFrame", "0"))
self.lSamplesViewText.setText(_translate("SignalFrame", "Samples in view"))
from urh.ui.views.EpicGraphicView import EpicGraphicView
from urh.ui.views.LegendGraphicView import LegendGraphicView
from urh.ui.views.SpectrogramGraphicView import SpectrogramGraphicView
from urh.ui.views.TextEditProtocolView import TextEditProtocolView
from . import urh_rc
|
gpl-3.0
| -743,312,303,342,755,300
| 68.472269
| 688
| 0.734614
| false
| 3.828471
| false
| false
| false
|
appsembler/roles
|
logstash/templates/remove_old_indices.py
|
1
|
1444
|
#!/usr/bin/env python
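# Removes old "logstash-" indices from each configured Elasticsearch host using
# elasticsearch-curator: indices whose date in the name is older than
# DELETE_OLDER_THAN days are deleted. The {{ ... }} placeholders are filled in
# when this file is rendered as a template (e.g. by Ansible).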
import logging
import sys
import curator
import elasticsearch
import certifi
HOSTS = ["{{ logstash_output_elasticsearch_hosts | join('\", \"') }}"]
USERNAME = '{{ logstash_output_elasticsearch_user }}'
PASSWORD = '{{ logstash_output_elasticsearch_password }}'
DELETE_OLDER_THAN = {{ logstash_remove_older_than }}
def main():
for host in HOSTS:
scheme, _, domain = host.rpartition('://')
scheme = scheme if scheme else 'http'
basic_auth_uri = '{}://{}:{}@{}'.format(scheme, USERNAME, PASSWORD, domain)
client = elasticsearch.Elasticsearch([basic_auth_uri], verify_certs=True,
ca_certs=certifi.where())
index_list = curator.IndexList(client)
index_list.filter_by_regex(kind='prefix', value='logstash-')
index_list.filter_by_age(source='name', direction='older',
timestring='%Y.%m.%d', unit='days',
unit_count=DELETE_OLDER_THAN)
if len(index_list.indices):
logging.info('Deleting indices: {}'
.format(', '.join(index_list.indices)))
delete_indices = curator.DeleteIndices(index_list)
delete_indices.do_action()
else:
logging.info('No indices to delete')
if __name__ == '__main__':
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
main()
|
mit
| -3,186,540,673,562,931,000
| 33.380952
| 83
| 0.580332
| false
| 4.011111
| false
| false
| false
|
cogitare-ai/cogitare
|
cogitare/data/dataholder.py
|
1
|
15342
|
import torch
import math
from abc import ABCMeta, abstractmethod
from cogitare import utils
from six import add_metaclass
import numpy
from dask import threaded, delayed, compute, multiprocessing
def _identity(x):
return x
@add_metaclass(ABCMeta)
class AbsDataHolder(object):
"""
    An abstract object that acts as a data holder. A data holder is a utility to hold
    datasets, providing some simple functions to work with the dataset, such as
    sorting, splitting, dividing it into chunks, loading batches using multiple threads, and so on.
It's the recommended way to pass data to Cogitare's models because it already
provides a compatible interface to iterate over batches.
    To improve performance, the data holder loads batches using a multiprocessing
    and multithreading data loader built on `Dask <http://dask.pydata.org/>`_.
Usually, this object should not be used directly, only if you are developing a custom
data loader. Cogitare already provides the following implementations for the most
common data types:
- Tensors: :class:`~cogitare.data.TensorHolder`
- Numpy: :class:`~cogitare.data.NumpyHolder`
- Callable (functions that receive the sample id, and returns its
data): :class:`~cogitare.data.CallableHolder`
- :class:`~cogitare.data.AutoHolder`: inspect the data to choose one of the available data holders.
Args:
data (torch.Tensor, numpy.ndarray, callable): the data to be managed by the data holder.
batch_size (int): the size of the batch.
shuffle (bool): if True, shuffles the dataset after each iteration.
        drop_last (bool): if True, then skip the batch if its size is lower than **batch_size** (can
occur in the last batch).
total_samples (int): the number of total samples. If provided, this will limit the
number of samples to be accessed in the data.
mode (str): must be one of: 'sequential', 'threaded', 'multiprocessing'. Use one of them
            to choose the batch loading methods. Take a look
            here: https://dask.pydata.org/en/latest/scheduler-choice.html for an overview of the advantages of each mode.
        single (bool): if True, returns only the first element of each batch.
            This is designed for models that use one sample per
            batch (batch_size == 1): with ``single == True``, the sample itself is
            returned instead of a one-element list.
on_sample_loaded (callable): if provided, this function will be called when a new sample is loaded. It must
            receive one argument (the sample) and return one value that will replace the sample data.
This is used to apply pre-processing on single samples while loading.
on_batch_loaded (callable): if provided, this function will be called when a new batch is loaded. It must
            receive one argument (the batch data) and return the batch after applying some operation on the data. This
            can be used to apply pre-processing functions on a batch of data (such as image filtering, moving the
            data to the GPU, etc.).
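    Example (a minimal sketch using the Tensor-based subclass; the shapes and mode below are illustrative)::
        >>> data = TensorHolder(torch.zeros(100, 5), batch_size=10, mode='threaded')
        >>> len(data)  # number of batches
        10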
"""
@property
def total_samples(self):
"""Returns the number of individual samples in this dataset.
"""
return self._total_samples
@total_samples.setter
def total_samples(self, value):
if hasattr(self._data, '__len__'):
size = len(self._data)
else:
size = None
if size is not None:
utils.assert_raise(value <= size, ValueError,
                               'The value must be less than or equal to the '
                               'length of the input data')
utils.assert_raise(value >= 1, ValueError,
                           'number of samples must be greater than or equal to 1')
self._total_samples = value
self._remaining_samples = value
self._requires_reset = True
@property
def indices(self):
if self._indices is None:
self._indices = numpy.arange(self.total_samples)
return self._indices
@property
def batch_size(self):
"""The size of the mini-batch used by the iterator.
When a new batch_size is set, the iterator will reset.
"""
return self._batch_size
@batch_size.setter
def batch_size(self, value):
self._batch_size = value
self._requires_reset = True
def __init__(self, data, batch_size=1, shuffle=True, drop_last=False,
total_samples=None, mode='sequential', single=False,
on_sample_loaded=None, on_batch_loaded=None):
valid_modes = ['threaded', 'multiprocessing', 'sequential']
utils.assert_raise(mode in valid_modes, ValueError,
'"mode" must be one of: ' + ', '.join(valid_modes))
if on_sample_loaded is None:
on_sample_loaded = _identity
if on_batch_loaded is None:
on_batch_loaded = _identity
self._indices = None
self._single = single
self._mode = mode
self._total_samples = total_samples
self._remaining_samples = None
self._on_sample_loaded = on_sample_loaded
self._on_batch_loaded = on_batch_loaded
self._data = data
self._batch_size = batch_size
self._current_batch = 0
self._drop_last = drop_last
self._shuffle = shuffle
self._requires_reset = True
if mode == 'sequential':
self._get = None
elif mode == 'threaded':
self._get = threaded.get
else:
self._get = multiprocessing.get
def _clone(self):
return type(self)(data=self._data, batch_size=self._batch_size, shuffle=self._shuffle,
drop_last=self._drop_last, total_samples=self._total_samples,
mode=self._mode, single=self._single, on_sample_loaded=self._on_sample_loaded,
on_batch_loaded=self._on_batch_loaded)
def __repr__(self):
"""Using repr(data) or str(data), display the shape of the data.
"""
return '{} with {}x{} samples'.format(type(self).__name__, len(self), self._batch_size)
def __getitem__(self, key):
"""Get a sample in the dataset using its indices.
Example::
sample = data[0]
sample2 = data[1]
"""
return self._on_sample_loaded(self.get_sample(self.indices[key]))
def _get_batch_size(self):
batch_size = min(self._batch_size, self._remaining_samples)
if batch_size < self._batch_size and self._drop_last:
self._requires_reset = True
raise StopIteration
if batch_size == 0:
self._requires_reset = True
raise StopIteration
return batch_size
def _get_batch(self):
if self._requires_reset:
self.reset()
batch_size = self._get_batch_size()
def load(loader):
return [loader(self.__getitem__)(self._current_batch * self._batch_size + i)
for i in range(batch_size)]
if self._get:
# use dask
jobs = load(lambda x: delayed(x, traverse=False))
results = compute(jobs, scheduler=self._get)[0]
else:
results = load(_identity)
self._current_batch += 1
self._remaining_samples -= batch_size
results = self._on_batch_loaded(results)
if self._single:
return results[0]
return results
@abstractmethod
def get_sample(self, key):
pass
def __len__(self):
"""Return the number of batches in the dataset.
"""
if self._drop_last:
return self.total_samples // self._batch_size
else:
return (self.total_samples + self._batch_size - 1) // self._batch_size
def __iter__(self):
"""Creates an iterator to iterate over batches in the dataset.
After each iteration over the batches, the dataset will be shuffled if
the **shuffle** parameter is True.
Example::
for sample in data:
print(sample)
"""
return self
def __next__(self):
return self._get_batch()
next = __next__
def reset(self):
"""Reset the batch iterator.
        This method returns the iterator to the first sample, and shuffles the
        dataset if shuffle is enabled.
"""
self._requires_reset = False
self._current_batch = 0
self._remaining_samples = self.total_samples
if self._shuffle:
self.shuffle()
def shuffle(self):
"""Shuffle the samples in the dataset.
This operation will not affect the original data.
"""
numpy.random.shuffle(self.indices)
def split(self, ratio):
"""Split the data holder into two data holders.
The first one will receive *total_samples * ratio* samples, and the second
data holder will receive the remaining samples.
Args:
ratio (:obj:`float`): ratio of the split. Must be between 0 and 1.
Returns:
(data1, data2): two data holder, in the same type that the original.
Example::
>>> print(data)
TensorHolder with 875x64 samples
>>> data1, data2 = data.split(0.8)
>>> print(data1)
TensorHolder with 700x64 samples
>>> print(data2)
TensorHolder with 175x64 samples
"""
utils.assert_raise(0 < ratio < 1, ValueError, '"ratio" must be between 0 and 1')
pos = int(math.floor(self.total_samples * ratio))
data1 = self._clone()
data2 = self._clone()
data1._indices = self.indices[:pos]
data2._indices = self.indices[pos:]
data1._total_samples = pos
data2._total_samples = self.total_samples - pos
return data1, data2
def split_chunks(self, n):
"""Split the data holder into N data holders with the sample number of samples each.
Args:
n (int): number of new splits.
Returns:
output (list): list of N data holders.
Example::
>>> print(data)
TensorHolder with 875x64 samples
>>> data1, data2, data3 = data.split_chunks(3)
>>> print(data1)
TensorHolder with 292x64 samples
>>> print(data2)
TensorHolder with 292x64 samples
>>> print(data3)
TensorHolder with 292x64 samples
"""
size = self.total_samples // n
data = []
for i in range(n):
begin, end = i * size, min((i + 1) * size, self.total_samples)
holder = self._clone()
holder._indices = self.indices[begin:end]
holder._total_samples = end - begin
data.append(holder)
return data
class CallableHolder(AbsDataHolder):
"""CallableHolder is a data holder for abritary data type.
As data input, it uses a callable that receive the sample index as parameter,
and must return the sample.
It can be used to load non-Tensor or non-numpy datasets, such as texts, dicts, and anything else.
You are free to use CallableHolder with any data type.
.. note:: When using CallableHolder, you must specify the number of samples
in the dataset. The callable will be called asking for samples from 0 to (total_samples - 1).
Example::
>>> def load_sample(idx):
... return list(range(idx, idx + 10))
>>> # when using the CallableHolder. you must pass the number of samples to
>>> # be loaded.
>>> # you can set the total_samples using the parameter in the constructor
>>> data = CallableHolder(load_sample, batch_size=8, total_samples=20)
>>> # or by setting the property
>>> data.total_samples = 20
>>> next(data)
[[8, 9, 10, 11, 12, 13, 14, 15, 16, 17],
[9, 10, 11, 12, 13, 14, 15, 16, 17, 18],
[6, 7, 8, 9, 10, 11, 12, 13, 14, 15],
[11, 12, 13, 14, 15, 16, 17, 18, 19, 20],
[13, 14, 15, 16, 17, 18, 19, 20, 21, 22],
[7, 8, 9, 10, 11, 12, 13, 14, 15, 16],
[18, 19, 20, 21, 22, 23, 24, 25, 26, 27],
[17, 18, 19, 20, 21, 22, 23, 24, 25, 26]]
"""
@property
def total_samples(self):
"""The number of samples in the dataset. You must set this value before accessing the data.
"""
if self._total_samples is None:
raise ValueError('"total_samples" not defined. Callable objects requires the'
' number of total_samples before being used')
return super(CallableHolder, self).total_samples
@total_samples.setter
def total_samples(self, value):
return super(CallableHolder, self.__class__).total_samples.fset(self, value)
def __init__(self, *args, **kwargs):
super(CallableHolder, self).__init__(*args, **kwargs)
def get_sample(self, key):
return self._data(key)
class TensorHolder(AbsDataHolder):
"""
A data holder to work with :class:`torch.Tensor` objects.
Example::
>>> tensor = torch.Tensor([[1,2,3], [4,5,6], [7,8,9]])
>>> tensor
1 2 3
4 5 6
7 8 9
[torch.FloatTensor of size 3x3]
>>> data = TensorHolder(tensor, batch_size=2)
>>> for sample in data:
... print('Sample:')
... print(sample)
... print('Sample as tensor:')
... print(utils.to_tensor(sample))
Sample:
[
7
8
9
[torch.FloatTensor of size 3]
,
4
5
6
[torch.FloatTensor of size 3]
]
Sample as tensor:
7 8 9
4 5 6
[torch.FloatTensor of size 2x3]
Sample:
[
1
2
3
[torch.FloatTensor of size 3]
]
Sample as tensor:
1 2 3
[torch.FloatTensor of size 1x3]
"""
def __init__(self, *args, **kwargs):
super(TensorHolder, self).__init__(*args, **kwargs)
size = len(self._data)
if self._total_samples is None:
self.total_samples = size
def get_sample(self, key):
return self._data[key]
def NumpyHolder(data, *args, **kwargs):
"""
When creating the object, it converts the numpy data to Tensor using
:func:`torch.from_numpy` and then creates an :class:`~cogitare.data.TensorHolder`
instance.
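    A minimal usage sketch (the array contents below are illustrative)::
        >>> arr = numpy.array([[1.0, 2.0], [3.0, 4.0]])
        >>> data = NumpyHolder(arr, batch_size=1)
        >>> print(data)
        TensorHolder with 2x1 samples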
"""
data = torch.from_numpy(data)
return TensorHolder(data, *args, **kwargs)
def AutoHolder(data, *args, **kwargs):
"""Check the data type to infer which data holder to use.
"""
if torch.is_tensor(data):
return TensorHolder(data, *args, **kwargs)
elif isinstance(data, numpy.ndarray):
return NumpyHolder(data, *args, **kwargs)
elif callable(data):
return CallableHolder(data, *args, **kwargs)
else:
raise ValueError('Unable to infer data type!')
|
mit
| -4,970,938,022,273,481,000
| 32.352174
| 120
| 0.584865
| false
| 4.183801
| false
| false
| false
|
Aeva/voxelpress
|
old_stuff/old_python_stuff/arduino/acm_firmwares/acm_kind.py
|
1
|
1717
|
from glob import glob
from ..reprap_kind import ReprapKind
class ReprapACM(ReprapKind):
"""Repraps which are controlled by an ACM device of some kind
(usually an Arduino)."""
def __init__(self, connection, firmware="Unknown", *args, **kargs):
self.__serial = connection
self.__buffer = False
self.info = {}
# Set a plausible printer uuid, which may be overridden by the
# firmware driver.
self.info["uuid"] = self.__serial.make_uuid(firmware)
ReprapKind.__init__(self, *args, **kargs)
def shutdown(self, disconnected=False):
"""Callback used to turn off the backend and release any
resources."""
self.__serial.disconnect(disconnected)
def gcode(self, line):
"""Send a line of gcode to the printer, and returns data if
applicable."""
self.__serial.send(line)
return self.__serial.poll()
def __stream(self, fobject):
"""Extracts gcode commands from a file like object, removes
comments and blank lines, and then streams the commands to the
printer."""
self.hold()
for line in fobject:
if line.startswith(";"):
continue
code = line.split(";")[0].strip()
self.gcode(code)
def run_job(self, target):
"""Run a print job. Target can be a file path or file-like
object."""
fobject = None
if type(target) in [unicode, str]:
found = glob(target)
if found:
                # FIXME, should queue up multiple jobs, not just do one...?
fobject = open(found[0])
if fobject:
self.__stream(fobject)
|
gpl-3.0
| 1,986,246,930,187,726,600
| 32.666667
| 73
| 0.576005
| false
| 4.25
| false
| false
| false
|
Tjorriemorrie/trading
|
07_reinforcement/signals/sarsa.py
|
1
|
7351
|
'''
signals by MA and RSI and Ichimoku
'''
import pandas as pd
import numpy as np
from features import FeatureFactory
import pickle
from random import random, choice
from pprint import pprint
import time
currencies = [
# 'AUDUSD',
# 'EURGBP',
# 'EURJPY',
'EURUSD',
# 'GBPJPY',
# 'GBPUSD',
# 'NZDUSD',
# 'USDCAD',
# 'USDCHF',
# 'USDJPY',
]
intervals = [
# '60',
'1440',
]
actions = [
'stay-out',
'enter-long',
'stay-long',
'exit-long',
'enter-short',
'stay-short',
'exit-short',
]
def loadData(currency, interval):
# print 'loading dataframe...'
df = pd.read_csv(
r'../data/' + currency.upper() + interval + '.csv',
names=['date', 'time', 'open', 'high', 'low', 'close', 'volume'],
dtype={'open': 'float', 'high': 'float', 'low': 'float', 'close': 'float', 'volume': 'int'},
# parse_dates=[[0, 1]],
# index_col=0,
)
# print df.tail()
data = df.as_matrix()
opens = data[:, 2].astype(float)
highs = data[:, 3].astype(float)
lows = data[:, 4].astype(float)
closes = data[:, 5].astype(float)
volumes = data[:, 6].astype(int)
# print 'dataframe loaded'
return opens, highs, lows, closes, volumes
def loadThetas(currency, interval, cntFeatures):
# print 'loading thetas'
try:
with open('models/{0}_{1}.thts'.format(currency, interval), 'r') as f:
thetas = pickle.load(f)
except IOError:
thetas = [np.random.rand(cntFeatures) for a in actions]
# pprint(thetas)
# print 'thetas loaded'
return thetas
def saveThetas(currency, interval, thetas):
# print 'saving thetas'
with open('models/{0}_{1}.thts'.format(currency, interval), 'w') as f:
pickle.dump(thetas, f)
# print 'thetas saved'
def getReward(rewards, s, a):
'''
    If the action is stay-out, there is no reward: we only enter trades when we expect positive returns.
    If the action is an exit, there is also no reward: exits are not enforced; we only exit when we expect negative returns.
    Rewards are given only for entering and holding positions (as long as positive returns are expected).
'''
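    # action indices: 0 = stay-out; 3 and 6 = exit-long / exit-short (see the actions list above)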
if a == 0:
r = 0
elif a in [3, 6]:
r = 0
else:
r = rewards[s]
return r
def getActionStateValue(thetas, Fsa, a):
# pprint(Fsa)
# pprint(thetas[a])
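    # linear function approximation: Q(s, a) = dot(thetas[a], Fsa)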
Qsa = sum(f * t for f, t in zip(Fsa, thetas[a]))
return float(Qsa)
def getActionsAvailable(a):
    # Action indices follow the actions list above:
    # 0 stay-out, 1 enter-long, 2 stay-long, 3 exit-long,
    # 4 enter-short, 5 stay-short, 6 exit-short.
    # stay-out: stay-out & enter-long & enter-short
    if a == 0:
        return [0, 1, 4]
    # enter-long: stay-long
    elif a == 1:
        return [2]
    # stay-long: stay-long & exit-long
    elif a == 2:
        return [2, 3]
    # enter-short: stay-short
    elif a == 4:
        return [5]
    # stay-short: stay-short & exit-short
    elif a == 5:
        return [5, 6]
    else:
        raise Exception('no available actions for {0}'.format(a))
def getAction(thetas, features, a):
# exploration
actionsAvailable = getActionsAvailable(a)
# print 'actions available', actionsAvailable
if random() < epsilon:
a = choice(actionsAvailable)
# exploitation
else:
aMax = None
QsaHighest = -1000
for a in actionsAvailable:
Qsa = getActionStateValue(thetas, features[a], a)
if Qsa > QsaHighest:
QsaHighest = Qsa
aMax = a
a = aMax
return a
ff = FeatureFactory()
alpha = 0.1  # learning rate (step size for the theta update)
epsilon = 0.1  # exploration rate of the epsilon-greedy policy
gamma = 0.9  # discount factor for the next action-state value
if __name__ == '__main__':
interval = '1440'
# interval = choice(intervals)
for currency in currencies:
print '\n', currency, interval
# load data
opens, highs, lows, closes, volumes = loadData(currency, interval)
print 'data loaded'
dataSize = len(closes)
# extract features
features = ff.getFeatures(opens, highs, lows, closes, volumes)
print 'get features'
cntFeatures = len(features)
# pprint(features)
# get rewards
print 'get rewards'
rewards = ff.getRewardsCycle(closes)
# load thetas
print 'load thetas'
thetas = loadThetas(currency, interval, cntFeatures)
# train
outcomes = []
durations = []
print 'start'
for i in xrange(100):
# initialize state and action
a = actions.index('stay-out')
# print 'a start', a, actions[a]
# print 'len closes', len(closes)
# pprint(range(len(closes)))
s = choice(range(len(closes)))
# print 's start', s
iniS = s
# keep going until we hit an exit (that will be 1 episode/trade)
while a not in [3, 6]:
# set of features at state/index and action/noeffect
Fsa = features[s]
# take action a
# observe r
r = getReward(rewards, s, a)
# print s, 'r of', r, 'for', actions[a], 'from', iniS, 'till', s
# next state
ss = s + 1
if ss >= dataSize:
break
# Qsa (action-state-values)
Qsa = getActionStateValue(thetas, Fsa, a)
# print s, 'Qsa', Qsa
# start delta
delta = r - Qsa
# print s, 'delta start', delta
# get next action
aa = getAction(thetas, features, a)
# print s, 'a', aa, actions[aa]
# get features and Qsa
Fsa = features[aa]
Qsa = getActionStateValue(thetas, Fsa, aa)
# end delta
delta += gamma * Qsa
# print s, 'delta end', delta
# update thetas
thetas[a] = [theta + alpha * delta for theta in thetas[a]]
# pprint(thetas[a])
# normalize thetas
# pprint(thetas[a])
mmin = min(thetas[a])
mmax = max(thetas[a])
rrange = mmax - mmin
# print 'N', 'min', mmin, 'max', mmax, 'range', rrange
thetas[a] = [(mmax - t) / rrange for t in thetas[a]]
# print s, 'normalized', min(thetas[a]), max(thetas[a])
# until s is terminal
if aa in [3, 6]:
outcomes.append(closes[s] - closes[iniS] if aa == 3 else closes[iniS] - closes[s])
durations.append(s - iniS)
print '\n', '#', len(outcomes), actions[a], r
print 'Net outcomes', sum(outcomes)
print 'Avg durations', int(sum(durations) / len(durations))
wins = sum([1. for o in outcomes if o > 0])
print currency, 'Win ratio', int(wins / len(outcomes) * 100)
# time.sleep(0.3)
# if iniS not set, then set it
if a == 0 and aa in [1, 4]:
iniS = s
# s <- s' a <- a'
s = ss
a = aa
# save periodically
if i % 100 == 99:
saveThetas(currency, interval, thetas)
# print 'Net outcomes', sum(outcomes)
# print currency, 'Win ratio', int(wins / len(outcomes) * 100)
saveThetas(currency, interval, thetas)
|
mit
| 1,470,180,100,407,404,800
| 27.492248
| 134
| 0.513264
| false
| 3.628332
| false
| false
| false
|
TougalooCSC/CSC455Spring15Prototypes
|
prototype01/migrations/versions/47f6450771a6_.py
|
1
|
2742
|
"""empty message
Revision ID: 47f6450771a6
Revises: None
Create Date: 2015-04-15 16:44:40.764749
"""
# revision identifiers, used by Alembic.
revision = '47f6450771a6'
down_revision = None
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_table('decks')
op.drop_table('users')
op.drop_table('flashcard_responses')
op.drop_table('flashcards')
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_table('flashcards',
sa.Column('created_at', sa.DATETIME(), nullable=True),
sa.Column('updated_at', sa.DATETIME(), nullable=True),
sa.Column('is_active', sa.BOOLEAN(), nullable=True),
sa.Column('id', sa.INTEGER(), nullable=False),
sa.Column('question_text', sa.VARCHAR(length=256), nullable=True),
sa.Column('question_answer', sa.VARCHAR(length=127), nullable=True),
sa.Column('created_by', sa.INTEGER(), nullable=True),
sa.ForeignKeyConstraint(['created_by'], [u'users.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table('flashcard_responses',
sa.Column('created_at', sa.DATETIME(), nullable=True),
sa.Column('updated_at', sa.DATETIME(), nullable=True),
sa.Column('is_active', sa.BOOLEAN(), nullable=True),
sa.Column('id', sa.INTEGER(), nullable=False),
sa.Column('response', sa.VARCHAR(length=127), nullable=True),
sa.Column('flashcard_id', sa.INTEGER(), nullable=True),
sa.Column('user_id', sa.INTEGER(), nullable=True),
sa.ForeignKeyConstraint(['flashcard_id'], [u'flashcards.id'], ),
sa.ForeignKeyConstraint(['user_id'], [u'users.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table('users',
sa.Column('created_at', sa.DATETIME(), nullable=True),
sa.Column('updated_at', sa.DATETIME(), nullable=True),
sa.Column('is_active', sa.BOOLEAN(), nullable=True),
sa.Column('id', sa.INTEGER(), nullable=False),
sa.Column('name', sa.VARCHAR(length=120), nullable=True),
sa.Column('email', sa.VARCHAR(length=120), nullable=True),
sa.Column('password', sa.VARCHAR(length=30), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_table('decks',
sa.Column('created_at', sa.DATETIME(), nullable=True),
sa.Column('updated_at', sa.DATETIME(), nullable=True),
sa.Column('is_active', sa.BOOLEAN(), nullable=True),
sa.Column('id', sa.INTEGER(), nullable=False),
sa.Column('title', sa.VARCHAR(length=127), nullable=True),
sa.Column('created_by', sa.INTEGER(), nullable=True),
sa.ForeignKeyConstraint(['created_by'], [u'users.id'], ),
sa.PrimaryKeyConstraint('id')
)
### end Alembic commands ###
|
gpl-3.0
| 7,827,077,548,879,713,000
| 37.619718
| 72
| 0.660832
| false
| 3.43179
| false
| false
| false
|
gitprouser/appengine-bottle-skeleton
|
lib/ndb/msgprop_test.py
|
1
|
17472
|
#
# Copyright 2008 The ndb Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for msgprop.py."""
from protorpc import messages
from . import model
from . import msgprop
from . import test_utils
from .google_imports import datastore_errors
from .google_test_imports import unittest
class Color(messages.Enum):
RED = 620
GREEN = 495
BLUE = 450
SAMPLE_PB = r"""key <
app: "ndb-test-app-id"
path <
Element {
type: "Storage"
id: 1
}
>
>
entity_group <
Element {
type: "Storage"
id: 1
}
>
property <
name: "greet.text"
value <
stringValue: "abc"
>
multiple: false
>
raw_property <
meaning: 14
name: "greet.__protobuf__"
value <
stringValue: "\n\003abc\020{"
>
multiple: false
>
"""
class MsgPropTests(test_utils.NDBTest):
the_module = msgprop
def setUp(self):
super(MsgPropTests, self).setUp()
global Greeting
class Greeting(messages.Message):
text = messages.StringField(1, required=True)
when = messages.IntegerField(2)
color = messages.EnumField(Color, 3)
def testBasics(self):
class Storage(model.Model):
greet = msgprop.MessageProperty(Greeting, indexed_fields=['text'],
verbose_name='The Greeting')
self.assertEqual(Storage.greet._verbose_name, 'The Greeting')
greet = Greeting(text='abc', when=123)
store = Storage(greet=greet)
key = store.put()
result = key.get()
self.assertFalse(result is store)
self.assertEqual(result.greet.text, 'abc')
self.assertEqual(result.greet.when, 123)
self.assertEqual(result.greet, Greeting(when=123, text='abc'))
self.assertEqual(result,
Storage(greet=Greeting(when=123, text='abc'), key=key))
self.assertEqual(str(result._to_pb()), SAMPLE_PB)
def testValidator(self):
logs = []
def validator(prop, value):
logs.append((prop, value))
return value
class Storage(model.Model):
greet = msgprop.MessageProperty(Greeting, indexed_fields=['text'],
validator=validator)
greet = Greeting(text='abc', when=123)
store = Storage(greet=greet)
self.assertEqual(logs, [(Storage.greet, greet)])
def testReprMessageProperty(self):
greet1 = msgprop.MessageProperty(Greeting, 'foo')
self.assertEqual(repr(greet1), "MessageProperty(Greeting, 'foo')")
greet2 = msgprop.MessageProperty(Greeting, 'foo', protocol='protojson')
self.assertEqual(repr(greet2),
"MessageProperty(Greeting, 'foo', protocol='protojson')")
greet3 = msgprop.MessageProperty(Greeting, 'foo', indexed_fields=['text'])
self.assertEqual(
repr(greet3),
"MessageProperty(Greeting, 'foo', indexed_fields=('text',))")
greets = msgprop.MessageProperty(Greeting, 'foo', repeated=True)
self.assertEqual(repr(greets),
"MessageProperty(Greeting, 'foo', repeated=True)")
def testReprEnumProperty(self):
color = msgprop.EnumProperty(Color, 'bar')
self.assertEqual(repr(color), "EnumProperty(Color, 'bar')")
colors = msgprop.EnumProperty(Color, 'bar', repeated=True)
self.assertEqual(repr(colors), "EnumProperty(Color, 'bar', repeated=True)")
def testQuery(self):
class Storage(model.Model):
greet = msgprop.MessageProperty(Greeting, indexed_fields=['text'])
greet1 = Greeting(text='abc', when=123)
store1 = Storage(greet=greet1)
store1.put()
greet2 = Greeting(text='def', when=456)
store2 = Storage(greet=greet2)
store2.put()
q = Storage.query(Storage.greet.text == 'abc')
self.assertEqual(q.fetch(), [store1])
self.assertRaises(AttributeError, lambda: Storage.greet.when)
def testErrors(self):
class Storage(model.Model):
greet = msgprop.MessageProperty(Greeting, indexed_fields=['text'])
# Call MessageProperty(x) where x is not a Message class.
self.assertRaises(TypeError, msgprop.MessageProperty, Storage)
self.assertRaises(TypeError, msgprop.MessageProperty, 42)
self.assertRaises(TypeError, msgprop.MessageProperty, None)
# Call MessageProperty(Greeting, indexed_fields=x) where x
# includes invalid field names.
self.assertRaises(ValueError, msgprop.MessageProperty,
Greeting, indexed_fields=['text', 'nope'])
self.assertRaises(TypeError, msgprop.MessageProperty,
Greeting, indexed_fields=['text', 42])
self.assertRaises(TypeError, msgprop.MessageProperty,
Greeting, indexed_fields=['text', None])
self.assertRaises(ValueError, msgprop.MessageProperty,
Greeting, indexed_fields=['text', 'text']) # Duplicate.
# Set a MessageProperty value to a non-Message instance.
self.assertRaises(TypeError, Storage, greet=42)
def testNothingIndexed(self):
class Store(model.Model):
gr = msgprop.MessageProperty(Greeting)
gr = Greeting(text='abc', when=123)
st = Store(gr=gr)
st.put()
self.assertEqual(Store.query().fetch(), [st])
self.assertRaises(AttributeError, lambda: Store.gr.when)
def testForceProtocol(self):
class Store(model.Model):
gr = msgprop.MessageProperty(Greeting, protocol='protobuf')
gr = Greeting(text='abc', when=123)
st = Store(gr=gr)
st.put()
self.assertEqual(Store.query().fetch(), [st])
def testRepeatedMessageProperty(self):
class StoreSeveral(model.Model):
greets = msgprop.MessageProperty(Greeting, repeated=True,
indexed_fields=['text', 'when'])
ga = Greeting(text='abc', when=123)
gb = Greeting(text='abc', when=456)
gc = Greeting(text='def', when=123)
gd = Greeting(text='def', when=456)
s1 = StoreSeveral(greets=[ga, gb])
k1 = s1.put()
s2 = StoreSeveral(greets=[gc, gd])
k2 = s2.put()
res1 = k1.get()
self.assertEqual(res1, s1)
self.assertFalse(res1 is s1)
self.assertEqual(res1.greets, [ga, gb])
res = StoreSeveral.query(StoreSeveral.greets.text == 'abc').fetch()
self.assertEqual(res, [s1])
res = StoreSeveral.query(StoreSeveral.greets.when == 123).fetch()
self.assertEqual(res, [s1, s2])
def testIndexedEnumField(self):
class Storage(model.Model):
greet = msgprop.MessageProperty(Greeting, indexed_fields=['color'])
gred = Greeting(text='red', color=Color.RED)
gblue = Greeting(text='blue', color=Color.BLUE)
s1 = Storage(greet=gred)
s1.put()
s2 = Storage(greet=gblue)
s2.put()
self.assertEqual(Storage.query(Storage.greet.color == Color.RED).fetch(),
[s1])
self.assertEqual(Storage.query(Storage.greet.color < Color.RED).fetch(),
[s2])
def testRepeatedIndexedField(self):
class AltGreeting(messages.Message):
lines = messages.StringField(1, repeated=True)
when = messages.IntegerField(2)
class Store(model.Model):
altg = msgprop.MessageProperty(AltGreeting, indexed_fields=['lines'])
s1 = Store(altg=AltGreeting(lines=['foo', 'bar'], when=123))
s1.put()
s2 = Store(altg=AltGreeting(lines=['baz', 'bletch'], when=456))
s2.put()
res = Store.query(Store.altg.lines == 'foo').fetch()
self.assertEqual(res, [s1])
def testRepeatedIndexedFieldInRepeatedMessageProperty(self):
class AltGreeting(messages.Message):
lines = messages.StringField(1, repeated=True)
when = messages.IntegerField(2)
self.assertRaises(TypeError, msgprop.MessageProperty,
AltGreeting, indexed_fields=['lines'], repeated=True)
def testBytesField(self):
class BytesGreeting(messages.Message):
data = messages.BytesField(1)
when = messages.IntegerField(2)
class Store(model.Model):
greet = msgprop.MessageProperty(BytesGreeting, indexed_fields=['data'])
bg = BytesGreeting(data='\xff', when=123)
st = Store(greet=bg)
st.put()
res = Store.query(Store.greet.data == '\xff').fetch()
self.assertEqual(res, [st])
def testNestedMessageField(self):
class Inner(messages.Message):
count = messages.IntegerField(1)
greet = messages.MessageField(Greeting, 2)
class Outer(messages.Message):
inner = messages.MessageField(Inner, 1)
extra = messages.StringField(2)
class Store(model.Model):
outer = msgprop.MessageProperty(Outer,
indexed_fields=['inner.greet.text'])
greet = Greeting(text='abc', when=123)
inner = Inner(count=42, greet=greet)
outer = Outer(inner=inner)
st = Store(outer=outer)
st.put()
res = Store.query(Store.outer.inner.greet.text == 'abc').fetch()
self.assertEqual(res, [st])
def testNestedMessageFieldIsNone(self):
class Outer(messages.Message):
greeting = messages.MessageField(Greeting, 1)
class Store(model.Model):
outer = msgprop.MessageProperty(Outer, indexed_fields=['greeting.text'])
outer1 = Outer(greeting=None)
store1 = Store(outer=outer1)
store1.put()
res = Store.query(Store.outer.greeting.text == 'abc').fetch()
self.assertEqual(res, [])
def testRepeatedNestedMessageField(self):
class Outer(messages.Message):
greeting = messages.MessageField(Greeting, 1)
extra = messages.IntegerField(2)
class Store(model.Model):
outers = msgprop.MessageProperty(Outer, repeated=True,
indexed_fields=['greeting.text'])
gr1 = Greeting(text='abc', when=123)
gr2 = Greeting(text='def', when=456)
outer1 = Outer(greeting=gr1, extra=1)
outer2 = Outer(greeting=gr2, extra=2)
store1 = Store(outers=[outer1])
store1.put()
store2 = Store(outers=[outer2])
store2.put()
store3 = Store(outers=[outer1, outer2])
store3.put()
res = Store.query(Store.outers.greeting.text == 'abc').fetch()
self.assertEqual(res, [store1, store3])
def testNestedRepeatedMessageField(self):
class Outer(messages.Message):
greetings = messages.MessageField(Greeting, 1, repeated=True)
extra = messages.IntegerField(2)
class Store(model.Model):
outer = msgprop.MessageProperty(Outer, indexed_fields=['greetings.text',
'extra'])
gr1 = Greeting(text='abc', when=123)
gr2 = Greeting(text='def', when=456)
outer1 = Outer(greetings=[gr1], extra=1)
outer2 = Outer(greetings=[gr2], extra=2)
outer3 = Outer(greetings=[gr1, gr2], extra=3)
store1 = Store(outer=outer1)
store1.put()
store2 = Store(outer=outer2)
store2.put()
store3 = Store(outer=outer3)
store3.put()
res = Store.query(Store.outer.greetings.text == 'abc').fetch()
self.assertEqual(res, [store1, store3])
def testNestedFieldErrors(self):
class Outer(messages.Message):
greetings = messages.MessageField(Greeting, 1, repeated=True)
extra = messages.IntegerField(2)
# Parent/child conflicts.
self.assertRaises(ValueError, msgprop.MessageProperty,
Outer, indexed_fields=['greetings.text', 'greetings'])
self.assertRaises(ValueError, msgprop.MessageProperty,
Outer, indexed_fields=['greetings', 'greetings.text'])
# Duplicate inner field.
self.assertRaises(ValueError, msgprop.MessageProperty,
Outer, indexed_fields=['greetings.text',
'greetings.text'])
# Can't index MessageField.
self.assertRaises(ValueError, msgprop.MessageProperty,
Outer, indexed_fields=['greetings'])
# Can't specify subfields for non-MessageField.
self.assertRaises(ValueError, msgprop.MessageProperty,
Outer, indexed_fields=['extra.foobar'])
# Non-existent subfield.
self.assertRaises(ValueError, msgprop.MessageProperty,
Outer, indexed_fields=['greetings.foobar'])
def testDoubleNestedRepeatErrors(self):
class Inner(messages.Message):
greets = messages.MessageField(Greeting, 1, repeated=True)
class Outer(messages.Message):
inner = messages.MessageField(Inner, 1)
inners = messages.MessageField(Inner, 2, repeated=True)
msgprop.MessageProperty(Inner, repeated=True) # Should not fail
msgprop.MessageProperty(Outer, repeated=True) # Should not fail
self.assertRaises(TypeError, msgprop.MessageProperty, Inner,
repeated=True, indexed_fields=['greets.text'])
self.assertRaises(TypeError, msgprop.MessageProperty, Outer,
indexed_fields=['inners.greets.text'])
self.assertRaises(TypeError, msgprop.MessageProperty, Outer,
repeated=True, indexed_fields=['inner.greets.text'])
def testEnumProperty(self):
class Foo(model.Model):
color = msgprop.EnumProperty(Color, default=Color.RED,
choices=[Color.RED, Color.GREEN])
colors = msgprop.EnumProperty(Color, repeated=True)
foo1 = Foo(colors=[Color.RED, Color.GREEN])
foo1.put()
foo2 = Foo(color=Color.GREEN, colors=[Color.RED, Color.BLUE])
foo2.put()
res = Foo.query(Foo.color == Color.RED).fetch()
self.assertEqual(res, [foo1])
res = Foo.query(Foo.colors == Color.RED).fetch()
self.assertEqual(res, [foo1, foo2])
class FooBar(model.Model):
color = msgprop.EnumProperty(Color, indexed=False,
verbose_name='The Color String',
validator=lambda prop, val: Color.BLUE)
self.assertEqual(FooBar.color._verbose_name, 'The Color String')
foobar1 = FooBar(color=Color.RED)
self.assertEqual(foobar1.color, Color.BLUE) # Tests the validator
foobar1.put()
self.assertRaises(datastore_errors.BadFilterError,
lambda: FooBar.color == Color.RED)
# Test some errors.
self.assertRaises(datastore_errors.BadValueError,
Foo, color=Color.BLUE) # Not in choices
self.assertRaises(TypeError, Foo, color='RED') # Not an enum
self.assertRaises(TypeError, Foo, color=620) # Not an enum
# Invalid default
self.assertRaises(TypeError, msgprop.EnumProperty, Color, default=42)
# Invalid choice
self.assertRaises(TypeError, msgprop.EnumProperty, Color, choices=[42])
foo2.colors.append(42)
self.ExpectWarnings()
self.assertRaises(TypeError, foo2.put) # Late-stage validation
class Bar(model.Model):
color = msgprop.EnumProperty(Color, required=True)
bar1 = Bar()
self.assertRaises(datastore_errors.BadValueError, bar1.put) # Missing value
def testPropertyNameConflict(self):
class MyMsg(messages.Message):
blob_ = messages.StringField(1)
msgprop.MessageProperty(MyMsg) # Should be okay
self.assertRaises(ValueError, msgprop.MessageProperty,
MyMsg, indexed_fields=['blob_'])
def testProtocolChange(self):
class Storage(model.Model):
greeting = msgprop.MessageProperty(Greeting, protocol='protobuf')
greet1 = Greeting(text='abc', when=123)
store1 = Storage(greeting=greet1)
key1 = store1.put()
class Storage(model.Model):
greeting = msgprop.MessageProperty(Greeting, protocol='protojson')
store2 = key1.get()
self.assertEqual(store2.greeting, greet1)
def testProjectionQueries(self):
class Wrapper(messages.Message):
greet = messages.MessageField(Greeting, 1)
class Storage(model.Model):
wrap = msgprop.MessageProperty(Wrapper, indexed_fields=['greet.text',
'greet.when'])
gr1 = Greeting(text='abc', when=123)
wr1 = Wrapper(greet=gr1)
st1 = Storage(wrap=wr1)
st1.put()
res1 = Storage.query().get(projection=['wrap.greet.text',
Storage.wrap.greet.when])
self.assertNotEqual(res1, st1)
self.assertEqual(res1.wrap, st1.wrap)
res2 = Storage.query().get(projection=['wrap.greet.text'])
self.assertEqual(res2.wrap, Wrapper(greet=Greeting(text='abc')))
def testProjectionQueriesRepeatedField(self):
class Wrapper(messages.Message):
greets = messages.MessageField(Greeting, 1, repeated=True)
class Storage(model.Model):
wrap = msgprop.MessageProperty(Wrapper, indexed_fields=['greets.text',
'greets.when'])
gr1 = Greeting(text='abc', when=123)
wr1 = Wrapper(greets=[gr1])
st1 = Storage(wrap=wr1)
st1.put()
res1 = Storage.query().get(projection=['wrap.greets.text',
Storage.wrap.greets.when])
self.assertNotEqual(res1, st1)
self.assertEqual(res1.wrap, st1.wrap)
res2 = Storage.query().get(projection=['wrap.greets.text'])
self.assertEqual(res2.wrap, Wrapper(greets=[Greeting(text='abc')]))
if __name__ == '__main__':
unittest.main()
|
apache-2.0
| 6,636,100,694,403,759,000
| 36.655172
| 80
| 0.651843
| false
| 3.654466
| true
| false
| false
|
akrause2014/dispel4py
|
dispel4py/new/mpi_process.py
|
1
|
4853
|
# Copyright (c) The University of Edinburgh 2014
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from mpi4py import MPI
comm=MPI.COMM_WORLD
rank=comm.Get_rank()
size=comm.Get_size()
from processor import GenericWrapper, simpleLogger, STATUS_TERMINATED, STATUS_ACTIVE
import processor
import types
import traceback
def process(workflow, inputs, args):
processes={}
inputmappings = {}
outputmappings = {}
success=True
nodes = [ node.getContainedObject() for node in workflow.graph.nodes() ]
if rank == 0 and not args.simple:
try:
processes, inputmappings, outputmappings = processor.assign_and_connect(workflow, size)
except:
success=False
success=comm.bcast(success,root=0)
if args.simple or not success:
ubergraph = processor.create_partitioned(workflow)
nodes = [ node.getContainedObject() for node in ubergraph.graph.nodes() ]
if rank == 0:
print 'Partitions: %s' % ', '.join(('[%s]' % ', '.join((pe.id for pe in part)) for part in workflow.partitions))
for node in ubergraph.graph.nodes():
wrapperPE = node.getContainedObject()
print('%s contains %s' % (wrapperPE.id, [n.getContainedObject().id for n in wrapperPE.workflow.graph.nodes()]))
try:
processes, inputmappings, outputmappings = processor.assign_and_connect(ubergraph, size)
inputs = processor.map_inputs_to_partitions(ubergraph, inputs)
success = True
except:
# print traceback.format_exc()
print 'dispel4py.mpi_process: Not enough processes for execution of graph'
success = False
success=comm.bcast(success,root=0)
if not success:
return
try:
inputs = { pe.id : v for pe, v in inputs.iteritems() }
except AttributeError:
pass
processes=comm.bcast(processes,root=0)
inputmappings=comm.bcast(inputmappings,root=0)
outputmappings=comm.bcast(outputmappings,root=0)
inputs=comm.bcast(inputs,root=0)
if rank == 0:
print 'Processes: %s' % processes
# print 'Inputs: %s' % inputs
for pe in nodes:
if rank in processes[pe.id]:
provided_inputs = processor.get_inputs(pe, inputs)
wrapper = MPIWrapper(pe, provided_inputs)
wrapper.targets = outputmappings[rank]
wrapper.sources = inputmappings[rank]
wrapper.process()
class MPIWrapper(GenericWrapper):
def __init__(self, pe, provided_inputs=None):
GenericWrapper.__init__(self, pe)
self.pe.log = types.MethodType(simpleLogger, pe)
self.pe.rank = rank
self.provided_inputs = provided_inputs
self.terminated = 0
def _read(self):
result = super(MPIWrapper, self)._read()
if result is not None:
return result
status = MPI.Status()
msg=comm.recv(source=MPI.ANY_SOURCE, tag=MPI.ANY_TAG, status=status)
tag = status.Get_tag()
while tag == STATUS_TERMINATED:
self.terminated += 1
if self.terminated >= self._num_sources:
break
else:
msg=comm.recv(source=MPI.ANY_SOURCE, tag=MPI.ANY_TAG, status=status)
tag = status.Get_tag()
return msg, tag
def _write(self, name, data):
try:
targets = self.targets[name]
except KeyError:
# no targets
# self.pe.log('Produced output: %s' % {name: data})
return
for (inputName, communication) in targets:
output = { inputName : data }
dest = communication.getDestination(output)
for i in dest:
# self.pe.log('Sending %s to %s' % (output, i))
request=comm.isend(output, tag=STATUS_ACTIVE, dest=i)
status = MPI.Status()
request.Wait(status)
def _terminate(self):
for output, targets in self.targets.iteritems():
for (inputName, communication) in targets:
for i in communication.destinations:
# self.pe.log('Terminating consumer %s' % i)
request=comm.isend(None, tag=STATUS_TERMINATED, dest=i)
|
apache-2.0
| 4,039,095,439,215,027,000
| 36.620155
| 127
| 0.608902
| false
| 4.071309
| false
| false
| false
|
tim-shea/learnability
|
network_test.py
|
1
|
1160
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from scipy.stats import binned_statistic as bin_stat
from lif import *
from syn import *
prefs.codegen.target = 'numpy'
defaultclock.dt = 1*ms
params = LifParams(constant_input=3)
params.update(SynParams())
neurons = LifNeurons(1000, params)
excitatory_synapses = ExcitatorySynapses(neurons, params)
excitatory_synapses.connect('i != j and i < 800', p=0.1)
excitatory_synapses.w = 1.0
inhibitory_synapses = InhibitorySynapses(neurons, params)
inhibitory_synapses.connect('i != j and i >= 800', p=0.1)
inhibitory_synapses.w = -1.0
rate_monitor = PopulationRateMonitor(neurons)
spike_monitor = SpikeMonitor(neurons)
network = Network()
network.add(neurons, excitatory_synapses, inhibitory_synapses, rate_monitor, spike_monitor)
network.run(10*second, report='stdout', report_period=1.0*second, namespace={})
figure()
subplot(211)
suptitle('Network Activity')
binned_rate = bin_stat(rate_monitor.t/second, rate_monitor.rate, bins=100)
plot(binned_rate[1][:-1], binned_rate[0])
ylabel('Firing Rate (Hz)')
subplot(212)
plot(spike_monitor.t/second, spike_monitor.i, '.k')
ylabel('Neuron #')
xlabel('Time (s)')
show()
|
cc0-1.0
| -6,118,570,399,146,441,000
| 32.142857
| 91
| 0.741379
| false
| 2.768496
| false
| true
| false
|
sumpfgottheit/arps
|
arps_old/restserver/views/taskresult.py
|
1
|
3216
|
# -*- coding: utf-8 -*-
__author__ = 'saf'
import logging
from flask import jsonify, render_template, url_for, request
from flask.views import View
from arps_old.models import CeleryResult
from arps_old.restserver import app, redis_conn
log = logging.getLogger(__name__)
class TaskResultView(View):
methods = ['GET', ]
endpoint = 'endpoint_taskresult_detail'
endpoint_list = 'endpoint_taskresult_list'
endpoint_ajax_results = 'endpoint_taskresults_ajax'
template = 'taskresult/taskresult_detail.html'
template_list = 'taskresult/taskresult_list.html'
def dispatch_request(self, *args, **kwargs):
_id = kwargs.get('id', None)
if request.endpoint == self.endpoint_ajax_results:
return self.ajax_results()
if request.endpoint == self.endpoint_list:
return self.list(_id)
elif _id is not None:
return self.show_object(_id)
self.return_404(_id)
def ajax_results(self):
T = TaskUpdateRepoMetadataMetaStore
results_for_repo = T.query.join(T.result).filter(T.release_id == release_id, T.repository_id == repository_id).order_by(CeleryResult.start.desc()).all()
results_for_repo = [r.result for r in results_for_repo]
results = []
for result in results_for_repo:
results.append(result.json)
results[-1]['detail_url'] = url_for(TaskResultView.endpoint, id=result.id)
return jsonify({'data': results})
def list(self, task):
results = CeleryResult.query.filter_by(task=task).order_by(CeleryResult.submitted.desc()).limit(20).all()
return render_template(self.template_list, results=results, task=task)
def show_object(self, _id):
result = CeleryResult.query.get(_id)
if redis_conn.llen(_id) > app.config['MAX_LINES_FOR_STDOUT_ERR']:
a = redis_conn.lrange(_id, 0, app.config['MAX_LINES_FOR_STDOUT_ERR'] // 2)
b = redis_conn.lrange(_id, redis_conn.llen(_id) - app.config['MAX_LINES_FOR_STDOUT_ERR'] // 2, -1)
n = redis_conn.llen(_id) - app.config['MAX_LINES_FOR_STDOUT_ERR']
a = [(int(line[0]), line[1:]) for line in [line.decode('utf-8') for line in a]]
b = [(int(line[0]), line[1:]) for line in [line.decode('utf-8') for line in b]]
c = [(3, '========================================================' + "=" * len(str(n))),
(3, '============ TOO MUCH DATA - SKIPPED {} LINES ============'.format(n)),
(3, '========================================================' + "=" * len(str(n)))]
lines = a + c + b
else:
lines = redis_conn.lrange(_id, 0, -1)
lines = [(int(line[0]), line[1:]) for line in [line.decode('utf-8') for line in lines]]
return render_template(self.template, result=result, lines=reversed(lines))
taskresult_view = TaskResultView.as_view(TaskResultView.endpoint)
app.add_url_rule('/tasks/detail/<id>', view_func=taskresult_view)
app.add_url_rule('/tasks', view_func=taskresult_view, endpoint=TaskResultView.endpoint_list)
app.add_url_rule('/tasks/ajax', view_func=taskresult_view, endpoint=TaskResultView.endpoint_ajax_results)
|
mit
| 5,123,974,780,708,314,000
| 43.054795
| 160
| 0.598881
| false
| 3.443255
| false
| false
| false
|
twneale/rexlex
|
rexlex/log_config.py
|
1
|
7091
|
'''
Establish custom log levels for rexlexer's verbose output.
'''
import logging
from rexlex.config import LOG_MSG_MAXWIDTH
# ---------------------------------------------------------------------------
# Establish custom log levels.
# ---------------------------------------------------------------------------
# Used to report tokens getting yielded.
REXLEX_TRACE_RESULT = 9
# Used to report starting, stopping, etc.
REXLEX_TRACE_META = 8
# Used to report changes to lexer state.
REXLEX_TRACE_STATE = 7
# Used to report on specific rules.
REXLEX_TRACE_RULE = 6
# Used to dump as much info as possible.
REXLEX_TRACE = 5
REXLEX_LOG_LEVELS = (
(REXLEX_TRACE_RESULT, 'REXLEX_TRACE_RESULT', 'rexlex_trace_result'),
(REXLEX_TRACE_META, 'REXLEX_TRACE_META', 'rexlex_trace_meta'),
(REXLEX_TRACE_STATE, 'REXLEX_TRACE_STATE', 'rexlex_trace_state'),
(REXLEX_TRACE_RULE, 'REXLEX_TRACE_RULE', 'rexlex_trace_rule'),
(REXLEX_TRACE, 'REXLEX_TRACE', 'rexlex_trace'),
)
for loglevel, loglevel_name, method_name in REXLEX_LOG_LEVELS:
logging.addLevelName(loglevel, loglevel_name)
def rexlex_trace_result(self, message, *args, **kws):
if self.isEnabledFor(REXLEX_TRACE_RESULT):
self._log(REXLEX_TRACE_RESULT, message, args, **kws)
setattr(logging.Logger, 'rexlex_trace_result', rexlex_trace_result)
def rexlex_trace_meta(self, message, *args, **kws):
if self.isEnabledFor(REXLEX_TRACE_META):
self._log(REXLEX_TRACE_META, message, args, **kws)
setattr(logging.Logger, 'rexlex_trace_meta', rexlex_trace_meta)
def rexlex_trace_state(self, message, *args, **kws):
if self.isEnabledFor(REXLEX_TRACE_STATE):
self._log(REXLEX_TRACE_STATE, message, args, **kws)
setattr(logging.Logger, 'rexlex_trace_state', rexlex_trace_state)
def rexlex_trace_rule(self, message, *args, **kws):
if self.isEnabledFor(REXLEX_TRACE_RULE):
self._log(REXLEX_TRACE_RULE, message, args, **kws)
setattr(logging.Logger, 'rexlex_trace_rule', rexlex_trace_rule)
def rexlex_trace(self, message, *args, **kws):
if self.isEnabledFor(REXLEX_TRACE):
self._log(REXLEX_TRACE, message, args, **kws)
setattr(logging.Logger, 'rexlex_trace', rexlex_trace)
# ---------------------------------------------------------------------------
# Colorize them.
# ---------------------------------------------------------------------------
#
# Copyright (C) 2010-2012 Vinay Sajip. All rights reserved.
# Licensed under the new BSD license.
#
import ctypes
import logging
import os
class ColorizingStreamHandler(logging.StreamHandler):
# color names to indices
color_map = {
'black': 0,
'red': 1,
'green': 2,
'yellow': 3,
'blue': 4,
'magenta': 5,
'cyan': 6,
'white': 7,
}
#levels to (background, foreground, bold/intense)
if os.name == 'nt':
level_map = {
REXLEX_TRACE: (None, 'blue', True),
REXLEX_TRACE_RULE: (None, 'white', False),
REXLEX_TRACE_STATE: (None, 'yellow', True),
REXLEX_TRACE_META: (None, 'red', True),
REXLEX_TRACE_RESULT: ('red', 'white', True),
}
else:
level_map = {
REXLEX_TRACE: (None, 'blue', False),
REXLEX_TRACE_RULE: (None, 'white', False),
REXLEX_TRACE_STATE: (None, 'yellow', False),
REXLEX_TRACE_META: (None, 'red', False),
REXLEX_TRACE_RESULT: ('red', 'white', True),
}
csi = '\x1b['
reset = '\x1b[0m'
@property
def is_tty(self):
# bluff for Jenkins
if os.environ.get('JENKINS_URL'):
return True
isatty = getattr(self.stream, 'isatty', None)
return isatty and isatty()
def emit(self, record):
try:
message = self.format(record)
stream = self.stream
if not self.is_tty:
stream.write(message)
else:
self.output_colorized(message)
stream.write(getattr(self, 'terminator', '\n'))
self.flush()
except (KeyboardInterrupt, SystemExit):
raise
except:
self.handleError(record)
if os.name != 'nt':
def output_colorized(self, message): # NOQA
self.stream.write(message)
else:
import re
ansi_esc = re.compile(r'\x1b\[((?:\d+)(?:;(?:\d+))*)m')
nt_color_map = {
0: 0x00, # black
1: 0x04, # red
2: 0x02, # green
3: 0x06, # yellow
4: 0x01, # blue
5: 0x05, # magenta
6: 0x03, # cyan
7: 0x07, # white
}
def output_colorized(self, message): # NOQA
parts = self.ansi_esc.split(message)
write = self.stream.write
h = None
fd = getattr(self.stream, 'fileno', None)
if fd is not None:
fd = fd()
if fd in (1, 2): # stdout or stderr
h = ctypes.windll.kernel32.GetStdHandle(-10 - fd)
while parts:
text = parts.pop(0)
if text:
write(text)
if parts:
params = parts.pop(0)
if h is not None:
params = [int(p) for p in params.split(';')]
color = 0
for p in params:
if 40 <= p <= 47:
color |= self.nt_color_map[p - 40] << 4
elif 30 <= p <= 37:
color |= self.nt_color_map[p - 30]
elif p == 1:
color |= 0x08 # foreground intensity on
elif p == 0: # reset to default color
color = 0x07
else:
pass # error condition ignored
ctypes.windll.kernel32.SetConsoleTextAttribute(h,
color)
def colorize(self, message, record):
if record.levelno in self.level_map:
bg, fg, bold = self.level_map[record.levelno]
params = []
if bg in self.color_map:
params.append(str(self.color_map[bg] + 40))
if fg in self.color_map:
params.append(str(self.color_map[fg] + 30))
if bold:
params.append('1')
if params:
message = ''.join((self.csi, ';'.join(params),
'm', message, self.reset))
return message
def format(self, record):
message = logging.StreamHandler.format(self, record)
if self.is_tty:
# Don't colorize any traceback
parts = message.split('\n', 1)
parts[0] = self.colorize(parts[0], record)
message = '\n'.join(parts)
return message
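# ---------------------------------------------------------------------------
# Hypothetical usage sketch (not part of the original module): wire the
# colorizing handler to a logger and exercise the custom trace levels above.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    _demo_logger = logging.getLogger('rexlex.demo')
    _demo_logger.setLevel(REXLEX_TRACE)
    _demo_logger.addHandler(ColorizingStreamHandler())
    _demo_logger.rexlex_trace_meta('lexer starting')
    _demo_logger.rexlex_trace_state('entering state: root')
    _demo_logger.rexlex_trace_result('yielding token (0, Name, "foo")')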
|
bsd-3-clause
| -7,196,659,918,402,147,000
| 33.42233
| 77
| 0.502468
| false
| 3.736038
| false
| false
| false
|
supermitch/mech-ai
|
server/game.py
|
1
|
2542
|
import datetime
import json
import logging
import maps
import queue
import state
import utils
import world
class GAME_STATUS(object):
""" Game status constants. """
lobby = 'lobby' # In matchmaking lobby, waiting for all players
playing = 'playing' # In game mode, waiting for turns
complete = 'complete' # Game finished
cancelled = 'cancelled' # Broken?
class PLAYER_STATUS(object):
waiting = 'waiting' # Hasn't joined the lobby yet
joined = 'joined' # Has joined the lobby
playing = 'playing' # Sending moves and waiting for game state
lost = 'lost' # Missed turns/broken?
class Game(object):
def __init__(self, id=None, players=None, name='Mech AI', map_name='default', rounds=17):
"""
Initialize a new game.
Note that when we load a game from the repo, we init an empty
game, so all our arguments to the constructor are optional.
"""
self.id = id
self.name = name if name else 'Mech AI'
self.map_name = map_name if map_name else 'default'
self.players = players # List of player usernames
self.winner = None
self.status = GAME_STATUS.lobby
self.created = datetime.datetime.now()
# These attributes are persisted in the state, not DB properties
map = maps.get_map(self.map_name)
self.state = state.State(map=map, rounds=rounds, players=players)
self.queue = queue.Queue(players=players)
self.transactions = []
self.transactions.append({
'move': None,
'message': (True, 'Initial state'),
'state': self.state.jsonable,
})
@property
def not_joined(self):
""" Return list of unjoined players. """
return ', '.join(self.queue.not_joined)
def set_user_status(self, username, status):
""" Update Queue with new status. """
self.queue.set_status(username, status)
def update(self, username, move):
""" Execute a round. """
the_world = world.World(self) # Convert our self (a game object) into a World
success, reason = the_world.update(move)
if success:
self.queue.increment_move()
self.state.increment_turn()
if self.state.game_complete:
self.status = GAME_STATUS.complete
self.transactions.append({
'move': move,
'message': (success, reason),
'state': self.state.jsonable,
})
return success, reason
|
mit
| -3,785,946,842,478,620,700
| 30.382716
| 93
| 0.606609
| false
| 3.934985
| false
| false
| false
|
rndusr/stig
|
stig/utils/__init__.py
|
1
|
1595
|
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details
# http://www.gnu.org/licenses/gpl-3.0.txt
from types import SimpleNamespace
from ._converter import DataSizeConverter
convert = SimpleNamespace(bandwidth=DataSizeConverter(),
size=DataSizeConverter())
def cached_property(fget=None, *, after_creation=None):
"""
Property that replaces itself with the requested value when accessed
`after_creation` is called with the instance of the property when the
property is accessed for the first time.
"""
# https://stackoverflow.com/a/6849299
class _cached_property():
def __init__(self, fget):
self._fget = fget
self._property_name = fget.__name__
self._after_creation = after_creation
self._cache = {}
def __get__(self, obj, cls):
value = self._fget(obj)
setattr(obj, self._property_name, value)
if self._after_creation is not None:
self._after_creation(obj)
return value
if fget is None:
return _cached_property
else:
return _cached_property(fget)
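# Minimal illustration (hypothetical, not part of stig itself) of the decorator
# above: the first access runs fget, replaces the property with a plain
# instance attribute, and then calls `after_creation` with the owning instance.
if __name__ == '__main__':
    class _Example():
        @cached_property(after_creation=lambda self: print('cache filled'))
        def answer(self):
            print('computing once')
            return 42
    ex = _Example()
    print(ex.answer)  # prints "computing once", then "cache filled", then 42
    print(ex.answer)  # now a plain attribute; prints 42 only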
|
gpl-3.0
| 4,341,910,219,779,885,600
| 34.444444
| 73
| 0.662696
| false
| 4.346049
| false
| false
| false
|
mozilla/stoneridge
|
srcleaner.py
|
1
|
1738
|
#!/usr/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v. 2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at http://mozilla.org/MPL/2.0/.
import logging
import os
import shutil
import sys
import time
import stoneridge
class StoneRidgeCleaner(object):
def __init__(self):
self.workdir = stoneridge.get_config('stoneridge', 'work')
self.keep = stoneridge.get_config_int('cleaner', 'keep')
def run(self):
logging.debug('cleaner running')
with stoneridge.cwd(self.workdir):
while True:
listing = os.listdir('.')
logging.debug('candidate files: %s' % (listing,))
directories = [l for l in listing
if os.path.isdir(l) and not l.startswith('.')]
logging.debug('directories: %s' % (directories,))
times = [(d, os.stat(d).st_mtime) for d in directories]
times.sort(key=lambda x: x[1])
delete_us = times[:-self.keep]
logging.debug('directories to delete: %s' % (delete_us,))
for d in delete_us:
logging.debug('removing %s' % (d,))
shutil.rmtree(d)
# Check again in a minute
time.sleep(60)
def daemon(args):
cleaner = StoneRidgeCleaner()
cleaner.run()
os.unlink(args.pidfile)
sys.exit(0)
@stoneridge.main
def main():
"""A simple cleanup program for stone ridge that blows away the working
directory
"""
parser = stoneridge.DaemonArgumentParser()
args = parser.parse_args()
parser.start_daemon(daemon, args=args)
|
mpl-2.0
| -5,283,611,898,606,058,000
| 27.966667
| 78
| 0.581703
| false
| 3.811404
| false
| false
| false
|
codingneo/CLRPrediction
|
src/model/ftrl_proximal.py
|
1
|
3602
|
"""Follow The Regularized Leader Proximal Online Learning
Author:
"""
from math import exp, sqrt
class model(object):
''' Our main algorithm: Follow the regularized leader - proximal
In short,
this is an adaptive-learning-rate sparse logistic-regression with
efficient L1-L2-regularization
Reference:
http://www.eecs.tufts.edu/~dsculley/papers/ad-click-prediction.pdf
'''
def __init__(self, alpha, beta, L1, L2, D, interaction):
# parameters
self.alpha = alpha
self.beta = beta
self.L1 = L1
self.L2 = L2
# feature related parameters
self.D = D
self.interaction = interaction
# model
# n: squared sum of past gradients
# z: weights
# w: lazy weights
self.n = [0.] * D
self.z = [0.] * D
self.w = {}
def _indices(self, x):
''' A helper generator that yields the indices in x
The purpose of this generator is to make the following
code a bit cleaner when doing feature interaction.
'''
# first yield index of the bias term
yield 0
# then yield the normal indices
for index in x:
yield index
# now yield interactions (if applicable)
if self.interaction:
D = self.D
L = len(x)
x = sorted(x)
for i in xrange(L):
for j in xrange(i+1, L):
# one-hot encode interactions with hash trick
yield abs(hash(str(x[i]) + '_' + str(x[j]))) % D
def predict(self, x):
''' Get probability estimation on x
INPUT:
x: features
OUTPUT:
probability of p(y = 1 | x; w)
'''
# parameters
alpha = self.alpha
beta = self.beta
L1 = self.L1
L2 = self.L2
# model
n = self.n
z = self.z
w = {}
# wTx is the inner product of w and x
wTx = 0.
for i in self._indices(x):
sign = -1. if z[i] < 0 else 1. # get sign of z[i]
# build w on the fly using z and n, hence the name - lazy weights
            # we do this at prediction time instead of update time because
            # it lets us avoid storing the complete w
if sign * z[i] <= L1:
# w[i] vanishes due to L1 regularization
w[i] = 0.
else:
# apply prediction time L1, L2 regularization to z and get w
w[i] = (sign * L1 - z[i]) / ((beta + sqrt(n[i])) / alpha + L2)
wTx += w[i]
# cache the current w for update stage
self.w = w
# bounded sigmoid function, this is the probability estimation
return 1. / (1. + exp(-max(min(wTx, 10.), -10.)))
def update(self, x, p, y):
''' Update model using x, p, y
INPUT:
x: feature, a list of indices
p: click probability prediction of our model
y: answer
MODIFIES:
self.n: increase by squared gradient
self.z: weights
'''
# parameter
alpha = self.alpha
# model
n = self.n
z = self.z
w = self.w
# gradient under logloss
g = p - y
# update z and n
for i in self._indices(x):
sigma = (sqrt(n[i] + g * g) - sqrt(n[i])) / alpha
z[i] += g - sigma * w[i]
n[i] += g * g
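# Hypothetical usage sketch with toy data (not part of the original file):
# hash raw features into indices in [1, D), predict the click probability,
# then update the model with the observed label.
if __name__ == '__main__':
    learner = model(alpha=.1, beta=1., L1=1., L2=1., D=2 ** 20,
                    interaction=False)
    toy_data = [([12, 345, 6789], 1.), ([12, 99], 0.), ([345, 99], 1.)]
    for epoch in xrange(3):
        for x, y in toy_data:
            p = learner.predict(x)
            learner.update(x, p, y)
    print 'p(click | [12, 345, 6789]) = %.4f' % learner.predict([12, 345, 6789])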
|
apache-2.0
| 8,417,160,105,458,115,000
| 25.688889
| 79
| 0.493615
| false
| 4.020089
| false
| false
| false
|
nerdvegas/rez
|
src/rezgui/widgets/VariantSummaryWidget.py
|
1
|
4520
|
from Qt import QtCompat, QtCore, QtWidgets
from rezgui.util import create_pane, get_timestamp_str
from rez.packages import Package, Variant
from rez.util import find_last_sublist
class VariantSummaryWidget(QtWidgets.QWidget):
def __init__(self, parent=None):
super(VariantSummaryWidget, self).__init__(parent)
self.variant = None
self.label = QtWidgets.QLabel()
self.table = QtWidgets.QTableWidget(0, 1)
self.table.setGridStyle(QtCore.Qt.DotLine)
self.table.setFocusPolicy(QtCore.Qt.NoFocus)
self.table.setSelectionMode(QtWidgets.QAbstractItemView.NoSelection)
self.table.setAlternatingRowColors(True)
hh = self.table.horizontalHeader()
hh.setStretchLastSection(True)
hh.setVisible(False)
vh = self.table.verticalHeader()
QtCompat.QHeaderView.setSectionResizeMode(
vh, QtWidgets.QHeaderView.ResizeToContents)
create_pane([self.label, self.table], False, compact=True,
parent_widget=self)
self.clear()
def clear(self):
self.label.setText("no package selected")
self.table.clear()
self.table.setRowCount(0)
vh = self.table.verticalHeader()
vh.setVisible(False)
self.setEnabled(False)
def set_variant(self, variant):
if variant == self.variant:
return
if variant is None:
self.clear()
else:
self.setEnabled(True)
if isinstance(variant, Package):
label_name = variant.qualified_name
location = variant.uri
else:
label_name = variant.qualified_package_name
location = variant.parent.uri
label = "%s@%s" % (label_name, variant.wrapped.location)
self.label.setText(label)
self.table.clear()
rows = []
if variant.description:
desc = variant.description
max_chars = 1000
if len(desc) > max_chars:
desc = desc[:max_chars] + "..."
rows.append(("description: ", desc))
if variant.uri:
rows.append(("location: ", location))
if variant.timestamp:
release_time_str = get_timestamp_str(variant.timestamp)
rows.append(("released: ", release_time_str))
if variant.authors:
txt = "; ".join(variant.authors)
rows.append(("authors: ", txt))
if variant.requires:
var_strs = [str(x) for x in variant.requires]
if isinstance(variant, Variant):
# put variant-specific requires in square brackets
if variant.requires:
index = find_last_sublist(variant.requires, variant.requires)
if index is not None:
var_strs[index] = "[%s" % var_strs[index]
index2 = index + len(variant.requires) - 1
var_strs[index2] = "%s]" % var_strs[index2]
txt = "; ".join(var_strs)
rows.append(("requires: ", txt))
self.table.setRowCount(len(rows))
for i, row in enumerate(rows):
label, value = row
item = QtWidgets.QTableWidgetItem(label)
item.setTextAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignTop)
self.table.setVerticalHeaderItem(i, item)
item = QtWidgets.QTableWidgetItem(value)
self.table.setItem(i, 0, item)
vh = self.table.verticalHeader()
vh.setVisible(True)
self.table.resizeRowsToContents()
self.variant = variant
# Copyright 2013-2016 Allan Johns.
#
# This library is free software: you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation, either
# version 3 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/>.
|
lgpl-3.0
| -3,228,950,963,447,475,000
| 37.632479
| 85
| 0.589159
| false
| 4.329502
| false
| false
| false
|
innes213/TradingTools
|
examples/dashboard.py
|
1
|
2782
|
from pyhoofinance.defs import *
from pyhoofinance.quotedata import get_quote
from tradingtools.market_metrics.historic_change_and_stdv import s_and_p_historic
from tradingtools.market_metrics.market_cap_index_performance import market_cap_index_performance
from tradingtools.market_metrics.sector_performance import sector_performance
from tradingtools.technicals.indicators.SMA import SMA
if __name__ == '__main__':
day_ranges = [1, 2, 5, 10, 20, 100, 200, 500]
print '\n================= S&P Dashboard =================\n'
print '\nMarket Cap index performance:\n'
data = market_cap_index_performance(dayranges=day_ranges)
if data is not None:
outstr = 'Index\t'
for i in day_ranges:
outstr = outstr + str(i) + '-day\t'
print outstr
for idx, perf_list in data:
outstr = '%s: \t' % idx
for perf in perf_list:
outstr = outstr + '%5.2f%%\t' % (100 * perf)
print outstr
print '\nSummary of price changes\n'
data = s_and_p_historic(1)
for daydata in data:
outstr = '%12s: ' % str(daydata['tradedate']) + \
'Advancers: %5i \t' % daydata['gainers'] + \
'Decliners: %5i \t' % daydata['decliners'] + \
'Average change: %2.2f%% \t' % daydata['avgpercentchange'] + \
'Std Dev: %2.2f%% \t' % daydata['percentchangestdev'] + \
'Total Volume: %i \t' % int(daydata['volume'])
print outstr
print '\nS & P Sector Performance\n'
data = sector_performance(day_ranges)
if data is not None:
outstr = 'Sector'
for i in day_ranges:
outstr = outstr + '\t%i-day' % i
print outstr
for symbol, perf_data in data:
outstr = '%s:' % symbol
for perf in perf_data:
outstr = outstr + '\t%3.2f%%' % (100 * perf)
print outstr
# Sector Rotation triggers
print '\nS & P Sector Rotation\n'
spyquote = get_quote('SPY')
spylast = spyquote[LAST_TRADE_PRICE_ONLY_STR]
d0 = spyquote[LAST_TRADE_DATE_STR]
#[TODO: replace number of days with 1 month and 1 year
# get S&P 500 1 year performance and moving average
spymadays = 240 # values greater than 36 diverge from yahoo and etrade sma calculations
spysma = SMA(num_periods=1, window_size=spymadays).calculate_for_symbol('SPY')[0]
spymadelta = 100 * (spylast - spysma) / spysma
num_days = 22
data = sector_performance(num_days)
print d0.strftime('As of %d %b, %Y')
print 'SPY difference from %i moving average: %3.2f%% ' % (spymadays, spymadelta)
print '%i-Day Performance' % num_days
for symbol, perf in data:
print '%s: %3.2f%%' % (symbol, 100 * perf)
|
bsd-2-clause
| 7,186,727,864,974,381,000
| 39.318841
| 97
| 0.593817
| false
| 3.269095
| false
| false
| false
|
googleapis/googleapis-gen
|
google/ads/googleads/v6/googleads-py/google/ads/googleads/v6/services/types/shared_criterion_service.py
|
1
|
5893
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.ads.googleads.v6.enums.types import response_content_type as gage_response_content_type
from google.ads.googleads.v6.resources.types import shared_criterion as gagr_shared_criterion
from google.rpc import status_pb2 # type: ignore
__protobuf__ = proto.module(
package='google.ads.googleads.v6.services',
marshal='google.ads.googleads.v6',
manifest={
'GetSharedCriterionRequest',
'MutateSharedCriteriaRequest',
'SharedCriterionOperation',
'MutateSharedCriteriaResponse',
'MutateSharedCriterionResult',
},
)
class GetSharedCriterionRequest(proto.Message):
r"""Request message for
[SharedCriterionService.GetSharedCriterion][google.ads.googleads.v6.services.SharedCriterionService.GetSharedCriterion].
Attributes:
resource_name (str):
Required. The resource name of the shared
criterion to fetch.
"""
resource_name = proto.Field(
proto.STRING,
number=1,
)
class MutateSharedCriteriaRequest(proto.Message):
r"""Request message for
[SharedCriterionService.MutateSharedCriteria][google.ads.googleads.v6.services.SharedCriterionService.MutateSharedCriteria].
Attributes:
customer_id (str):
Required. The ID of the customer whose shared
criteria are being modified.
operations (Sequence[google.ads.googleads.v6.services.types.SharedCriterionOperation]):
Required. The list of operations to perform
on individual shared criteria.
partial_failure (bool):
If true, successful operations will be
carried out and invalid operations will return
errors. If false, all operations will be carried
out in one transaction if and only if they are
all valid. Default is false.
validate_only (bool):
If true, the request is validated but not
executed. Only errors are returned, not results.
response_content_type (google.ads.googleads.v6.enums.types.ResponseContentTypeEnum.ResponseContentType):
The response content type setting. Determines
whether the mutable resource or just the
resource name should be returned post mutation.
"""
customer_id = proto.Field(
proto.STRING,
number=1,
)
operations = proto.RepeatedField(
proto.MESSAGE,
number=2,
message='SharedCriterionOperation',
)
partial_failure = proto.Field(
proto.BOOL,
number=3,
)
validate_only = proto.Field(
proto.BOOL,
number=4,
)
response_content_type = proto.Field(
proto.ENUM,
number=5,
enum=gage_response_content_type.ResponseContentTypeEnum.ResponseContentType,
)
class SharedCriterionOperation(proto.Message):
r"""A single operation (create, remove) on an shared criterion.
Attributes:
create (google.ads.googleads.v6.resources.types.SharedCriterion):
Create operation: No resource name is
expected for the new shared criterion.
remove (str):
Remove operation: A resource name for the removed shared
criterion is expected, in this format:
``customers/{customer_id}/sharedCriteria/{shared_set_id}~{criterion_id}``
"""
create = proto.Field(
proto.MESSAGE,
number=1,
oneof='operation',
message=gagr_shared_criterion.SharedCriterion,
)
remove = proto.Field(
proto.STRING,
number=3,
oneof='operation',
)
class MutateSharedCriteriaResponse(proto.Message):
r"""Response message for a shared criterion mutate.
Attributes:
partial_failure_error (google.rpc.status_pb2.Status):
Errors that pertain to operation failures in the partial
failure mode. Returned only when partial_failure = true and
all errors occur inside the operations. If any errors occur
outside the operations (e.g. auth errors), we return an RPC
level error.
results (Sequence[google.ads.googleads.v6.services.types.MutateSharedCriterionResult]):
All results for the mutate.
"""
partial_failure_error = proto.Field(
proto.MESSAGE,
number=3,
message=status_pb2.Status,
)
results = proto.RepeatedField(
proto.MESSAGE,
number=2,
message='MutateSharedCriterionResult',
)
class MutateSharedCriterionResult(proto.Message):
r"""The result for the shared criterion mutate.
Attributes:
resource_name (str):
Returned for successful operations.
shared_criterion (google.ads.googleads.v6.resources.types.SharedCriterion):
The mutated shared criterion with only mutable fields after
mutate. The field will only be returned when
response_content_type is set to "MUTABLE_RESOURCE".
"""
resource_name = proto.Field(
proto.STRING,
number=1,
)
shared_criterion = proto.Field(
proto.MESSAGE,
number=2,
message=gagr_shared_criterion.SharedCriterion,
)
__all__ = tuple(sorted(__protobuf__.manifest))
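# Hypothetical usage sketch (illustrative resource names, not from the API
# docs): proto-plus message classes accept keyword arguments, so a remove-only
# mutate request for one shared criterion can be built like this.
if __name__ == '__main__':
    _operation = SharedCriterionOperation(
        remove='customers/1234567890/sharedCriteria/111~222',
    )
    _request = MutateSharedCriteriaRequest(
        customer_id='1234567890',
        operations=[_operation],
        validate_only=True,
    )
    print(_request)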
|
apache-2.0
| -4,445,974,067,140,955,000
| 32.674286
| 128
| 0.665535
| false
| 4.261027
| false
| false
| false
|
Winand/pandas
|
pandas/core/internals.py
|
1
|
186942
|
import copy
from warnings import catch_warnings
import itertools
import re
import operator
from datetime import datetime, timedelta, date
from collections import defaultdict
from functools import partial
import numpy as np
from pandas.core.base import PandasObject
from pandas.core.dtypes.dtypes import (
ExtensionDtype, DatetimeTZDtype,
CategoricalDtype)
from pandas.core.dtypes.common import (
_TD_DTYPE, _NS_DTYPE,
_ensure_int64, _ensure_platform_int,
is_integer,
is_dtype_equal,
is_timedelta64_dtype,
is_datetime64_dtype, is_datetimetz, is_sparse,
is_categorical, is_categorical_dtype,
is_integer_dtype,
is_datetime64tz_dtype,
is_bool_dtype,
is_object_dtype,
is_datetimelike_v_numeric,
is_float_dtype, is_numeric_dtype,
is_numeric_v_string_like, is_extension_type,
is_list_like,
is_re,
is_re_compilable,
is_scalar,
_get_dtype)
from pandas.core.dtypes.cast import (
maybe_downcast_to_dtype,
maybe_upcast,
maybe_promote,
infer_dtype_from,
infer_dtype_from_scalar,
soft_convert_objects,
maybe_convert_objects,
astype_nansafe,
find_common_type)
from pandas.core.dtypes.missing import (
isna, notna, array_equivalent,
_isna_compat,
is_null_datelike_scalar)
import pandas.core.dtypes.concat as _concat
from pandas.core.dtypes.generic import ABCSeries, ABCDatetimeIndex
from pandas.core.common import is_null_slice
import pandas.core.algorithms as algos
from pandas.core.index import Index, MultiIndex, _ensure_index
from pandas.core.indexing import maybe_convert_indices, length_of_indexer
from pandas.core.categorical import Categorical, _maybe_to_categorical
from pandas.core.indexes.datetimes import DatetimeIndex
from pandas.io.formats.printing import pprint_thing
import pandas.core.missing as missing
from pandas.core.sparse.array import _maybe_to_sparse, SparseArray
from pandas._libs import lib, tslib
from pandas._libs.tslib import Timedelta
from pandas._libs.lib import BlockPlacement
import pandas.core.computation.expressions as expressions
from pandas.util._decorators import cache_readonly
from pandas.util._validators import validate_bool_kwarg
from pandas import compat
from pandas.compat import range, map, zip, u
class Block(PandasObject):
"""
Canonical n-dimensional unit of homogeneous dtype contained in a pandas
data structure
Index-ignorant; let the container take care of that
"""
__slots__ = ['_mgr_locs', 'values', 'ndim']
is_numeric = False
is_float = False
is_integer = False
is_complex = False
is_datetime = False
is_datetimetz = False
is_timedelta = False
is_bool = False
is_object = False
is_categorical = False
is_sparse = False
_box_to_block_values = True
_can_hold_na = False
_downcast_dtype = None
_can_consolidate = True
_verify_integrity = True
_validate_ndim = True
_ftype = 'dense'
_holder = None
def __init__(self, values, placement, ndim=None, fastpath=False):
if ndim is None:
ndim = values.ndim
elif values.ndim != ndim:
raise ValueError('Wrong number of dimensions')
self.ndim = ndim
self.mgr_locs = placement
self.values = values
if ndim and len(self.mgr_locs) != len(self.values):
raise ValueError('Wrong number of items passed %d, placement '
'implies %d' % (len(self.values),
len(self.mgr_locs)))
@property
def _consolidate_key(self):
return (self._can_consolidate, self.dtype.name)
@property
def _is_single_block(self):
return self.ndim == 1
@property
def is_view(self):
""" return a boolean if I am possibly a view """
return self.values.base is not None
@property
def is_datelike(self):
""" return True if I am a non-datelike """
return self.is_datetime or self.is_timedelta
def is_categorical_astype(self, dtype):
"""
        validate that we have a dtype that is astype-able to categorical,
returns a boolean if we are a categorical
"""
if dtype is Categorical or dtype is CategoricalDtype:
# this is a pd.Categorical, but is not
# a valid type for astypeing
raise TypeError("invalid type {0} for astype".format(dtype))
elif is_categorical_dtype(dtype):
return True
return False
def external_values(self, dtype=None):
""" return an outside world format, currently just the ndarray """
return self.values
def internal_values(self, dtype=None):
""" return an internal format, currently just the ndarray
this should be the pure internal API format
"""
return self.values
def formatting_values(self):
"""Return the internal values used by the DataFrame/SeriesFormatter"""
return self.internal_values()
def get_values(self, dtype=None):
"""
return an internal format, currently just the ndarray
        this is often overridden to handle to_dense-like operations
"""
if is_object_dtype(dtype):
return self.values.astype(object)
return self.values
def to_dense(self):
return self.values.view()
@property
def _na_value(self):
return np.nan
@property
def fill_value(self):
return np.nan
@property
def mgr_locs(self):
return self._mgr_locs
@property
def array_dtype(self):
""" the dtype to return if I want to construct this block as an
array
"""
return self.dtype
def make_block(self, values, placement=None, ndim=None, **kwargs):
"""
        Create a new block, with type inference; propagate any values that
        are not specified
"""
if placement is None:
placement = self.mgr_locs
if ndim is None:
ndim = self.ndim
return make_block(values, placement=placement, ndim=ndim, **kwargs)
def make_block_scalar(self, values, **kwargs):
"""
Create a ScalarBlock
"""
return ScalarBlock(values)
def make_block_same_class(self, values, placement=None, fastpath=True,
**kwargs):
""" Wrap given values in a block of same type as self. """
if placement is None:
placement = self.mgr_locs
return make_block(values, placement=placement, klass=self.__class__,
fastpath=fastpath, **kwargs)
@mgr_locs.setter
def mgr_locs(self, new_mgr_locs):
if not isinstance(new_mgr_locs, BlockPlacement):
new_mgr_locs = BlockPlacement(new_mgr_locs)
self._mgr_locs = new_mgr_locs
def __unicode__(self):
# don't want to print out all of the items here
name = pprint_thing(self.__class__.__name__)
if self._is_single_block:
result = '%s: %s dtype: %s' % (name, len(self), self.dtype)
else:
shape = ' x '.join([pprint_thing(s) for s in self.shape])
result = '%s: %s, %s, dtype: %s' % (name, pprint_thing(
self.mgr_locs.indexer), shape, self.dtype)
return result
def __len__(self):
return len(self.values)
def __getstate__(self):
return self.mgr_locs.indexer, self.values
def __setstate__(self, state):
self.mgr_locs = BlockPlacement(state[0])
self.values = state[1]
self.ndim = self.values.ndim
def _slice(self, slicer):
""" return a slice of my values """
return self.values[slicer]
def reshape_nd(self, labels, shape, ref_items, mgr=None):
"""
Parameters
----------
labels : list of new axis labels
shape : new shape
ref_items : new ref_items
return a new block that is transformed to a nd block
"""
return _block2d_to_blocknd(values=self.get_values().T,
placement=self.mgr_locs, shape=shape,
labels=labels, ref_items=ref_items)
def getitem_block(self, slicer, new_mgr_locs=None):
"""
Perform __getitem__-like, return result as block.
As of now, only supports slices that preserve dimensionality.
"""
if new_mgr_locs is None:
if isinstance(slicer, tuple):
axis0_slicer = slicer[0]
else:
axis0_slicer = slicer
new_mgr_locs = self.mgr_locs[axis0_slicer]
new_values = self._slice(slicer)
if self._validate_ndim and new_values.ndim != self.ndim:
raise ValueError("Only same dim slicing is allowed")
return self.make_block_same_class(new_values, new_mgr_locs)
@property
def shape(self):
return self.values.shape
@property
def itemsize(self):
return self.values.itemsize
@property
def dtype(self):
return self.values.dtype
@property
def ftype(self):
return "%s:%s" % (self.dtype, self._ftype)
def merge(self, other):
return _merge_blocks([self, other])
def reindex_axis(self, indexer, method=None, axis=1, fill_value=None,
limit=None, mask_info=None):
"""
Reindex using pre-computed indexer information
"""
if axis < 1:
raise AssertionError('axis must be at least 1, got %d' % axis)
if fill_value is None:
fill_value = self.fill_value
new_values = algos.take_nd(self.values, indexer, axis,
fill_value=fill_value, mask_info=mask_info)
return self.make_block(new_values, fastpath=True)
def iget(self, i):
return self.values[i]
def set(self, locs, values, check=False):
"""
Modify Block in-place with new item value
Returns
-------
None
"""
self.values[locs] = values
def delete(self, loc):
"""
Delete given loc(-s) from block in-place.
"""
self.values = np.delete(self.values, loc, 0)
self.mgr_locs = self.mgr_locs.delete(loc)
def apply(self, func, mgr=None, **kwargs):
""" apply the function to my values; return a block if we are not
one
"""
with np.errstate(all='ignore'):
result = func(self.values, **kwargs)
if not isinstance(result, Block):
result = self.make_block(values=_block_shape(result,
ndim=self.ndim))
return result
def fillna(self, value, limit=None, inplace=False, downcast=None,
mgr=None):
""" fillna on the block with the value. If we fail, then convert to
ObjectBlock and try again
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if not self._can_hold_na:
if inplace:
return self
else:
return self.copy()
mask = isna(self.values)
if limit is not None:
if not is_integer(limit):
raise ValueError('Limit must be an integer')
if limit < 1:
raise ValueError('Limit must be greater than 0')
if self.ndim > 2:
raise NotImplementedError("number of dimensions for 'fillna' "
"is currently limited to 2")
mask[mask.cumsum(self.ndim - 1) > limit] = False
# fillna, but if we cannot coerce, then try again as an ObjectBlock
try:
values, _, _, _ = self._try_coerce_args(self.values, value)
blocks = self.putmask(mask, value, inplace=inplace)
blocks = [b.make_block(values=self._try_coerce_result(b.values))
for b in blocks]
return self._maybe_downcast(blocks, downcast)
except (TypeError, ValueError):
# we can't process the value, but nothing to do
if not mask.any():
return self if inplace else self.copy()
# operate column-by-column
def f(m, v, i):
block = self.coerce_to_target_dtype(value)
# slice out our block
if i is not None:
block = block.getitem_block(slice(i, i + 1))
return block.fillna(value,
limit=limit,
inplace=inplace,
downcast=None)
return self.split_and_operate(mask, f, inplace)
def split_and_operate(self, mask, f, inplace):
"""
split the block per-column, and apply the callable f
per-column, return a new block for each. Handle
masking which will not change a block unless needed.
Parameters
----------
mask : 2-d boolean mask
f : callable accepting (1d-mask, 1d values, indexer)
inplace : boolean
Returns
-------
list of blocks
"""
if mask is None:
mask = np.ones(self.shape, dtype=bool)
new_values = self.values
def make_a_block(nv, ref_loc):
if isinstance(nv, Block):
block = nv
elif isinstance(nv, list):
block = nv[0]
else:
# Put back the dimension that was taken from it and make
# a block out of the result.
try:
nv = _block_shape(nv, ndim=self.ndim)
except (AttributeError, NotImplementedError):
pass
block = self.make_block(values=nv,
placement=ref_loc, fastpath=True)
return block
# ndim == 1
if self.ndim == 1:
if mask.any():
nv = f(mask, new_values, None)
else:
nv = new_values if inplace else new_values.copy()
block = make_a_block(nv, self.mgr_locs)
return [block]
# ndim > 1
new_blocks = []
for i, ref_loc in enumerate(self.mgr_locs):
m = mask[i]
v = new_values[i]
# need a new block
if m.any():
nv = f(m, v, i)
else:
nv = v if inplace else v.copy()
block = make_a_block(nv, [ref_loc])
new_blocks.append(block)
return new_blocks
def _maybe_downcast(self, blocks, downcast=None):
# no need to downcast our float
# unless indicated
if downcast is None and self.is_float:
return blocks
elif downcast is None and (self.is_timedelta or self.is_datetime):
return blocks
if not isinstance(blocks, list):
blocks = [blocks]
return _extend_blocks([b.downcast(downcast) for b in blocks])
def downcast(self, dtypes=None, mgr=None):
""" try to downcast each item to the dict of dtypes if present """
# turn it off completely
if dtypes is False:
return self
values = self.values
# single block handling
if self._is_single_block:
# try to cast all non-floats here
if dtypes is None:
dtypes = 'infer'
nv = maybe_downcast_to_dtype(values, dtypes)
return self.make_block(nv, fastpath=True)
# ndim > 1
if dtypes is None:
return self
if not (dtypes == 'infer' or isinstance(dtypes, dict)):
raise ValueError("downcast must have a dictionary or 'infer' as "
"its argument")
# operate column-by-column
# this is expensive as it splits the blocks items-by-item
def f(m, v, i):
if dtypes == 'infer':
dtype = 'infer'
else:
raise AssertionError("dtypes as dict is not supported yet")
if dtype is not None:
v = maybe_downcast_to_dtype(v, dtype)
return v
return self.split_and_operate(None, f, False)
def astype(self, dtype, copy=False, errors='raise', values=None, **kwargs):
return self._astype(dtype, copy=copy, errors=errors, values=values,
**kwargs)
def _astype(self, dtype, copy=False, errors='raise', values=None,
klass=None, mgr=None, raise_on_error=False, **kwargs):
"""
        Coerce to the new type (if copy=True, return a new copy);
        raise on exception if errors == 'raise'
"""
errors_legal_values = ('raise', 'ignore')
if errors not in errors_legal_values:
invalid_arg = ("Expected value of kwarg 'errors' to be one of {}. "
"Supplied value is '{}'".format(
list(errors_legal_values), errors))
raise ValueError(invalid_arg)
# may need to convert to categorical
# this is only called for non-categoricals
if self.is_categorical_astype(dtype):
if (('categories' in kwargs or 'ordered' in kwargs) and
isinstance(dtype, CategoricalDtype)):
raise TypeError("Cannot specify a CategoricalDtype and also "
"`categories` or `ordered`. Use "
"`dtype=CategoricalDtype(categories, ordered)`"
" instead.")
kwargs = kwargs.copy()
categories = getattr(dtype, 'categories', None)
ordered = getattr(dtype, 'ordered', False)
kwargs.setdefault('categories', categories)
kwargs.setdefault('ordered', ordered)
return self.make_block(Categorical(self.values, **kwargs))
# astype processing
dtype = np.dtype(dtype)
if self.dtype == dtype:
if copy:
return self.copy()
return self
if klass is None:
if dtype == np.object_:
klass = ObjectBlock
try:
# force the copy here
if values is None:
if issubclass(dtype.type,
(compat.text_type, compat.string_types)):
# use native type formatting for datetime/tz/timedelta
if self.is_datelike:
values = self.to_native_types()
# astype formatting
else:
values = self.values
else:
values = self.get_values(dtype=dtype)
# _astype_nansafe works fine with 1-d only
values = astype_nansafe(values.ravel(), dtype, copy=True)
values = values.reshape(self.shape)
newb = make_block(values, placement=self.mgr_locs, dtype=dtype,
klass=klass)
except:
if errors == 'raise':
raise
newb = self.copy() if copy else self
if newb.is_numeric and self.is_numeric:
if newb.shape != self.shape:
raise TypeError("cannot set astype for copy = [%s] for dtype "
"(%s [%s]) with smaller itemsize that current "
"(%s [%s])" % (copy, self.dtype.name,
self.itemsize, newb.dtype.name,
newb.itemsize))
return newb
def convert(self, copy=True, **kwargs):
""" attempt to coerce any object types to better types return a copy
of the block (if copy = True) by definition we are not an ObjectBlock
here!
"""
return self.copy() if copy else self
def _can_hold_element(self, element):
""" require the same dtype as ourselves """
dtype = self.values.dtype.type
if is_list_like(element):
element = np.asarray(element)
tipo = element.dtype.type
return issubclass(tipo, dtype)
return isinstance(element, dtype)
def _try_cast_result(self, result, dtype=None):
""" try to cast the result to our original type, we may have
        roundtripped through object in the meantime
"""
if dtype is None:
dtype = self.dtype
if self.is_integer or self.is_bool or self.is_datetime:
pass
elif self.is_float and result.dtype == self.dtype:
# protect against a bool/object showing up here
if isinstance(dtype, compat.string_types) and dtype == 'infer':
return result
if not isinstance(dtype, type):
dtype = dtype.type
if issubclass(dtype, (np.bool_, np.object_)):
if issubclass(dtype, np.bool_):
if isna(result).all():
return result.astype(np.bool_)
else:
result = result.astype(np.object_)
result[result == 1] = True
result[result == 0] = False
return result
else:
return result.astype(np.object_)
return result
# may need to change the dtype here
return maybe_downcast_to_dtype(result, dtype)
def _try_coerce_args(self, values, other):
""" provide coercion to our input arguments """
if np.any(notna(other)) and not self._can_hold_element(other):
# coercion issues
# let higher levels handle
raise TypeError("cannot convert {} to an {}".format(
type(other).__name__,
type(self).__name__.lower().replace('Block', '')))
return values, False, other, False
def _try_coerce_result(self, result):
""" reverse of try_coerce_args """
return result
def _try_coerce_and_cast_result(self, result, dtype=None):
result = self._try_coerce_result(result)
result = self._try_cast_result(result, dtype=dtype)
return result
def to_native_types(self, slicer=None, na_rep='nan', quoting=None,
**kwargs):
""" convert to our native types format, slicing if desired """
values = self.values
if slicer is not None:
values = values[:, slicer]
mask = isna(values)
if not self.is_object and not quoting:
values = values.astype(str)
else:
values = np.array(values, dtype='object')
values[mask] = na_rep
return values
# block actions ####
def copy(self, deep=True, mgr=None):
""" copy constructor """
values = self.values
if deep:
values = values.copy()
return self.make_block_same_class(values)
def replace(self, to_replace, value, inplace=False, filter=None,
regex=False, convert=True, mgr=None):
""" replace the to_replace value with value, possible to create new
blocks here this is just a call to putmask. regex is not used here.
It is used in ObjectBlocks. It is here for API
compatibility.
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
original_to_replace = to_replace
# try to replace, if we raise an error, convert to ObjectBlock and
# retry
try:
values, _, to_replace, _ = self._try_coerce_args(self.values,
to_replace)
mask = missing.mask_missing(values, to_replace)
if filter is not None:
filtered_out = ~self.mgr_locs.isin(filter)
mask[filtered_out.nonzero()[0]] = False
blocks = self.putmask(mask, value, inplace=inplace)
if convert:
blocks = [b.convert(by_item=True, numeric=False,
copy=not inplace) for b in blocks]
return blocks
except (TypeError, ValueError):
# try again with a compatible block
block = self.astype(object)
return block.replace(
to_replace=original_to_replace, value=value, inplace=inplace,
filter=filter, regex=regex, convert=convert)
def _replace_single(self, *args, **kwargs):
""" no-op on a non-ObjectBlock """
return self if kwargs['inplace'] else self.copy()
def setitem(self, indexer, value, mgr=None):
""" set the value inplace; return a new block (of a possibly different
dtype)
indexer is a direct slice/positional indexer; value must be a
compatible shape
"""
# coerce None values, if appropriate
if value is None:
if self.is_numeric:
value = np.nan
# coerce if block dtype can store value
values = self.values
try:
values, _, value, _ = self._try_coerce_args(values, value)
# can keep its own dtype
if hasattr(value, 'dtype') and is_dtype_equal(values.dtype,
value.dtype):
dtype = self.dtype
else:
dtype = 'infer'
except (TypeError, ValueError):
# current dtype cannot store value, coerce to common dtype
find_dtype = False
if hasattr(value, 'dtype'):
dtype = value.dtype
find_dtype = True
elif is_scalar(value):
if isna(value):
# NaN promotion is handled in latter path
dtype = False
else:
dtype, _ = infer_dtype_from_scalar(value,
pandas_dtype=True)
find_dtype = True
else:
dtype = 'infer'
if find_dtype:
dtype = find_common_type([values.dtype, dtype])
if not is_dtype_equal(self.dtype, dtype):
b = self.astype(dtype)
return b.setitem(indexer, value, mgr=mgr)
# value must be storeable at this moment
arr_value = np.array(value)
# cast the values to a type that can hold nan (if necessary)
if not self._can_hold_element(value):
dtype, _ = maybe_promote(arr_value.dtype)
values = values.astype(dtype)
transf = (lambda x: x.T) if self.ndim == 2 else (lambda x: x)
values = transf(values)
        length = len(values)
# length checking
# boolean with truth values == len of the value is ok too
if isinstance(indexer, (np.ndarray, list)):
if is_list_like(value) and len(indexer) != len(value):
if not (isinstance(indexer, np.ndarray) and
indexer.dtype == np.bool_ and
len(indexer[indexer]) == len(value)):
raise ValueError("cannot set using a list-like indexer "
"with a different length than the value")
# slice
elif isinstance(indexer, slice):
            if is_list_like(value) and length:
if len(value) != length_of_indexer(indexer, values):
raise ValueError("cannot set using a slice indexer with a "
"different length than the value")
def _is_scalar_indexer(indexer):
# return True if we are all scalar indexers
if arr_value.ndim == 1:
if not isinstance(indexer, tuple):
indexer = tuple([indexer])
                return all(is_scalar(idx) for idx in indexer)
return False
def _is_empty_indexer(indexer):
# return a boolean if we have an empty indexer
if is_list_like(indexer) and not len(indexer):
return True
if arr_value.ndim == 1:
if not isinstance(indexer, tuple):
indexer = tuple([indexer])
return any(isinstance(idx, np.ndarray) and len(idx) == 0
for idx in indexer)
return False
# empty indexers
# 8669 (empty)
if _is_empty_indexer(indexer):
pass
# setting a single element for each dim and with a rhs that could
# be say a list
# GH 6043
elif _is_scalar_indexer(indexer):
values[indexer] = value
# if we are an exact match (ex-broadcasting),
# then use the resultant dtype
elif (len(arr_value.shape) and
arr_value.shape[0] == values.shape[0] and
np.prod(arr_value.shape) == np.prod(values.shape)):
values[indexer] = value
try:
values = values.astype(arr_value.dtype)
except ValueError:
pass
# set
else:
values[indexer] = value
# coerce and try to infer the dtypes of the result
values = self._try_coerce_and_cast_result(values, dtype)
block = self.make_block(transf(values), fastpath=True)
return block
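    # Illustrative note (not part of the original source): at the user level,
    # setitem is what makes an incompatible assignment upcast a block rather
    # than raise. A hedged sketch, assuming a plain int64 column:
    #
    #     df = pd.DataFrame({'a': [1, 2, 3]})   # held in an IntBlock
    #     df.loc[1, 'a'] = 1.5                  # int64 cannot hold 1.5
    #     df['a'].dtype                         # -> float64
    #
    # The common dtype is found via find_common_type above; the block is then
    # astyped and setitem is retried on the new block.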
def putmask(self, mask, new, align=True, inplace=False, axis=0,
transpose=False, mgr=None):
""" putmask the data to the block; it is possible that we may create a
new dtype of block
return the resulting block(s)
Parameters
----------
mask : the condition to respect
new : a ndarray/object
align : boolean, perform alignment on other/cond, default is True
inplace : perform inplace modification, default is False
axis : int
transpose : boolean
Set to True if self is stored with axes reversed
Returns
-------
a list of new blocks, the result of the putmask
"""
new_values = self.values if inplace else self.values.copy()
if hasattr(new, 'reindex_axis'):
new = new.values
if hasattr(mask, 'reindex_axis'):
mask = mask.values
# if we are passed a scalar None, convert it here
if not is_list_like(new) and isna(new) and not self.is_object:
new = self.fill_value
if self._can_hold_element(new):
_, _, new, _ = self._try_coerce_args(new_values, new)
if transpose:
new_values = new_values.T
# If the default repeat behavior in np.putmask would go in the
            # wrong direction, then explicitly repeat and reshape new instead
if getattr(new, 'ndim', 0) >= 1:
if self.ndim - 1 == new.ndim and axis == 1:
new = np.repeat(
new, new_values.shape[-1]).reshape(self.shape)
new = new.astype(new_values.dtype)
# we require exact matches between the len of the
# values we are setting (or is compat). np.putmask
# doesn't check this and will simply truncate / pad
# the output, but we want sane error messages
#
# TODO: this prob needs some better checking
# for 2D cases
if ((is_list_like(new) and
np.any(mask[mask]) and
getattr(new, 'ndim', 1) == 1)):
if not (mask.shape[-1] == len(new) or
mask[mask].shape[-1] == len(new) or
len(new) == 1):
raise ValueError("cannot assign mismatch "
"length to masked array")
np.putmask(new_values, mask, new)
# maybe upcast me
elif mask.any():
if transpose:
mask = mask.T
if isinstance(new, np.ndarray):
new = new.T
axis = new_values.ndim - axis - 1
# Pseudo-broadcast
if getattr(new, 'ndim', 0) >= 1:
if self.ndim - 1 == new.ndim:
new_shape = list(new.shape)
new_shape.insert(axis, 1)
new = new.reshape(tuple(new_shape))
# operate column-by-column
def f(m, v, i):
if i is None:
# ndim==1 case.
n = new
else:
if isinstance(new, np.ndarray):
n = np.squeeze(new[i % new.shape[0]])
else:
n = np.array(new)
# type of the new block
dtype, _ = maybe_promote(n.dtype)
# we need to explicitly astype here to make a copy
n = n.astype(dtype)
nv = _putmask_smart(v, m, n)
return nv
new_blocks = self.split_and_operate(mask, f, inplace)
return new_blocks
if inplace:
return [self]
if transpose:
new_values = new_values.T
return [self.make_block(new_values, fastpath=True)]
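    # Illustrative note (not part of the original source): putmask backs the
    # inplace where/mask machinery. A hedged sketch of the upcast branch,
    # assuming an int64 block (which cannot hold NaN):
    #
    #     df = pd.DataFrame({'a': [1, 2, 3]})
    #     df.where(df > 1, np.nan, inplace=True)   # routed through putmask
    #     df['a'].dtype                            # -> float64 via
    #                                              #    split_and_operate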
def coerce_to_target_dtype(self, other):
"""
coerce the current block to a dtype compat for other
we will return a block, possibly object, and not raise
we can also safely try to coerce to the same dtype
and will receive the same block
"""
# if we cannot then coerce to object
dtype, _ = infer_dtype_from(other, pandas_dtype=True)
if is_dtype_equal(self.dtype, dtype):
return self
if self.is_bool or is_object_dtype(dtype) or is_bool_dtype(dtype):
# we don't upcast to bool
return self.astype(object)
elif ((self.is_float or self.is_complex) and
(is_integer_dtype(dtype) or is_float_dtype(dtype))):
# don't coerce float/complex to int
return self
elif (self.is_datetime or
is_datetime64_dtype(dtype) or
is_datetime64tz_dtype(dtype)):
# not a datetime
if not ((is_datetime64_dtype(dtype) or
is_datetime64tz_dtype(dtype)) and self.is_datetime):
return self.astype(object)
# don't upcast timezone with different timezone or no timezone
mytz = getattr(self.dtype, 'tz', None)
othertz = getattr(dtype, 'tz', None)
if str(mytz) != str(othertz):
return self.astype(object)
raise AssertionError("possible recursion in "
"coerce_to_target_dtype: {} {}".format(
self, other))
elif (self.is_timedelta or is_timedelta64_dtype(dtype)):
# not a timedelta
if not (is_timedelta64_dtype(dtype) and self.is_timedelta):
return self.astype(object)
raise AssertionError("possible recursion in "
"coerce_to_target_dtype: {} {}".format(
self, other))
try:
return self.astype(dtype)
except (ValueError, TypeError):
pass
return self.astype(object)
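    # Illustrative note (not part of the original source): this is the
    # fallback used when two dtypes cannot be combined in place. A hedged
    # sketch: a datetime64[ns] block asked to also hold a plain string falls
    # back to object instead of raising, e.g.
    #
    #     s = pd.Series(pd.to_datetime(['2017-01-01', '2017-01-02']))
    #     s.where([True, False], 'missing').dtype   # -> object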
def interpolate(self, method='pad', axis=0, index=None, values=None,
inplace=False, limit=None, limit_direction='forward',
fill_value=None, coerce=False, downcast=None, mgr=None,
**kwargs):
inplace = validate_bool_kwarg(inplace, 'inplace')
def check_int_bool(self, inplace):
# Only FloatBlocks will contain NaNs.
# timedelta subclasses IntBlock
if (self.is_bool or self.is_integer) and not self.is_timedelta:
if inplace:
return self
else:
return self.copy()
# a fill na type method
try:
m = missing.clean_fill_method(method)
except:
m = None
if m is not None:
r = check_int_bool(self, inplace)
if r is not None:
return r
return self._interpolate_with_fill(method=m, axis=axis,
inplace=inplace, limit=limit,
fill_value=fill_value,
coerce=coerce,
downcast=downcast, mgr=mgr)
# try an interp method
try:
m = missing.clean_interp_method(method, **kwargs)
except:
m = None
if m is not None:
r = check_int_bool(self, inplace)
if r is not None:
return r
return self._interpolate(method=m, index=index, values=values,
axis=axis, limit=limit,
limit_direction=limit_direction,
fill_value=fill_value, inplace=inplace,
downcast=downcast, mgr=mgr, **kwargs)
raise ValueError("invalid method '{0}' to interpolate.".format(method))
def _interpolate_with_fill(self, method='pad', axis=0, inplace=False,
limit=None, fill_value=None, coerce=False,
downcast=None, mgr=None):
""" fillna but using the interpolate machinery """
inplace = validate_bool_kwarg(inplace, 'inplace')
# if we are coercing, then don't force the conversion
# if the block can't hold the type
if coerce:
if not self._can_hold_na:
if inplace:
return [self]
else:
return [self.copy()]
values = self.values if inplace else self.values.copy()
values, _, fill_value, _ = self._try_coerce_args(values, fill_value)
values = missing.interpolate_2d(values, method=method, axis=axis,
limit=limit, fill_value=fill_value,
dtype=self.dtype)
values = self._try_coerce_result(values)
blocks = [self.make_block(values, klass=self.__class__, fastpath=True)]
return self._maybe_downcast(blocks, downcast)
def _interpolate(self, method=None, index=None, values=None,
fill_value=None, axis=0, limit=None,
limit_direction='forward', inplace=False, downcast=None,
mgr=None, **kwargs):
""" interpolate using scipy wrappers """
inplace = validate_bool_kwarg(inplace, 'inplace')
data = self.values if inplace else self.values.copy()
# only deal with floats
if not self.is_float:
if not self.is_integer:
return self
data = data.astype(np.float64)
if fill_value is None:
fill_value = self.fill_value
if method in ('krogh', 'piecewise_polynomial', 'pchip'):
if not index.is_monotonic:
raise ValueError("{0} interpolation requires that the "
"index be monotonic.".format(method))
# process 1-d slices in the axis direction
def func(x):
# process a 1-d slice, returning it
# should the axis argument be handled below in apply_along_axis?
# i.e. not an arg to missing.interpolate_1d
return missing.interpolate_1d(index, x, method=method, limit=limit,
limit_direction=limit_direction,
fill_value=fill_value,
bounds_error=False, **kwargs)
# interp each column independently
interp_values = np.apply_along_axis(func, axis, data)
blocks = [self.make_block(interp_values, klass=self.__class__,
fastpath=True)]
return self._maybe_downcast(blocks, downcast)
def take_nd(self, indexer, axis, new_mgr_locs=None, fill_tuple=None):
"""
        Take values according to indexer and return them as a block.
"""
# algos.take_nd dispatches for DatetimeTZBlock, CategoricalBlock
# so need to preserve types
# sparse is treated like an ndarray, but needs .get_values() shaping
values = self.values
if self.is_sparse:
values = self.get_values()
if fill_tuple is None:
fill_value = self.fill_value
new_values = algos.take_nd(values, indexer, axis=axis,
allow_fill=False)
else:
fill_value = fill_tuple[0]
new_values = algos.take_nd(values, indexer, axis=axis,
allow_fill=True, fill_value=fill_value)
if new_mgr_locs is None:
if axis == 0:
slc = lib.indexer_as_slice(indexer)
if slc is not None:
new_mgr_locs = self.mgr_locs[slc]
else:
new_mgr_locs = self.mgr_locs[indexer]
else:
new_mgr_locs = self.mgr_locs
if not is_dtype_equal(new_values.dtype, self.dtype):
return self.make_block(new_values, new_mgr_locs)
else:
return self.make_block_same_class(new_values, new_mgr_locs)
def diff(self, n, axis=1, mgr=None):
""" return block for the diff of the values """
new_values = algos.diff(self.values, n, axis=axis)
return [self.make_block(values=new_values, fastpath=True)]
def shift(self, periods, axis=0, mgr=None):
""" shift the block by periods, possibly upcast """
# convert integer to float if necessary. need to do a lot more than
# that, handle boolean etc also
new_values, fill_value = maybe_upcast(self.values)
# make sure array sent to np.roll is c_contiguous
f_ordered = new_values.flags.f_contiguous
if f_ordered:
new_values = new_values.T
axis = new_values.ndim - axis - 1
if np.prod(new_values.shape):
new_values = np.roll(new_values, _ensure_platform_int(periods),
axis=axis)
axis_indexer = [slice(None)] * self.ndim
if periods > 0:
axis_indexer[axis] = slice(None, periods)
else:
axis_indexer[axis] = slice(periods, None)
new_values[tuple(axis_indexer)] = fill_value
# restore original order
if f_ordered:
new_values = new_values.T
return [self.make_block(new_values, fastpath=True)]
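    # Illustrative note (not part of the original source): shift upcasts
    # before rolling because the vacated positions must hold the fill value
    # (NaN for numeric blocks). A hedged sketch:
    #
    #     pd.Series([1, 2, 3]).shift(1)    # -> [NaN, 1.0, 2.0], float64
    #     pd.Series([1, 2, 3]).shift(-1)   # -> [2.0, 3.0, NaN], float64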
def eval(self, func, other, raise_on_error=True, try_cast=False, mgr=None):
"""
evaluate the block; return result block from the result
Parameters
----------
func : how to combine self, other
other : a ndarray/object
        raise_on_error : if True (the default), raise when the function cannot
            be performed; if False, just return the data that we had coming in
try_cast : try casting the results to the input type
Returns
-------
a new block, the result of the func
"""
orig_other = other
values = self.values
if hasattr(other, 'reindex_axis'):
other = other.values
# make sure that we can broadcast
is_transposed = False
if hasattr(other, 'ndim') and hasattr(values, 'ndim'):
if values.ndim != other.ndim:
is_transposed = True
else:
if values.shape == other.shape[::-1]:
is_transposed = True
elif values.shape[0] == other.shape[-1]:
is_transposed = True
else:
                    # this is a broadcast error here
raise ValueError("cannot broadcast shape [%s] with block "
"values [%s]" % (values.T.shape,
other.shape))
transf = (lambda x: x.T) if is_transposed else (lambda x: x)
# coerce/transpose the args if needed
try:
values, values_mask, other, other_mask = self._try_coerce_args(
transf(values), other)
except TypeError:
block = self.coerce_to_target_dtype(orig_other)
return block.eval(func, orig_other,
raise_on_error=raise_on_error,
try_cast=try_cast, mgr=mgr)
# get the result, may need to transpose the other
def get_result(other):
            # avoid numpy warning of comparisons against None
if other is None:
result = not func.__name__ == 'eq'
# avoid numpy warning of elementwise comparisons to object
elif is_numeric_v_string_like(values, other):
result = False
# avoid numpy warning of elementwise comparisons
elif func.__name__ == 'eq':
if is_list_like(other) and not isinstance(other, np.ndarray):
other = np.asarray(other)
# if we can broadcast, then ok
if values.shape[-1] != other.shape[-1]:
return False
result = func(values, other)
else:
result = func(values, other)
# mask if needed
if isinstance(values_mask, np.ndarray) and values_mask.any():
result = result.astype('float64', copy=False)
result[values_mask] = np.nan
if other_mask is True:
result = result.astype('float64', copy=False)
result[:] = np.nan
elif isinstance(other_mask, np.ndarray) and other_mask.any():
result = result.astype('float64', copy=False)
result[other_mask.ravel()] = np.nan
return result
# error handler if we have an issue operating with the function
def handle_error():
if raise_on_error:
# The 'detail' variable is defined in outer scope.
raise TypeError('Could not operate %s with block values %s' %
(repr(other), str(detail))) # noqa
else:
# return the values
result = np.empty(values.shape, dtype='O')
result.fill(np.nan)
return result
# get the result
try:
with np.errstate(all='ignore'):
result = get_result(other)
# if we have an invalid shape/broadcast error
# GH4576, so raise instead of allowing to pass through
except ValueError as detail:
raise
except Exception as detail:
result = handle_error()
# technically a broadcast error in numpy can 'work' by returning a
# boolean False
        if not isinstance(result, np.ndarray):
            # differentiate between an invalid ndarray-ndarray comparison
            # and an invalid type comparison
            if isinstance(values, np.ndarray) and is_list_like(other):
                raise ValueError('Invalid broadcasting comparison [%s] '
                                 'with block values' % repr(other))
            raise TypeError('Could not compare [%s] with block values' %
                            repr(other))
# transpose if needed
result = transf(result)
# try to cast if requested
if try_cast:
result = self._try_cast_result(result)
result = _block_shape(result, ndim=self.ndim)
return [self.make_block(result, fastpath=True, )]
def where(self, other, cond, align=True, raise_on_error=True,
try_cast=False, axis=0, transpose=False, mgr=None):
"""
evaluate the block; return result block(s) from the result
Parameters
----------
other : a ndarray/object
cond : the condition to respect
align : boolean, perform alignment on other/cond
        raise_on_error : if True (the default), raise when the function cannot
            be performed; if False, just return the data that we had coming in
axis : int
transpose : boolean
Set to True if self is stored with axes reversed
Returns
-------
a new block(s), the result of the func
"""
values = self.values
orig_other = other
if transpose:
values = values.T
if hasattr(other, 'reindex_axis'):
other = other.values
if hasattr(cond, 'reindex_axis'):
cond = cond.values
# If the default broadcasting would go in the wrong direction, then
        # explicitly reshape other instead
if getattr(other, 'ndim', 0) >= 1:
if values.ndim - 1 == other.ndim and axis == 1:
other = other.reshape(tuple(other.shape + (1, )))
if not hasattr(cond, 'shape'):
raise ValueError("where must have a condition that is ndarray "
"like")
# our where function
def func(cond, values, other):
if cond.ravel().all():
return values
values, values_mask, other, other_mask = self._try_coerce_args(
values, other)
try:
return self._try_coerce_result(expressions.where(
cond, values, other, raise_on_error=True))
except Exception as detail:
if raise_on_error:
raise TypeError('Could not operate [%s] with block values '
'[%s]' % (repr(other), str(detail)))
else:
# return the values
result = np.empty(values.shape, dtype='float64')
result.fill(np.nan)
return result
# see if we can operate on the entire block, or need item-by-item
# or if we are a single block (ndim == 1)
try:
result = func(cond, values, other)
except TypeError:
# we cannot coerce, return a compat dtype
            # we are explicitly ignoring raise_on_error here
block = self.coerce_to_target_dtype(other)
blocks = block.where(orig_other, cond, align=align,
raise_on_error=raise_on_error,
try_cast=try_cast, axis=axis,
transpose=transpose)
return self._maybe_downcast(blocks, 'infer')
if self._can_hold_na or self.ndim == 1:
if transpose:
result = result.T
# try to cast if requested
if try_cast:
result = self._try_cast_result(result)
return self.make_block(result)
# might need to separate out blocks
axis = cond.ndim - 1
cond = cond.swapaxes(axis, 0)
mask = np.array([cond[i].all() for i in range(cond.shape[0])],
dtype=bool)
result_blocks = []
for m in [mask, ~mask]:
if m.any():
r = self._try_cast_result(result.take(m.nonzero()[0],
axis=axis))
result_blocks.append(
self.make_block(r.T, placement=self.mgr_locs[m]))
return result_blocks
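    # Illustrative note (not part of the original source): for blocks that
    # cannot hold NaN (e.g. int64), the column-splitting branch above lets
    # columns whose condition is all-True keep their dtype. A hedged sketch:
    #
    #     df = pd.DataFrame({'a': [1, 2], 'b': [3, 4]})
    #     df.where(df > 1).dtypes   # 'a' -> float64 (gains a NaN),
    #                               # 'b' -> int64   (all values kept)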
def equals(self, other):
if self.dtype != other.dtype or self.shape != other.shape:
return False
return array_equivalent(self.values, other.values)
def _unstack(self, unstacker_func, new_columns):
"""Return a list of unstacked blocks of self
Parameters
----------
unstacker_func : callable
Partially applied unstacker.
new_columns : Index
All columns of the unstacked BlockManager.
Returns
-------
blocks : list of Block
New blocks of unstacked values.
mask : array_like of bool
The mask of columns of `blocks` we should keep.
"""
unstacker = unstacker_func(self.values.T)
new_items = unstacker.get_new_columns()
new_placement = new_columns.get_indexer(new_items)
new_values, mask = unstacker.get_new_values()
mask = mask.any(0)
new_values = new_values.T[mask]
new_placement = new_placement[mask]
blocks = [make_block(new_values, placement=new_placement)]
return blocks, mask
def quantile(self, qs, interpolation='linear', axis=0, mgr=None):
"""
        compute the quantiles of the block
Parameters
----------
qs: a scalar or list of the quantiles to be computed
interpolation: type of interpolation, default 'linear'
axis: axis to compute, default 0
Returns
-------
tuple of (axis, block)
"""
kw = {'interpolation': interpolation}
values = self.get_values()
values, _, _, _ = self._try_coerce_args(values, values)
def _nanpercentile1D(values, mask, q, **kw):
values = values[~mask]
if len(values) == 0:
if is_scalar(q):
return self._na_value
else:
return np.array([self._na_value] * len(q),
dtype=values.dtype)
return np.percentile(values, q, **kw)
def _nanpercentile(values, q, axis, **kw):
mask = isna(self.values)
if not is_scalar(mask) and mask.any():
if self.ndim == 1:
return _nanpercentile1D(values, mask, q, **kw)
else:
# for nonconsolidatable blocks mask is 1D, but values 2D
if mask.ndim < values.ndim:
mask = mask.reshape(values.shape)
if axis == 0:
values = values.T
mask = mask.T
result = [_nanpercentile1D(val, m, q, **kw) for (val, m)
in zip(list(values), list(mask))]
result = np.array(result, dtype=values.dtype, copy=False).T
return result
else:
return np.percentile(values, q, axis=axis, **kw)
from pandas import Float64Index
is_empty = values.shape[axis] == 0
if is_list_like(qs):
ax = Float64Index(qs)
if is_empty:
if self.ndim == 1:
result = self._na_value
else:
# create the array of na_values
# 2d len(values) * len(qs)
result = np.repeat(np.array([self._na_value] * len(qs)),
len(values)).reshape(len(values),
len(qs))
else:
try:
result = _nanpercentile(values, np.array(qs) * 100,
axis=axis, **kw)
except ValueError:
# older numpies don't handle an array for q
result = [_nanpercentile(values, q * 100,
axis=axis, **kw) for q in qs]
result = np.array(result, copy=False)
if self.ndim > 1:
result = result.T
else:
if self.ndim == 1:
ax = Float64Index([qs])
else:
ax = mgr.axes[0]
if is_empty:
if self.ndim == 1:
result = self._na_value
else:
result = np.array([self._na_value] * len(self))
else:
result = _nanpercentile(values, qs * 100, axis=axis, **kw)
ndim = getattr(result, 'ndim', None) or 0
result = self._try_coerce_result(result)
if is_scalar(result):
return ax, self.make_block_scalar(result)
return ax, make_block(result,
placement=np.arange(len(result)),
ndim=ndim)
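    # Illustrative note (not part of the original source): quantile returns
    # the axis (a Float64Index of the requested quantiles when qs is
    # list-like) together with a block of results. A hedged user-level sketch:
    #
    #     df = pd.DataFrame({'a': [1.0, 2.0, 3.0, 4.0]})
    #     df.quantile([0.25, 0.75])   # DataFrame indexed by [0.25, 0.75]
    #     df.quantile(0.5)            # scalar q -> Series of column quantiles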
class ScalarBlock(Block):
"""
a scalar compat Block
"""
__slots__ = ['_mgr_locs', 'values', 'ndim']
def __init__(self, values):
self.ndim = 0
self.mgr_locs = [0]
self.values = values
@property
def dtype(self):
return type(self.values)
@property
def shape(self):
return tuple([0])
def __len__(self):
return 0
class NonConsolidatableMixIn(object):
""" hold methods for the nonconsolidatable blocks """
_can_consolidate = False
_verify_integrity = False
_validate_ndim = False
_holder = None
def __init__(self, values, placement, ndim=None, fastpath=False, **kwargs):
# Placement must be converted to BlockPlacement via property setter
# before ndim logic, because placement may be a slice which doesn't
# have a length.
self.mgr_locs = placement
# kludgetastic
if ndim is None:
if len(self.mgr_locs) != 1:
ndim = 1
else:
ndim = 2
self.ndim = ndim
if not isinstance(values, self._holder):
raise TypeError("values must be {0}".format(self._holder.__name__))
self.values = values
@property
def shape(self):
if self.ndim == 1:
            return (len(self.values),)
return (len(self.mgr_locs), len(self.values))
def get_values(self, dtype=None):
""" need to to_dense myself (and always return a ndim sized object) """
values = self.values.to_dense()
if values.ndim == self.ndim - 1:
values = values.reshape((1,) + values.shape)
return values
def iget(self, col):
if self.ndim == 2 and isinstance(col, tuple):
col, loc = col
if not is_null_slice(col) and col != 0:
raise IndexError("{0} only contains one item".format(self))
return self.values[loc]
else:
if col != 0:
raise IndexError("{0} only contains one item".format(self))
return self.values
def should_store(self, value):
return isinstance(value, self._holder)
def set(self, locs, values, check=False):
assert locs.tolist() == [0]
self.values = values
def putmask(self, mask, new, align=True, inplace=False, axis=0,
transpose=False, mgr=None):
"""
putmask the data to the block; we must be a single block and not
generate other blocks
return the resulting block
Parameters
----------
mask : the condition to respect
new : a ndarray/object
align : boolean, perform alignment on other/cond, default is True
inplace : perform inplace modification, default is False
Returns
-------
a new block(s), the result of the putmask
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
# use block's copy logic.
# .values may be an Index which does shallow copy by default
new_values = self.values if inplace else self.copy().values
new_values, _, new, _ = self._try_coerce_args(new_values, new)
if isinstance(new, np.ndarray) and len(new) == len(mask):
new = new[mask]
mask = _safe_reshape(mask, new_values.shape)
new_values[mask] = new
new_values = self._try_coerce_result(new_values)
return [self.make_block(values=new_values)]
def _slice(self, slicer):
""" return a slice of my values (but densify first) """
return self.get_values()[slicer]
def _try_cast_result(self, result, dtype=None):
return result
def _unstack(self, unstacker_func, new_columns):
"""Return a list of unstacked blocks of self
Parameters
----------
unstacker_func : callable
Partially applied unstacker.
new_columns : Index
All columns of the unstacked BlockManager.
Returns
-------
blocks : list of Block
New blocks of unstacked values.
mask : array_like of bool
The mask of columns of `blocks` we should keep.
"""
# NonConsolidatable blocks can have a single item only, so we return
# one block per item
unstacker = unstacker_func(self.values.T)
new_items = unstacker.get_new_columns()
new_placement = new_columns.get_indexer(new_items)
new_values, mask = unstacker.get_new_values()
mask = mask.any(0)
new_values = new_values.T[mask]
new_placement = new_placement[mask]
blocks = [self.make_block_same_class(vals, [place])
for vals, place in zip(new_values, new_placement)]
return blocks, mask
class NumericBlock(Block):
__slots__ = ()
is_numeric = True
_can_hold_na = True
class FloatOrComplexBlock(NumericBlock):
__slots__ = ()
def equals(self, other):
if self.dtype != other.dtype or self.shape != other.shape:
return False
left, right = self.values, other.values
return ((left == right) | (np.isnan(left) & np.isnan(right))).all()
class FloatBlock(FloatOrComplexBlock):
__slots__ = ()
is_float = True
_downcast_dtype = 'int64'
def _can_hold_element(self, element):
if is_list_like(element):
element = np.asarray(element)
tipo = element.dtype.type
return (issubclass(tipo, (np.floating, np.integer)) and
not issubclass(tipo, (np.datetime64, np.timedelta64)))
return (isinstance(element, (float, int, np.floating, np.int_)) and
not isinstance(element, (bool, np.bool_, datetime, timedelta,
np.datetime64, np.timedelta64)))
def to_native_types(self, slicer=None, na_rep='', float_format=None,
decimal='.', quoting=None, **kwargs):
""" convert to our native types format, slicing if desired """
values = self.values
if slicer is not None:
values = values[:, slicer]
# see gh-13418: no special formatting is desired at the
# output (important for appropriate 'quoting' behaviour),
# so do not pass it through the FloatArrayFormatter
if float_format is None and decimal == '.':
mask = isna(values)
if not quoting:
values = values.astype(str)
else:
values = np.array(values, dtype='object')
values[mask] = na_rep
return values
from pandas.io.formats.format import FloatArrayFormatter
formatter = FloatArrayFormatter(values, na_rep=na_rep,
float_format=float_format,
decimal=decimal, quoting=quoting,
fixed_width=False)
return formatter.get_result_as_array()
def should_store(self, value):
# when inserting a column should not coerce integers to floats
# unnecessarily
return (issubclass(value.dtype.type, np.floating) and
value.dtype == self.dtype)
class ComplexBlock(FloatOrComplexBlock):
__slots__ = ()
is_complex = True
def _can_hold_element(self, element):
if is_list_like(element):
element = np.array(element)
return issubclass(element.dtype.type,
(np.floating, np.integer, np.complexfloating))
return (isinstance(element,
(float, int, complex, np.float_, np.int_)) and
not isinstance(element, (bool, np.bool_)))
def should_store(self, value):
return issubclass(value.dtype.type, np.complexfloating)
class IntBlock(NumericBlock):
__slots__ = ()
is_integer = True
_can_hold_na = False
def _can_hold_element(self, element):
if is_list_like(element):
element = np.array(element)
tipo = element.dtype.type
return (issubclass(tipo, np.integer) and
not issubclass(tipo, (np.datetime64, np.timedelta64)) and
self.dtype.itemsize >= element.dtype.itemsize)
return is_integer(element)
def should_store(self, value):
return is_integer_dtype(value) and value.dtype == self.dtype
class DatetimeLikeBlockMixin(object):
@property
def _na_value(self):
return tslib.NaT
@property
def fill_value(self):
return tslib.iNaT
def get_values(self, dtype=None):
"""
return object dtype as boxed values, such as Timestamps/Timedelta
"""
if is_object_dtype(dtype):
return lib.map_infer(self.values.ravel(),
self._box_func).reshape(self.values.shape)
return self.values
class TimeDeltaBlock(DatetimeLikeBlockMixin, IntBlock):
__slots__ = ()
is_timedelta = True
_can_hold_na = True
is_numeric = False
@property
def _box_func(self):
return lambda x: tslib.Timedelta(x, unit='ns')
def _can_hold_element(self, element):
if is_list_like(element):
element = np.array(element)
tipo = element.dtype.type
return issubclass(tipo, np.timedelta64)
return isinstance(element, (timedelta, np.timedelta64))
def fillna(self, value, **kwargs):
# allow filling with integers to be
# interpreted as seconds
if is_integer(value) and not isinstance(value, np.timedelta64):
value = Timedelta(value, unit='s')
return super(TimeDeltaBlock, self).fillna(value, **kwargs)
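    # Illustrative note (not part of the original source): per the override
    # above, an integer fill value is interpreted as seconds. A hedged sketch:
    #
    #     s = pd.Series(pd.to_timedelta(['1 day', np.nan]))
    #     s.fillna(5)   # NaT becomes Timedelta('0 days 00:00:05')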
def _try_coerce_args(self, values, other):
"""
Coerce values and other to int64, with null values converted to
iNaT. values is always ndarray-like, other may not be
Parameters
----------
values : ndarray-like
other : ndarray-like or scalar
Returns
-------
base-type values, values mask, base-type other, other mask
"""
values_mask = isna(values)
values = values.view('i8')
other_mask = False
if isinstance(other, bool):
raise TypeError
elif is_null_datelike_scalar(other):
other = tslib.iNaT
other_mask = True
elif isinstance(other, Timedelta):
other_mask = isna(other)
other = other.value
elif isinstance(other, timedelta):
other = Timedelta(other).value
elif isinstance(other, np.timedelta64):
other_mask = isna(other)
other = Timedelta(other).value
elif hasattr(other, 'dtype') and is_timedelta64_dtype(other):
other_mask = isna(other)
other = other.astype('i8', copy=False).view('i8')
else:
# coercion issues
# let higher levels handle
raise TypeError
return values, values_mask, other, other_mask
def _try_coerce_result(self, result):
""" reverse of try_coerce_args / try_operate """
if isinstance(result, np.ndarray):
mask = isna(result)
if result.dtype.kind in ['i', 'f', 'O']:
result = result.astype('m8[ns]')
result[mask] = tslib.iNaT
elif isinstance(result, (np.integer, np.float)):
result = self._box_func(result)
return result
def should_store(self, value):
return issubclass(value.dtype.type, np.timedelta64)
def to_native_types(self, slicer=None, na_rep=None, quoting=None,
**kwargs):
""" convert to our native types format, slicing if desired """
values = self.values
if slicer is not None:
values = values[:, slicer]
mask = isna(values)
rvalues = np.empty(values.shape, dtype=object)
if na_rep is None:
na_rep = 'NaT'
rvalues[mask] = na_rep
imask = (~mask).ravel()
# FIXME:
# should use the formats.format.Timedelta64Formatter here
# to figure what format to pass to the Timedelta
# e.g. to not show the decimals say
rvalues.flat[imask] = np.array([Timedelta(val)._repr_base(format='all')
for val in values.ravel()[imask]],
dtype=object)
return rvalues
class BoolBlock(NumericBlock):
__slots__ = ()
is_bool = True
_can_hold_na = False
def _can_hold_element(self, element):
if is_list_like(element):
element = np.asarray(element)
return issubclass(element.dtype.type, np.bool_)
return isinstance(element, (bool, np.bool_))
def should_store(self, value):
return issubclass(value.dtype.type, np.bool_)
def replace(self, to_replace, value, inplace=False, filter=None,
regex=False, convert=True, mgr=None):
inplace = validate_bool_kwarg(inplace, 'inplace')
to_replace_values = np.atleast_1d(to_replace)
if not np.can_cast(to_replace_values, bool):
return self
return super(BoolBlock, self).replace(to_replace, value,
inplace=inplace, filter=filter,
regex=regex, convert=convert,
mgr=mgr)
class ObjectBlock(Block):
__slots__ = ()
is_object = True
_can_hold_na = True
def __init__(self, values, ndim=2, fastpath=False, placement=None,
**kwargs):
if issubclass(values.dtype.type, compat.string_types):
values = np.array(values, dtype=object)
super(ObjectBlock, self).__init__(values, ndim=ndim, fastpath=fastpath,
placement=placement, **kwargs)
@property
def is_bool(self):
""" we can be a bool if we have only bool values but are of type
object
"""
return lib.is_bool_array(self.values.ravel())
# TODO: Refactor when convert_objects is removed since there will be 1 path
def convert(self, *args, **kwargs):
""" attempt to coerce any object types to better types return a copy of
the block (if copy = True) by definition we ARE an ObjectBlock!!!!!
can return multiple blocks!
"""
if args:
raise NotImplementedError
by_item = True if 'by_item' not in kwargs else kwargs['by_item']
new_inputs = ['coerce', 'datetime', 'numeric', 'timedelta']
new_style = False
for kw in new_inputs:
new_style |= kw in kwargs
if new_style:
fn = soft_convert_objects
fn_inputs = new_inputs
else:
fn = maybe_convert_objects
fn_inputs = ['convert_dates', 'convert_numeric',
'convert_timedeltas']
fn_inputs += ['copy']
fn_kwargs = {}
for key in fn_inputs:
if key in kwargs:
fn_kwargs[key] = kwargs[key]
# operate column-by-column
def f(m, v, i):
shape = v.shape
values = fn(v.ravel(), **fn_kwargs)
try:
values = values.reshape(shape)
values = _block_shape(values, ndim=self.ndim)
except (AttributeError, NotImplementedError):
pass
return values
if by_item and not self._is_single_block:
blocks = self.split_and_operate(None, f, False)
else:
values = f(None, self.values.ravel(), None)
blocks = [make_block(values, ndim=self.ndim,
placement=self.mgr_locs)]
return blocks
def set(self, locs, values, check=False):
"""
Modify Block in-place with new item value
Returns
-------
None
"""
# GH6026
if check:
try:
if (self.values[locs] == values).all():
return
except:
pass
try:
self.values[locs] = values
except (ValueError):
# broadcasting error
# see GH6171
new_shape = list(values.shape)
new_shape[0] = len(self.items)
self.values = np.empty(tuple(new_shape), dtype=self.dtype)
self.values.fill(np.nan)
self.values[locs] = values
def _maybe_downcast(self, blocks, downcast=None):
if downcast is not None:
return blocks
# split and convert the blocks
return _extend_blocks([b.convert(datetime=True, numeric=False)
for b in blocks])
def _can_hold_element(self, element):
return True
def _try_coerce_args(self, values, other):
""" provide coercion to our input arguments """
if isinstance(other, ABCDatetimeIndex):
# to store DatetimeTZBlock as object
other = other.asobject.values
return values, False, other, False
def should_store(self, value):
return not (issubclass(value.dtype.type,
(np.integer, np.floating, np.complexfloating,
np.datetime64, np.bool_)) or
is_extension_type(value))
def replace(self, to_replace, value, inplace=False, filter=None,
regex=False, convert=True, mgr=None):
to_rep_is_list = is_list_like(to_replace)
value_is_list = is_list_like(value)
both_lists = to_rep_is_list and value_is_list
either_list = to_rep_is_list or value_is_list
result_blocks = []
blocks = [self]
if not either_list and is_re(to_replace):
return self._replace_single(to_replace, value, inplace=inplace,
filter=filter, regex=True,
convert=convert, mgr=mgr)
elif not (either_list or regex):
return super(ObjectBlock, self).replace(to_replace, value,
inplace=inplace,
filter=filter, regex=regex,
convert=convert, mgr=mgr)
elif both_lists:
for to_rep, v in zip(to_replace, value):
result_blocks = []
for b in blocks:
result = b._replace_single(to_rep, v, inplace=inplace,
filter=filter, regex=regex,
convert=convert, mgr=mgr)
result_blocks = _extend_blocks(result, result_blocks)
blocks = result_blocks
return result_blocks
elif to_rep_is_list and regex:
for to_rep in to_replace:
result_blocks = []
for b in blocks:
result = b._replace_single(to_rep, value, inplace=inplace,
filter=filter, regex=regex,
convert=convert, mgr=mgr)
result_blocks = _extend_blocks(result, result_blocks)
blocks = result_blocks
return result_blocks
return self._replace_single(to_replace, value, inplace=inplace,
filter=filter, convert=convert,
regex=regex, mgr=mgr)
def _replace_single(self, to_replace, value, inplace=False, filter=None,
regex=False, convert=True, mgr=None):
inplace = validate_bool_kwarg(inplace, 'inplace')
# to_replace is regex compilable
to_rep_re = regex and is_re_compilable(to_replace)
# regex is regex compilable
regex_re = is_re_compilable(regex)
# only one will survive
if to_rep_re and regex_re:
raise AssertionError('only one of to_replace and regex can be '
'regex compilable')
# if regex was passed as something that can be a regex (rather than a
# boolean)
if regex_re:
to_replace = regex
regex = regex_re or to_rep_re
# try to get the pattern attribute (compiled re) or it's a string
try:
pattern = to_replace.pattern
except AttributeError:
pattern = to_replace
# if the pattern is not empty and to_replace is either a string or a
# regex
if regex and pattern:
rx = re.compile(to_replace)
else:
# if the thing to replace is not a string or compiled regex call
# the superclass method -> to_replace is some kind of object
return super(ObjectBlock, self).replace(to_replace, value,
inplace=inplace,
filter=filter, regex=regex,
mgr=mgr)
new_values = self.values if inplace else self.values.copy()
# deal with replacing values with objects (strings) that match but
# whose replacement is not a string (numeric, nan, object)
if isna(value) or not isinstance(value, compat.string_types):
def re_replacer(s):
try:
return value if rx.search(s) is not None else s
except TypeError:
return s
else:
            # value is guaranteed to be a string here; s can be either a
            # string or null. If it's null, it gets returned unchanged.
def re_replacer(s):
try:
return rx.sub(value, s)
except TypeError:
return s
f = np.vectorize(re_replacer, otypes=[self.dtype])
if filter is None:
filt = slice(None)
else:
filt = self.mgr_locs.isin(filter).nonzero()[0]
new_values[filt] = f(new_values[filt])
# convert
block = self.make_block(new_values)
if convert:
block = block.convert(by_item=True, numeric=False)
return block
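    # Illustrative note (not part of the original source): _replace_single is
    # where regex-based replacement on object blocks happens. A hedged sketch:
    #
    #     df = pd.DataFrame({'a': ['foo', 'bar', 'baz']})
    #     df.replace(r'^ba.$', 'new', regex=True)   # 'bar'/'baz' -> 'new'
    #     df.replace('foo', np.nan)                 # exact (non-regex) match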
class CategoricalBlock(NonConsolidatableMixIn, ObjectBlock):
__slots__ = ()
is_categorical = True
_verify_integrity = True
_can_hold_na = True
_holder = Categorical
def __init__(self, values, placement, fastpath=False, **kwargs):
# coerce to categorical if we can
super(CategoricalBlock, self).__init__(_maybe_to_categorical(values),
fastpath=True,
placement=placement, **kwargs)
@property
def is_view(self):
""" I am never a view """
return False
def to_dense(self):
return self.values.to_dense().view()
def convert(self, copy=True, **kwargs):
return self.copy() if copy else self
@property
def array_dtype(self):
""" the dtype to return if I want to construct this block as an
array
"""
return np.object_
def _slice(self, slicer):
""" return a slice of my values """
# slice the category
# return same dims as we currently have
return self.values._slice(slicer)
def _try_coerce_result(self, result):
""" reverse of try_coerce_args """
# GH12564: CategoricalBlock is 1-dim only
# while returned results could be any dim
if ((not is_categorical_dtype(result)) and
isinstance(result, np.ndarray)):
result = _block_shape(result, ndim=self.ndim)
return result
def fillna(self, value, limit=None, inplace=False, downcast=None,
mgr=None):
# we may need to upcast our fill to match our dtype
if limit is not None:
raise NotImplementedError("specifying a limit for 'fillna' has "
"not been implemented yet")
values = self.values if inplace else self.values.copy()
values = self._try_coerce_result(values.fillna(value=value,
limit=limit))
return [self.make_block(values=values)]
def interpolate(self, method='pad', axis=0, inplace=False, limit=None,
fill_value=None, **kwargs):
values = self.values if inplace else self.values.copy()
return self.make_block_same_class(
values=values.fillna(fill_value=fill_value, method=method,
limit=limit),
placement=self.mgr_locs)
def shift(self, periods, axis=0, mgr=None):
return self.make_block_same_class(values=self.values.shift(periods),
placement=self.mgr_locs)
def take_nd(self, indexer, axis=0, new_mgr_locs=None, fill_tuple=None):
"""
        Take values according to indexer and return them as a block.
"""
if fill_tuple is None:
fill_value = None
else:
fill_value = fill_tuple[0]
        # axis doesn't matter; we are really a single-dim object
        # but are passed the axis depending on the calling routine
        # if it's REALLY axis 0, then this will be a reindex and not a take
new_values = self.values.take_nd(indexer, fill_value=fill_value)
# if we are a 1-dim object, then always place at 0
if self.ndim == 1:
new_mgr_locs = [0]
else:
if new_mgr_locs is None:
new_mgr_locs = self.mgr_locs
return self.make_block_same_class(new_values, new_mgr_locs)
def _astype(self, dtype, copy=False, errors='raise', values=None,
klass=None, mgr=None):
"""
Coerce to the new type (if copy=True, return a new copy)
raise on an except if raise == True
"""
if self.is_categorical_astype(dtype):
values = self.values
else:
values = np.asarray(self.values).astype(dtype, copy=False)
if copy:
values = values.copy()
return self.make_block(values)
def to_native_types(self, slicer=None, na_rep='', quoting=None, **kwargs):
""" convert to our native types format, slicing if desired """
values = self.values
if slicer is not None:
# Categorical is always one dimension
values = values[slicer]
mask = isna(values)
values = np.array(values, dtype='object')
values[mask] = na_rep
# we are expected to return a 2-d ndarray
return values.reshape(1, len(values))
class DatetimeBlock(DatetimeLikeBlockMixin, Block):
__slots__ = ()
is_datetime = True
_can_hold_na = True
def __init__(self, values, placement, fastpath=False, **kwargs):
if values.dtype != _NS_DTYPE:
values = tslib.cast_to_nanoseconds(values)
super(DatetimeBlock, self).__init__(values, fastpath=True,
placement=placement, **kwargs)
def _astype(self, dtype, mgr=None, **kwargs):
"""
these automatically copy, so copy=True has no effect
raise on an except if raise == True
"""
# if we are passed a datetime64[ns, tz]
if is_datetime64tz_dtype(dtype):
dtype = DatetimeTZDtype(dtype)
values = self.values
if getattr(values, 'tz', None) is None:
values = DatetimeIndex(values).tz_localize('UTC')
values = values.tz_convert(dtype.tz)
return self.make_block(values)
# delegate
return super(DatetimeBlock, self)._astype(dtype=dtype, **kwargs)
def _can_hold_element(self, element):
if is_list_like(element):
element = np.array(element)
return element.dtype == _NS_DTYPE or element.dtype == np.int64
return (is_integer(element) or isinstance(element, datetime) or
isna(element))
def _try_coerce_args(self, values, other):
"""
Coerce values and other to dtype 'i8'. NaN and NaT convert to
the smallest i8, and will correctly round-trip to NaT if converted
back in _try_coerce_result. values is always ndarray-like, other
may not be
Parameters
----------
values : ndarray-like
other : ndarray-like or scalar
Returns
-------
base-type values, values mask, base-type other, other mask
"""
values_mask = isna(values)
values = values.view('i8')
other_mask = False
if isinstance(other, bool):
raise TypeError
elif is_null_datelike_scalar(other):
other = tslib.iNaT
other_mask = True
elif isinstance(other, (datetime, np.datetime64, date)):
other = self._box_func(other)
if getattr(other, 'tz') is not None:
raise TypeError("cannot coerce a Timestamp with a tz on a "
"naive Block")
other_mask = isna(other)
other = other.asm8.view('i8')
elif hasattr(other, 'dtype') and is_datetime64_dtype(other):
other_mask = isna(other)
other = other.astype('i8', copy=False).view('i8')
else:
# coercion issues
# let higher levels handle
raise TypeError
return values, values_mask, other, other_mask
def _try_coerce_result(self, result):
""" reverse of try_coerce_args """
if isinstance(result, np.ndarray):
if result.dtype.kind in ['i', 'f', 'O']:
try:
result = result.astype('M8[ns]')
except ValueError:
pass
elif isinstance(result, (np.integer, np.float, np.datetime64)):
result = self._box_func(result)
return result
@property
def _box_func(self):
return tslib.Timestamp
def to_native_types(self, slicer=None, na_rep=None, date_format=None,
quoting=None, **kwargs):
""" convert to our native types format, slicing if desired """
values = self.values
if slicer is not None:
values = values[..., slicer]
from pandas.io.formats.format import _get_format_datetime64_from_values
format = _get_format_datetime64_from_values(values, date_format)
result = tslib.format_array_from_datetime(
values.view('i8').ravel(), tz=getattr(self.values, 'tz', None),
format=format, na_rep=na_rep).reshape(values.shape)
return np.atleast_2d(result)
def should_store(self, value):
return (issubclass(value.dtype.type, np.datetime64) and
not is_datetimetz(value))
def set(self, locs, values, check=False):
"""
Modify Block in-place with new item value
Returns
-------
None
"""
if values.dtype != _NS_DTYPE:
# Workaround for numpy 1.6 bug
values = tslib.cast_to_nanoseconds(values)
self.values[locs] = values
class DatetimeTZBlock(NonConsolidatableMixIn, DatetimeBlock):
""" implement a datetime64 block with a tz attribute """
__slots__ = ()
_holder = DatetimeIndex
is_datetimetz = True
def __init__(self, values, placement, ndim=2, **kwargs):
if not isinstance(values, self._holder):
values = self._holder(values)
dtype = kwargs.pop('dtype', None)
if dtype is not None:
if isinstance(dtype, compat.string_types):
dtype = DatetimeTZDtype.construct_from_string(dtype)
values = values._shallow_copy(tz=dtype.tz)
if values.tz is None:
raise ValueError("cannot create a DatetimeTZBlock without a tz")
super(DatetimeTZBlock, self).__init__(values, placement=placement,
ndim=ndim, **kwargs)
def copy(self, deep=True, mgr=None):
""" copy constructor """
values = self.values
if deep:
values = values.copy(deep=True)
return self.make_block_same_class(values)
def external_values(self):
""" we internally represent the data as a DatetimeIndex, but for
external compat with ndarray, export as a ndarray of Timestamps
"""
return self.values.astype('datetime64[ns]').values
def get_values(self, dtype=None):
# return object dtype as Timestamps with the zones
if is_object_dtype(dtype):
f = lambda x: lib.Timestamp(x, tz=self.values.tz)
return lib.map_infer(
self.values.ravel(), f).reshape(self.values.shape)
return self.values
def _slice(self, slicer):
""" return a slice of my values """
if isinstance(slicer, tuple):
col, loc = slicer
if not is_null_slice(col) and col != 0:
raise IndexError("{0} only contains one item".format(self))
return self.values[loc]
return self.values[slicer]
def _try_coerce_args(self, values, other):
"""
localize and return i8 for the values
Parameters
----------
values : ndarray-like
other : ndarray-like or scalar
Returns
-------
base-type values, values mask, base-type other, other mask
"""
values_mask = _block_shape(isna(values), ndim=self.ndim)
# asi8 is a view, needs copy
values = _block_shape(values.asi8, ndim=self.ndim)
other_mask = False
if isinstance(other, ABCSeries):
other = self._holder(other)
other_mask = isna(other)
if isinstance(other, bool):
raise TypeError
elif (is_null_datelike_scalar(other) or
(is_scalar(other) and isna(other))):
other = tslib.iNaT
other_mask = True
elif isinstance(other, self._holder):
if other.tz != self.values.tz:
raise ValueError("incompatible or non tz-aware value")
other = other.asi8
other_mask = isna(other)
elif isinstance(other, (np.datetime64, datetime, date)):
other = lib.Timestamp(other)
tz = getattr(other, 'tz', None)
# test we can have an equal time zone
if tz is None or str(tz) != str(self.values.tz):
raise ValueError("incompatible or non tz-aware value")
other_mask = isna(other)
other = other.value
else:
raise TypeError
return values, values_mask, other, other_mask
def _try_coerce_result(self, result):
""" reverse of try_coerce_args """
if isinstance(result, np.ndarray):
if result.dtype.kind in ['i', 'f', 'O']:
result = result.astype('M8[ns]')
elif isinstance(result, (np.integer, np.float, np.datetime64)):
result = lib.Timestamp(result, tz=self.values.tz)
if isinstance(result, np.ndarray):
# allow passing of > 1dim if its trivial
if result.ndim > 1:
result = result.reshape(np.prod(result.shape))
result = self.values._shallow_copy(result)
return result
@property
def _box_func(self):
return lambda x: tslib.Timestamp(x, tz=self.dtype.tz)
def shift(self, periods, axis=0, mgr=None):
""" shift the block by periods """
# think about moving this to the DatetimeIndex. This is a non-freq
        # (number of periods) shift
N = len(self)
indexer = np.zeros(N, dtype=int)
if periods > 0:
indexer[periods:] = np.arange(N - periods)
else:
indexer[:periods] = np.arange(-periods, N)
new_values = self.values.asi8.take(indexer)
if periods > 0:
new_values[:periods] = tslib.iNaT
else:
new_values[periods:] = tslib.iNaT
new_values = self.values._shallow_copy(new_values)
return [self.make_block_same_class(new_values,
placement=self.mgr_locs)]
class SparseBlock(NonConsolidatableMixIn, Block):
""" implement as a list of sparse arrays of the same dtype """
__slots__ = ()
is_sparse = True
is_numeric = True
_box_to_block_values = False
_can_hold_na = True
_ftype = 'sparse'
_holder = SparseArray
@property
def shape(self):
return (len(self.mgr_locs), self.sp_index.length)
@property
def itemsize(self):
return self.dtype.itemsize
@property
def fill_value(self):
# return np.nan
return self.values.fill_value
@fill_value.setter
def fill_value(self, v):
self.values.fill_value = v
def to_dense(self):
return self.values.to_dense().view()
@property
def sp_values(self):
return self.values.sp_values
@sp_values.setter
def sp_values(self, v):
# reset the sparse values
self.values = SparseArray(v, sparse_index=self.sp_index,
kind=self.kind, dtype=v.dtype,
fill_value=self.values.fill_value,
copy=False)
@property
def sp_index(self):
return self.values.sp_index
@property
def kind(self):
return self.values.kind
def _astype(self, dtype, copy=False, raise_on_error=True, values=None,
klass=None, mgr=None, **kwargs):
if values is None:
values = self.values
values = values.astype(dtype, copy=copy)
return self.make_block_same_class(values=values,
placement=self.mgr_locs)
def __len__(self):
try:
return self.sp_index.length
except:
return 0
def copy(self, deep=True, mgr=None):
return self.make_block_same_class(values=self.values,
sparse_index=self.sp_index,
kind=self.kind, copy=deep,
placement=self.mgr_locs)
def make_block_same_class(self, values, placement, sparse_index=None,
kind=None, dtype=None, fill_value=None,
copy=False, fastpath=True, **kwargs):
""" return a new block """
if dtype is None:
dtype = values.dtype
if fill_value is None and not isinstance(values, SparseArray):
fill_value = self.values.fill_value
# if not isinstance(values, SparseArray) and values.ndim != self.ndim:
# raise ValueError("ndim mismatch")
if values.ndim == 2:
nitems = values.shape[0]
if nitems == 0:
# kludgy, but SparseBlocks cannot handle slices, where the
# output is 0-item, so let's convert it to a dense block: it
# won't take space since there's 0 items, plus it will preserve
# the dtype.
return self.make_block(np.empty(values.shape, dtype=dtype),
placement,
fastpath=True)
elif nitems > 1:
raise ValueError("Only 1-item 2d sparse blocks are supported")
else:
values = values.reshape(values.shape[1])
new_values = SparseArray(values, sparse_index=sparse_index,
kind=kind or self.kind, dtype=dtype,
fill_value=fill_value, copy=copy)
return self.make_block(new_values, fastpath=fastpath,
placement=placement)
def interpolate(self, method='pad', axis=0, inplace=False, limit=None,
fill_value=None, **kwargs):
values = missing.interpolate_2d(self.values.to_dense(), method, axis,
limit, fill_value)
return self.make_block_same_class(values=values,
placement=self.mgr_locs)
def fillna(self, value, limit=None, inplace=False, downcast=None,
mgr=None):
# we may need to upcast our fill to match our dtype
if limit is not None:
raise NotImplementedError("specifying a limit for 'fillna' has "
"not been implemented yet")
values = self.values if inplace else self.values.copy()
values = values.fillna(value, downcast=downcast)
return [self.make_block_same_class(values=values,
placement=self.mgr_locs)]
def shift(self, periods, axis=0, mgr=None):
""" shift the block by periods """
N = len(self.values.T)
indexer = np.zeros(N, dtype=int)
if periods > 0:
indexer[periods:] = np.arange(N - periods)
else:
indexer[:periods] = np.arange(-periods, N)
new_values = self.values.to_dense().take(indexer)
# convert integer to float if necessary. need to do a lot more than
# that, handle boolean etc also
new_values, fill_value = maybe_upcast(new_values)
if periods > 0:
new_values[:periods] = fill_value
else:
new_values[periods:] = fill_value
return [self.make_block_same_class(new_values,
placement=self.mgr_locs)]
def reindex_axis(self, indexer, method=None, axis=1, fill_value=None,
limit=None, mask_info=None):
"""
Reindex using pre-computed indexer information
"""
if axis < 1:
raise AssertionError('axis must be at least 1, got %d' % axis)
# taking on the 0th axis always here
if fill_value is None:
fill_value = self.fill_value
return self.make_block_same_class(self.values.take(indexer),
fill_value=fill_value,
placement=self.mgr_locs)
def sparse_reindex(self, new_index):
""" sparse reindex and return a new block
current reindex only works for float64 dtype! """
values = self.values
values = values.sp_index.to_int_index().reindex(
values.sp_values.astype('float64'), values.fill_value, new_index)
return self.make_block_same_class(values, sparse_index=new_index,
placement=self.mgr_locs)
def make_block(values, placement, klass=None, ndim=None, dtype=None,
fastpath=False):
if klass is None:
dtype = dtype or values.dtype
vtype = dtype.type
if isinstance(values, SparseArray):
klass = SparseBlock
elif issubclass(vtype, np.floating):
klass = FloatBlock
elif (issubclass(vtype, np.integer) and
issubclass(vtype, np.timedelta64)):
klass = TimeDeltaBlock
elif (issubclass(vtype, np.integer) and
not issubclass(vtype, np.datetime64)):
klass = IntBlock
elif dtype == np.bool_:
klass = BoolBlock
elif issubclass(vtype, np.datetime64):
if hasattr(values, 'tz'):
klass = DatetimeTZBlock
else:
klass = DatetimeBlock
elif is_datetimetz(values):
klass = DatetimeTZBlock
elif issubclass(vtype, np.complexfloating):
klass = ComplexBlock
elif is_categorical(values):
klass = CategoricalBlock
else:
klass = ObjectBlock
elif klass is DatetimeTZBlock and not is_datetimetz(values):
return klass(values, ndim=ndim, fastpath=fastpath,
placement=placement, dtype=dtype)
return klass(values, ndim=ndim, fastpath=fastpath, placement=placement)
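# Illustrative note (not part of the original source): make_block is the
# factory that maps a dtype onto a Block subclass. A hedged summary of the
# mapping implied above:
#
#     sparse values      -> SparseBlock
#     float64            -> FloatBlock
#     complex128         -> ComplexBlock
#     int64              -> IntBlock
#     bool               -> BoolBlock
#     timedelta64[ns]    -> TimeDeltaBlock
#     datetime64[ns]     -> DatetimeBlock
#     datetime64[ns, tz] -> DatetimeTZBlock
#     category           -> CategoricalBlock
#     anything else      -> ObjectBlock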
# TODO: flexible with index=None and/or items=None
class BlockManager(PandasObject):
"""
Core internal data structure to implement DataFrame, Series, Panel, etc.
Manage a bunch of labeled 2D mixed-type ndarrays. Essentially it's a
lightweight blocked set of labeled data to be manipulated by the DataFrame
public API class
Attributes
----------
shape
ndim
axes
values
items
Methods
-------
set_axis(axis, new_labels)
copy(deep=True)
get_dtype_counts
get_ftype_counts
get_dtypes
get_ftypes
apply(func, axes, block_filter_fn)
get_bool_data
get_numeric_data
get_slice(slice_like, axis)
get(label)
iget(loc)
get_scalar(label_tup)
take(indexer, axis)
reindex_axis(new_labels, axis)
reindex_indexer(new_labels, indexer, axis)
delete(label)
insert(loc, label, value)
set(label, value)
Parameters
----------
Notes
-----
This is *not* a public API class
"""
__slots__ = ['axes', 'blocks', '_ndim', '_shape', '_known_consolidated',
'_is_consolidated', '_blknos', '_blklocs']
def __init__(self, blocks, axes, do_integrity_check=True, fastpath=True):
self.axes = [_ensure_index(ax) for ax in axes]
self.blocks = tuple(blocks)
for block in blocks:
if block.is_sparse:
if len(block.mgr_locs) != 1:
raise AssertionError("Sparse block refers to multiple "
"items")
else:
if self.ndim != block.ndim:
raise AssertionError('Number of Block dimensions (%d) '
'must equal number of axes (%d)' %
(block.ndim, self.ndim))
if do_integrity_check:
self._verify_integrity()
self._consolidate_check()
self._rebuild_blknos_and_blklocs()
def make_empty(self, axes=None):
""" return an empty BlockManager with the items axis of len 0 """
if axes is None:
axes = [_ensure_index([])] + [_ensure_index(a)
for a in self.axes[1:]]
# preserve dtype if possible
if self.ndim == 1:
blocks = np.array([], dtype=self.array_dtype)
else:
blocks = []
return self.__class__(blocks, axes)
def __nonzero__(self):
return True
# Python3 compat
__bool__ = __nonzero__
@property
def shape(self):
return tuple(len(ax) for ax in self.axes)
@property
def ndim(self):
return len(self.axes)
def set_axis(self, axis, new_labels):
new_labels = _ensure_index(new_labels)
old_len = len(self.axes[axis])
new_len = len(new_labels)
if new_len != old_len:
raise ValueError('Length mismatch: Expected axis has %d elements, '
'new values have %d elements' %
(old_len, new_len))
self.axes[axis] = new_labels
def rename_axis(self, mapper, axis, copy=True, level=None):
"""
        Rename one of the axes.
Parameters
----------
mapper : unary callable
axis : int
copy : boolean, default True
level : int, default None
"""
obj = self.copy(deep=copy)
obj.set_axis(axis, _transform_index(self.axes[axis], mapper, level))
return obj
def add_prefix(self, prefix):
f = partial('{prefix}{}'.format, prefix=prefix)
return self.rename_axis(f, axis=0)
def add_suffix(self, suffix):
f = partial('{}{suffix}'.format, suffix=suffix)
return self.rename_axis(f, axis=0)
@property
def _is_single_block(self):
if self.ndim == 1:
return True
if len(self.blocks) != 1:
return False
blk = self.blocks[0]
return (blk.mgr_locs.is_slice_like and
blk.mgr_locs.as_slice == slice(0, len(self), 1))
def _rebuild_blknos_and_blklocs(self):
"""
Update mgr._blknos / mgr._blklocs.
"""
new_blknos = np.empty(self.shape[0], dtype=np.int64)
new_blklocs = np.empty(self.shape[0], dtype=np.int64)
new_blknos.fill(-1)
new_blklocs.fill(-1)
for blkno, blk in enumerate(self.blocks):
rl = blk.mgr_locs
new_blknos[rl.indexer] = blkno
new_blklocs[rl.indexer] = np.arange(len(rl))
if (new_blknos == -1).any():
raise AssertionError("Gaps in blk ref_locs")
self._blknos = new_blknos
self._blklocs = new_blklocs
# make items read only for now
def _get_items(self):
return self.axes[0]
items = property(fget=_get_items)
def _get_counts(self, f):
""" return a dict of the counts of the function in BlockManager """
self._consolidate_inplace()
counts = dict()
for b in self.blocks:
v = f(b)
counts[v] = counts.get(v, 0) + b.shape[0]
return counts
def get_dtype_counts(self):
return self._get_counts(lambda b: b.dtype.name)
def get_ftype_counts(self):
return self._get_counts(lambda b: b.ftype)
def get_dtypes(self):
dtypes = np.array([blk.dtype for blk in self.blocks])
return algos.take_1d(dtypes, self._blknos, allow_fill=False)
def get_ftypes(self):
ftypes = np.array([blk.ftype for blk in self.blocks])
return algos.take_1d(ftypes, self._blknos, allow_fill=False)
def __getstate__(self):
block_values = [b.values for b in self.blocks]
block_items = [self.items[b.mgr_locs.indexer] for b in self.blocks]
axes_array = [ax for ax in self.axes]
extra_state = {
'0.14.1': {
'axes': axes_array,
'blocks': [dict(values=b.values, mgr_locs=b.mgr_locs.indexer)
for b in self.blocks]
}
}
# First three elements of the state are to maintain forward
# compatibility with 0.13.1.
return axes_array, block_values, block_items, extra_state
def __setstate__(self, state):
def unpickle_block(values, mgr_locs):
# numpy < 1.7 pickle compat
if values.dtype == 'M8[us]':
values = values.astype('M8[ns]')
return make_block(values, placement=mgr_locs)
if (isinstance(state, tuple) and len(state) >= 4 and
'0.14.1' in state[3]):
state = state[3]['0.14.1']
self.axes = [_ensure_index(ax) for ax in state['axes']]
self.blocks = tuple(unpickle_block(b['values'], b['mgr_locs'])
for b in state['blocks'])
else:
# discard anything after 3rd, support beta pickling format for a
# little while longer
ax_arrays, bvalues, bitems = state[:3]
self.axes = [_ensure_index(ax) for ax in ax_arrays]
if len(bitems) == 1 and self.axes[0].equals(bitems[0]):
# This is a workaround for pre-0.14.1 pickles that didn't
# support unpickling multi-block frames/panels with non-unique
# columns/items, because given a manager with items ["a", "b",
# "a"] there's no way of knowing which block's "a" is where.
#
# Single-block case can be supported under the assumption that
# block items corresponded to manager items 1-to-1.
all_mgr_locs = [slice(0, len(bitems[0]))]
else:
all_mgr_locs = [self.axes[0].get_indexer(blk_items)
for blk_items in bitems]
self.blocks = tuple(
unpickle_block(values, mgr_locs)
for values, mgr_locs in zip(bvalues, all_mgr_locs))
self._post_setstate()
def _post_setstate(self):
self._is_consolidated = False
self._known_consolidated = False
self._rebuild_blknos_and_blklocs()
def __len__(self):
return len(self.items)
def __unicode__(self):
output = pprint_thing(self.__class__.__name__)
for i, ax in enumerate(self.axes):
if i == 0:
output += u('\nItems: %s') % ax
else:
output += u('\nAxis %d: %s') % (i, ax)
for block in self.blocks:
output += u('\n%s') % pprint_thing(block)
return output
def _verify_integrity(self):
mgr_shape = self.shape
tot_items = sum(len(x.mgr_locs) for x in self.blocks)
for block in self.blocks:
if block._verify_integrity and block.shape[1:] != mgr_shape[1:]:
construction_error(tot_items, block.shape[1:], self.axes)
if len(self.items) != tot_items:
raise AssertionError('Number of manager items must equal union of '
'block items\n# manager items: {0}, # '
'tot_items: {1}'.format(
len(self.items), tot_items))
def apply(self, f, axes=None, filter=None, do_integrity_check=False,
consolidate=True, **kwargs):
"""
iterate over the blocks, collect and create a new block manager
Parameters
----------
f : the callable or function name to operate on at the block level
axes : optional (if not supplied, use self.axes)
filter : list, if supplied, only call the block if the filter is in
the block
do_integrity_check : boolean, default False. Do the block manager
integrity check
consolidate: boolean, default True. Join together blocks having same
dtype
Returns
-------
Block Manager (new object)
"""
result_blocks = []
# filter kwarg is used in replace-* family of methods
if filter is not None:
filter_locs = set(self.items.get_indexer_for(filter))
if len(filter_locs) == len(self.items):
# All items are included, as if there were no filtering
filter = None
else:
kwargs['filter'] = filter_locs
if consolidate:
self._consolidate_inplace()
if f == 'where':
align_copy = True
if kwargs.get('align', True):
align_keys = ['other', 'cond']
else:
align_keys = ['cond']
elif f == 'putmask':
align_copy = False
if kwargs.get('align', True):
align_keys = ['new', 'mask']
else:
align_keys = ['mask']
elif f == 'eval':
align_copy = False
align_keys = ['other']
elif f == 'fillna':
# fillna internally does putmask, maybe it's better to do this
# at mgr, not block level?
align_copy = False
align_keys = ['value']
else:
align_keys = []
aligned_args = dict((k, kwargs[k])
for k in align_keys
if hasattr(kwargs[k], 'reindex_axis'))
for b in self.blocks:
if filter is not None:
if not b.mgr_locs.isin(filter_locs).any():
result_blocks.append(b)
continue
if aligned_args:
b_items = self.items[b.mgr_locs.indexer]
for k, obj in aligned_args.items():
axis = getattr(obj, '_info_axis_number', 0)
kwargs[k] = obj.reindex_axis(b_items, axis=axis,
copy=align_copy)
kwargs['mgr'] = self
applied = getattr(b, f)(**kwargs)
result_blocks = _extend_blocks(applied, result_blocks)
if len(result_blocks) == 0:
return self.make_empty(axes or self.axes)
bm = self.__class__(result_blocks, axes or self.axes,
do_integrity_check=do_integrity_check)
bm._consolidate_inplace()
return bm
def reduction(self, f, axis=0, consolidate=True, transposed=False,
**kwargs):
"""
iterate over the blocks, collect and create a new block manager.
This routine is intended for reduction type operations and
will do inference on the generated blocks.
Parameters
----------
f: the callable or function name to operate on at the block level
axis: reduction axis, default 0
consolidate: boolean, default True. Join together blocks having same
dtype
transposed: boolean, default False
we are holding transposed data
Returns
-------
Block Manager (new object)
"""
if consolidate:
self._consolidate_inplace()
axes, blocks = [], []
for b in self.blocks:
kwargs['mgr'] = self
axe, block = getattr(b, f)(axis=axis, **kwargs)
axes.append(axe)
blocks.append(block)
# note that some DatetimeTZ, Categorical are always ndim==1
ndim = set([b.ndim for b in blocks])
if 2 in ndim:
new_axes = list(self.axes)
# multiple blocks that are reduced
if len(blocks) > 1:
new_axes[1] = axes[0]
# reset the placement to the original
for b, sb in zip(blocks, self.blocks):
b.mgr_locs = sb.mgr_locs
else:
new_axes[axis] = Index(np.concatenate(
[ax.values for ax in axes]))
if transposed:
new_axes = new_axes[::-1]
blocks = [b.make_block(b.values.T,
placement=np.arange(b.shape[1])
) for b in blocks]
return self.__class__(blocks, new_axes)
# 0 ndim
if 0 in ndim and 1 not in ndim:
values = np.array([b.values for b in blocks])
if len(values) == 1:
return values.item()
blocks = [make_block(values, ndim=1)]
axes = Index([ax[0] for ax in axes])
# single block
values = _concat._concat_compat([b.values for b in blocks])
# compute the orderings of our original data
if len(self.blocks) > 1:
indexer = np.empty(len(self.axes[0]), dtype=np.intp)
i = 0
for b in self.blocks:
for j in b.mgr_locs:
indexer[j] = i
i = i + 1
values = values.take(indexer)
return SingleBlockManager(
[make_block(values,
ndim=1,
placement=np.arange(len(values)))],
axes[0])
def isna(self, **kwargs):
return self.apply('apply', **kwargs)
def where(self, **kwargs):
return self.apply('where', **kwargs)
def eval(self, **kwargs):
return self.apply('eval', **kwargs)
def quantile(self, **kwargs):
return self.reduction('quantile', **kwargs)
def setitem(self, **kwargs):
return self.apply('setitem', **kwargs)
def putmask(self, **kwargs):
return self.apply('putmask', **kwargs)
def diff(self, **kwargs):
return self.apply('diff', **kwargs)
def interpolate(self, **kwargs):
return self.apply('interpolate', **kwargs)
def shift(self, **kwargs):
return self.apply('shift', **kwargs)
def fillna(self, **kwargs):
return self.apply('fillna', **kwargs)
def downcast(self, **kwargs):
return self.apply('downcast', **kwargs)
def astype(self, dtype, **kwargs):
return self.apply('astype', dtype=dtype, **kwargs)
def convert(self, **kwargs):
return self.apply('convert', **kwargs)
def replace(self, **kwargs):
return self.apply('replace', **kwargs)
def replace_list(self, src_list, dest_list, inplace=False, regex=False,
mgr=None):
""" do a list replace """
inplace = validate_bool_kwarg(inplace, 'inplace')
if mgr is None:
mgr = self
# figure out our mask a-priori to avoid repeated replacements
values = self.as_matrix()
def comp(s):
if isna(s):
return isna(values)
return _maybe_compare(values, getattr(s, 'asm8', s), operator.eq)
masks = [comp(s) for i, s in enumerate(src_list)]
result_blocks = []
src_len = len(src_list) - 1
for blk in self.blocks:
# its possible to get multiple result blocks here
# replace ALWAYS will return a list
rb = [blk if inplace else blk.copy()]
for i, (s, d) in enumerate(zip(src_list, dest_list)):
new_rb = []
for b in rb:
if b.dtype == np.object_:
convert = i == src_len
result = b.replace(s, d, inplace=inplace, regex=regex,
mgr=mgr, convert=convert)
new_rb = _extend_blocks(result, new_rb)
else:
# get our mask for this element, sized to this
# particular block
m = masks[i][b.mgr_locs.indexer]
if m.any():
b = b.coerce_to_target_dtype(d)
new_rb.extend(b.putmask(m, d, inplace=True))
else:
new_rb.append(b)
rb = new_rb
result_blocks.extend(rb)
bm = self.__class__(result_blocks, self.axes)
bm._consolidate_inplace()
return bm
def reshape_nd(self, axes, **kwargs):
""" a 2d-nd reshape operation on a BlockManager """
return self.apply('reshape_nd', axes=axes, **kwargs)
def is_consolidated(self):
"""
        Return True if the blocks are consolidated, i.e. no two share an ftype
"""
if not self._known_consolidated:
self._consolidate_check()
return self._is_consolidated
def _consolidate_check(self):
ftypes = [blk.ftype for blk in self.blocks]
self._is_consolidated = len(ftypes) == len(set(ftypes))
self._known_consolidated = True
@property
def is_mixed_type(self):
# Warning, consolidation needs to get checked upstairs
self._consolidate_inplace()
return len(self.blocks) > 1
@property
def is_numeric_mixed_type(self):
# Warning, consolidation needs to get checked upstairs
self._consolidate_inplace()
return all([block.is_numeric for block in self.blocks])
@property
def is_datelike_mixed_type(self):
# Warning, consolidation needs to get checked upstairs
self._consolidate_inplace()
return any([block.is_datelike for block in self.blocks])
@property
def is_view(self):
""" return a boolean if we are a single block and are a view """
if len(self.blocks) == 1:
return self.blocks[0].is_view
# It is technically possible to figure out which blocks are views
# e.g. [ b.values.base is not None for b in self.blocks ]
# but then we have the case of possibly some blocks being a view
# and some blocks not. setting in theory is possible on the non-view
# blocks w/o causing a SettingWithCopy raise/warn. But this is a bit
# complicated
return False
def get_bool_data(self, copy=False):
"""
Parameters
----------
copy : boolean, default False
Whether to copy the blocks
"""
self._consolidate_inplace()
return self.combine([b for b in self.blocks if b.is_bool], copy)
def get_numeric_data(self, copy=False):
"""
Parameters
----------
copy : boolean, default False
Whether to copy the blocks
"""
self._consolidate_inplace()
return self.combine([b for b in self.blocks if b.is_numeric], copy)
def combine(self, blocks, copy=True):
""" return a new manager with the blocks """
if len(blocks) == 0:
return self.make_empty()
# FIXME: optimization potential
indexer = np.sort(np.concatenate([b.mgr_locs.as_array
for b in blocks]))
inv_indexer = lib.get_reverse_indexer(indexer, self.shape[0])
new_blocks = []
for b in blocks:
b = b.copy(deep=copy)
b.mgr_locs = algos.take_1d(inv_indexer, b.mgr_locs.as_array,
axis=0, allow_fill=False)
new_blocks.append(b)
axes = list(self.axes)
axes[0] = self.items.take(indexer)
return self.__class__(new_blocks, axes, do_integrity_check=False)
def get_slice(self, slobj, axis=0):
if axis >= self.ndim:
raise IndexError("Requested axis not found in manager")
if axis == 0:
new_blocks = self._slice_take_blocks_ax0(slobj)
else:
slicer = [slice(None)] * (axis + 1)
slicer[axis] = slobj
slicer = tuple(slicer)
new_blocks = [blk.getitem_block(slicer) for blk in self.blocks]
new_axes = list(self.axes)
new_axes[axis] = new_axes[axis][slobj]
bm = self.__class__(new_blocks, new_axes, do_integrity_check=False,
fastpath=True)
bm._consolidate_inplace()
return bm
def __contains__(self, item):
return item in self.items
@property
def nblocks(self):
return len(self.blocks)
def copy(self, deep=True, mgr=None):
"""
Make deep or shallow copy of BlockManager
Parameters
----------
        deep : boolean or string, default True
If False, return shallow copy (do not copy data)
If 'all', copy data and a deep copy of the index
Returns
-------
copy : BlockManager
"""
# this preserves the notion of view copying of axes
if deep:
if deep == 'all':
copy = lambda ax: ax.copy(deep=True)
else:
copy = lambda ax: ax.view()
new_axes = [copy(ax) for ax in self.axes]
else:
new_axes = list(self.axes)
return self.apply('copy', axes=new_axes, deep=deep,
do_integrity_check=False)
def as_matrix(self, items=None):
if len(self.blocks) == 0:
return np.empty(self.shape, dtype=float)
if items is not None:
mgr = self.reindex_axis(items, axis=0)
else:
mgr = self
if self._is_single_block or not self.is_mixed_type:
return mgr.blocks[0].get_values()
else:
return mgr._interleave()
def _interleave(self):
"""
Return ndarray from blocks with specified item order
Items must be contained in the blocks
"""
dtype = _interleaved_dtype(self.blocks)
result = np.empty(self.shape, dtype=dtype)
if result.shape[0] == 0:
# Workaround for numpy 1.7 bug:
#
# >>> a = np.empty((0,10))
# >>> a[slice(0,0)]
# array([], shape=(0, 10), dtype=float64)
# >>> a[[]]
# Traceback (most recent call last):
# File "<stdin>", line 1, in <module>
# IndexError: index 0 is out of bounds for axis 0 with size 0
return result
itemmask = np.zeros(self.shape[0])
for blk in self.blocks:
rl = blk.mgr_locs
result[rl.indexer] = blk.get_values(dtype)
itemmask[rl.indexer] = 1
if not itemmask.all():
raise AssertionError('Some items were not contained in blocks')
return result
def to_dict(self, copy=True):
"""
Return a dict of str(dtype) -> BlockManager
Parameters
----------
copy : boolean, default True
Returns
-------
values : a dict of dtype -> BlockManager
Notes
-----
This consolidates based on str(dtype)
"""
self._consolidate_inplace()
bd = {}
for b in self.blocks:
bd.setdefault(str(b.dtype), []).append(b)
return {dtype: self.combine(blocks, copy=copy)
for dtype, blocks in bd.items()}
def xs(self, key, axis=1, copy=True, takeable=False):
if axis < 1:
raise AssertionError('Can only take xs across axis >= 1, got %d' %
axis)
# take by position
if takeable:
loc = key
else:
loc = self.axes[axis].get_loc(key)
slicer = [slice(None, None) for _ in range(self.ndim)]
slicer[axis] = loc
slicer = tuple(slicer)
new_axes = list(self.axes)
# could be an array indexer!
if isinstance(loc, (slice, np.ndarray)):
new_axes[axis] = new_axes[axis][loc]
else:
new_axes.pop(axis)
new_blocks = []
if len(self.blocks) > 1:
# we must copy here as we are mixed type
for blk in self.blocks:
newb = make_block(values=blk.values[slicer],
klass=blk.__class__, fastpath=True,
placement=blk.mgr_locs)
new_blocks.append(newb)
elif len(self.blocks) == 1:
block = self.blocks[0]
vals = block.values[slicer]
if copy:
vals = vals.copy()
new_blocks = [make_block(values=vals,
placement=block.mgr_locs,
klass=block.__class__,
fastpath=True, )]
return self.__class__(new_blocks, new_axes)
def fast_xs(self, loc):
"""
get a cross sectional for a given location in the
        items; handle dups
        return the result, it *could* be a view in the case of a
single block
"""
if len(self.blocks) == 1:
return self.blocks[0].iget((slice(None), loc))
items = self.items
# non-unique (GH4726)
if not items.is_unique:
result = self._interleave()
if self.ndim == 2:
result = result.T
return result[loc]
# unique
dtype = _interleaved_dtype(self.blocks)
n = len(items)
result = np.empty(n, dtype=dtype)
for blk in self.blocks:
# Such assignment may incorrectly coerce NaT to None
# result[blk.mgr_locs] = blk._slice((slice(None), loc))
for i, rl in enumerate(blk.mgr_locs):
result[rl] = blk._try_coerce_result(blk.iget((i, loc)))
return result
def consolidate(self):
"""
Join together blocks having same dtype
Returns
-------
y : BlockManager
"""
if self.is_consolidated():
return self
bm = self.__class__(self.blocks, self.axes)
bm._is_consolidated = False
bm._consolidate_inplace()
return bm
def _consolidate_inplace(self):
if not self.is_consolidated():
self.blocks = tuple(_consolidate(self.blocks))
self._is_consolidated = True
self._known_consolidated = True
self._rebuild_blknos_and_blklocs()
def get(self, item, fastpath=True):
"""
Return values for selected item (ndarray or BlockManager).
"""
if self.items.is_unique:
if not isna(item):
loc = self.items.get_loc(item)
else:
indexer = np.arange(len(self.items))[isna(self.items)]
# allow a single nan location indexer
if not is_scalar(indexer):
if len(indexer) == 1:
loc = indexer.item()
else:
raise ValueError("cannot label index with a null key")
return self.iget(loc, fastpath=fastpath)
else:
if isna(item):
raise TypeError("cannot label index with a null key")
indexer = self.items.get_indexer_for([item])
return self.reindex_indexer(new_axis=self.items[indexer],
indexer=indexer, axis=0,
allow_dups=True)
def iget(self, i, fastpath=True):
"""
Return the data as a SingleBlockManager if fastpath=True and possible
        Otherwise return as an ndarray
"""
block = self.blocks[self._blknos[i]]
values = block.iget(self._blklocs[i])
if not fastpath or not block._box_to_block_values or values.ndim != 1:
return values
# fastpath shortcut for select a single-dim from a 2-dim BM
return SingleBlockManager(
[block.make_block_same_class(values,
placement=slice(0, len(values)),
ndim=1, fastpath=True)],
self.axes[1])
def get_scalar(self, tup):
"""
Retrieve single item
"""
full_loc = list(ax.get_loc(x) for ax, x in zip(self.axes, tup))
blk = self.blocks[self._blknos[full_loc[0]]]
values = blk.values
# FIXME: this may return non-upcasted types?
if values.ndim == 1:
return values[full_loc[1]]
full_loc[0] = self._blklocs[full_loc[0]]
return values[tuple(full_loc)]
def delete(self, item):
"""
Delete selected item (items if non-unique) in-place.
"""
indexer = self.items.get_loc(item)
is_deleted = np.zeros(self.shape[0], dtype=np.bool_)
is_deleted[indexer] = True
ref_loc_offset = -is_deleted.cumsum()
is_blk_deleted = [False] * len(self.blocks)
if isinstance(indexer, int):
affected_start = indexer
else:
affected_start = is_deleted.nonzero()[0][0]
for blkno, _ in _fast_count_smallints(self._blknos[affected_start:]):
blk = self.blocks[blkno]
bml = blk.mgr_locs
blk_del = is_deleted[bml.indexer].nonzero()[0]
if len(blk_del) == len(bml):
is_blk_deleted[blkno] = True
continue
elif len(blk_del) != 0:
blk.delete(blk_del)
bml = blk.mgr_locs
blk.mgr_locs = bml.add(ref_loc_offset[bml.indexer])
# FIXME: use Index.delete as soon as it uses fastpath=True
self.axes[0] = self.items[~is_deleted]
self.blocks = tuple(b for blkno, b in enumerate(self.blocks)
if not is_blk_deleted[blkno])
self._shape = None
self._rebuild_blknos_and_blklocs()
def set(self, item, value, check=False):
"""
Set new item in-place. Does not consolidate. Adds new Block if not
contained in the current set of items
if check, then validate that we are not setting the same data in-place
"""
# FIXME: refactor, clearly separate broadcasting & zip-like assignment
# can prob also fix the various if tests for sparse/categorical
value_is_extension_type = is_extension_type(value)
        # categorical/sparse/datetimetz
if value_is_extension_type:
def value_getitem(placement):
return value
else:
if value.ndim == self.ndim - 1:
value = _safe_reshape(value, (1,) + value.shape)
def value_getitem(placement):
return value
else:
def value_getitem(placement):
return value[placement.indexer]
if value.shape[1:] != self.shape[1:]:
raise AssertionError('Shape of new values must be compatible '
'with manager shape')
try:
loc = self.items.get_loc(item)
except KeyError:
# This item wasn't present, just insert at end
self.insert(len(self.items), item, value)
return
if isinstance(loc, int):
loc = [loc]
blknos = self._blknos[loc]
blklocs = self._blklocs[loc].copy()
unfit_mgr_locs = []
unfit_val_locs = []
removed_blknos = []
for blkno, val_locs in _get_blkno_placements(blknos, len(self.blocks),
group=True):
blk = self.blocks[blkno]
blk_locs = blklocs[val_locs.indexer]
if blk.should_store(value):
blk.set(blk_locs, value_getitem(val_locs), check=check)
else:
unfit_mgr_locs.append(blk.mgr_locs.as_array[blk_locs])
unfit_val_locs.append(val_locs)
# If all block items are unfit, schedule the block for removal.
if len(val_locs) == len(blk.mgr_locs):
removed_blknos.append(blkno)
else:
self._blklocs[blk.mgr_locs.indexer] = -1
blk.delete(blk_locs)
self._blklocs[blk.mgr_locs.indexer] = np.arange(len(blk))
if len(removed_blknos):
# Remove blocks & update blknos accordingly
is_deleted = np.zeros(self.nblocks, dtype=np.bool_)
is_deleted[removed_blknos] = True
new_blknos = np.empty(self.nblocks, dtype=np.int64)
new_blknos.fill(-1)
new_blknos[~is_deleted] = np.arange(self.nblocks -
len(removed_blknos))
self._blknos = algos.take_1d(new_blknos, self._blknos, axis=0,
allow_fill=False)
self.blocks = tuple(blk for i, blk in enumerate(self.blocks)
if i not in set(removed_blknos))
if unfit_val_locs:
unfit_mgr_locs = np.concatenate(unfit_mgr_locs)
unfit_count = len(unfit_mgr_locs)
new_blocks = []
if value_is_extension_type:
# This code (ab-)uses the fact that sparse blocks contain only
# one item.
new_blocks.extend(
make_block(values=value.copy(), ndim=self.ndim,
placement=slice(mgr_loc, mgr_loc + 1))
for mgr_loc in unfit_mgr_locs)
self._blknos[unfit_mgr_locs] = (np.arange(unfit_count) +
len(self.blocks))
self._blklocs[unfit_mgr_locs] = 0
else:
# unfit_val_locs contains BlockPlacement objects
unfit_val_items = unfit_val_locs[0].append(unfit_val_locs[1:])
new_blocks.append(
make_block(values=value_getitem(unfit_val_items),
ndim=self.ndim, placement=unfit_mgr_locs))
self._blknos[unfit_mgr_locs] = len(self.blocks)
self._blklocs[unfit_mgr_locs] = np.arange(unfit_count)
self.blocks += tuple(new_blocks)
# Newly created block's dtype may already be present.
self._known_consolidated = False
def insert(self, loc, item, value, allow_duplicates=False):
"""
Insert item at selected position.
Parameters
----------
loc : int
item : hashable
value : array_like
allow_duplicates: bool
If False, trying to insert non-unique item will raise
"""
if not allow_duplicates and item in self.items:
# Should this be a different kind of error??
raise ValueError('cannot insert {}, already exists'.format(item))
if not isinstance(loc, int):
raise TypeError("loc must be int")
# insert to the axis; this could possibly raise a TypeError
new_axis = self.items.insert(loc, item)
block = make_block(values=value, ndim=self.ndim,
placement=slice(loc, loc + 1))
for blkno, count in _fast_count_smallints(self._blknos[loc:]):
blk = self.blocks[blkno]
if count == len(blk.mgr_locs):
blk.mgr_locs = blk.mgr_locs.add(1)
else:
new_mgr_locs = blk.mgr_locs.as_array.copy()
new_mgr_locs[new_mgr_locs >= loc] += 1
blk.mgr_locs = new_mgr_locs
if loc == self._blklocs.shape[0]:
# np.append is a lot faster (at least in numpy 1.7.1), let's use it
# if we can.
self._blklocs = np.append(self._blklocs, 0)
self._blknos = np.append(self._blknos, len(self.blocks))
else:
self._blklocs = np.insert(self._blklocs, loc, 0)
self._blknos = np.insert(self._blknos, loc, len(self.blocks))
self.axes[0] = new_axis
self.blocks += (block,)
self._shape = None
self._known_consolidated = False
if len(self.blocks) > 100:
self._consolidate_inplace()
def reindex_axis(self, new_index, axis, method=None, limit=None,
fill_value=None, copy=True):
"""
Conform block manager to new index.
"""
new_index = _ensure_index(new_index)
new_index, indexer = self.axes[axis].reindex(new_index, method=method,
limit=limit)
return self.reindex_indexer(new_index, indexer, axis=axis,
fill_value=fill_value, copy=copy)
def reindex_indexer(self, new_axis, indexer, axis, fill_value=None,
allow_dups=False, copy=True):
"""
Parameters
----------
new_axis : Index
indexer : ndarray of int64 or None
axis : int
fill_value : object
allow_dups : bool
        indexer is a pandas-style indexer; -1 entries are filled with
        fill_value.
"""
if indexer is None:
if new_axis is self.axes[axis] and not copy:
return self
result = self.copy(deep=copy)
result.axes = list(self.axes)
result.axes[axis] = new_axis
return result
self._consolidate_inplace()
# some axes don't allow reindexing with dups
if not allow_dups:
self.axes[axis]._can_reindex(indexer)
if axis >= self.ndim:
raise IndexError("Requested axis not found in manager")
if axis == 0:
new_blocks = self._slice_take_blocks_ax0(indexer,
fill_tuple=(fill_value,))
else:
new_blocks = [blk.take_nd(indexer, axis=axis, fill_tuple=(
fill_value if fill_value is not None else blk.fill_value,))
for blk in self.blocks]
new_axes = list(self.axes)
new_axes[axis] = new_axis
return self.__class__(new_blocks, new_axes)
def _slice_take_blocks_ax0(self, slice_or_indexer, fill_tuple=None):
"""
Slice/take blocks along axis=0.
Overloaded for SingleBlock
Returns
-------
new_blocks : list of Block
"""
allow_fill = fill_tuple is not None
sl_type, slobj, sllen = _preprocess_slice_or_indexer(
slice_or_indexer, self.shape[0], allow_fill=allow_fill)
if self._is_single_block:
blk = self.blocks[0]
if sl_type in ('slice', 'mask'):
return [blk.getitem_block(slobj, new_mgr_locs=slice(0, sllen))]
elif not allow_fill or self.ndim == 1:
if allow_fill and fill_tuple[0] is None:
_, fill_value = maybe_promote(blk.dtype)
fill_tuple = (fill_value, )
return [blk.take_nd(slobj, axis=0,
new_mgr_locs=slice(0, sllen),
fill_tuple=fill_tuple)]
if sl_type in ('slice', 'mask'):
blknos = self._blknos[slobj]
blklocs = self._blklocs[slobj]
else:
blknos = algos.take_1d(self._blknos, slobj, fill_value=-1,
allow_fill=allow_fill)
blklocs = algos.take_1d(self._blklocs, slobj, fill_value=-1,
allow_fill=allow_fill)
# When filling blknos, make sure blknos is updated before appending to
# blocks list, that way new blkno is exactly len(blocks).
#
# FIXME: mgr_groupby_blknos must return mgr_locs in ascending order,
# pytables serialization will break otherwise.
blocks = []
for blkno, mgr_locs in _get_blkno_placements(blknos, len(self.blocks),
group=True):
if blkno == -1:
# If we've got here, fill_tuple was not None.
fill_value = fill_tuple[0]
blocks.append(self._make_na_block(placement=mgr_locs,
fill_value=fill_value))
else:
blk = self.blocks[blkno]
# Otherwise, slicing along items axis is necessary.
if not blk._can_consolidate:
# A non-consolidatable block, it's easy, because there's
# only one item and each mgr loc is a copy of that single
# item.
for mgr_loc in mgr_locs:
newblk = blk.copy(deep=True)
newblk.mgr_locs = slice(mgr_loc, mgr_loc + 1)
blocks.append(newblk)
else:
blocks.append(blk.take_nd(blklocs[mgr_locs.indexer],
axis=0, new_mgr_locs=mgr_locs,
fill_tuple=None))
return blocks
def _make_na_block(self, placement, fill_value=None):
# TODO: infer dtypes other than float64 from fill_value
if fill_value is None:
fill_value = np.nan
block_shape = list(self.shape)
block_shape[0] = len(placement)
dtype, fill_value = infer_dtype_from_scalar(fill_value)
block_values = np.empty(block_shape, dtype=dtype)
block_values.fill(fill_value)
return make_block(block_values, placement=placement)
def take(self, indexer, axis=1, verify=True, convert=True):
"""
Take items along any axis.
"""
self._consolidate_inplace()
indexer = (np.arange(indexer.start, indexer.stop, indexer.step,
dtype='int64')
if isinstance(indexer, slice)
else np.asanyarray(indexer, dtype='int64'))
n = self.shape[axis]
if convert:
indexer = maybe_convert_indices(indexer, n)
if verify:
if ((indexer == -1) | (indexer >= n)).any():
raise Exception('Indices must be nonzero and less than '
'the axis length')
new_labels = self.axes[axis].take(indexer)
return self.reindex_indexer(new_axis=new_labels, indexer=indexer,
axis=axis, allow_dups=True)
def merge(self, other, lsuffix='', rsuffix=''):
if not self._is_indexed_like(other):
raise AssertionError('Must have same axes to merge managers')
l, r = items_overlap_with_suffix(left=self.items, lsuffix=lsuffix,
right=other.items, rsuffix=rsuffix)
new_items = _concat_indexes([l, r])
new_blocks = [blk.copy(deep=False) for blk in self.blocks]
offset = self.shape[0]
for blk in other.blocks:
blk = blk.copy(deep=False)
blk.mgr_locs = blk.mgr_locs.add(offset)
new_blocks.append(blk)
new_axes = list(self.axes)
new_axes[0] = new_items
return self.__class__(_consolidate(new_blocks), new_axes)
def _is_indexed_like(self, other):
"""
Check all axes except items
"""
if self.ndim != other.ndim:
raise AssertionError('Number of dimensions must agree '
'got %d and %d' % (self.ndim, other.ndim))
for ax, oax in zip(self.axes[1:], other.axes[1:]):
if not ax.equals(oax):
return False
return True
def equals(self, other):
self_axes, other_axes = self.axes, other.axes
if len(self_axes) != len(other_axes):
return False
if not all(ax1.equals(ax2) for ax1, ax2 in zip(self_axes, other_axes)):
return False
self._consolidate_inplace()
other._consolidate_inplace()
if len(self.blocks) != len(other.blocks):
return False
# canonicalize block order, using a tuple combining the type
# name and then mgr_locs because there might be unconsolidated
# blocks (say, Categorical) which can only be distinguished by
# the iteration order
def canonicalize(block):
return (block.dtype.name, block.mgr_locs.as_array.tolist())
self_blocks = sorted(self.blocks, key=canonicalize)
other_blocks = sorted(other.blocks, key=canonicalize)
return all(block.equals(oblock)
for block, oblock in zip(self_blocks, other_blocks))
def unstack(self, unstacker_func):
"""Return a blockmanager with all blocks unstacked.
Parameters
----------
unstacker_func : callable
A (partially-applied) ``pd.core.reshape._Unstacker`` class.
Returns
-------
unstacked : BlockManager
"""
dummy = unstacker_func(np.empty((0, 0)), value_columns=self.items)
new_columns = dummy.get_new_columns()
new_index = dummy.get_new_index()
new_blocks = []
columns_mask = []
for blk in self.blocks:
blocks, mask = blk._unstack(
partial(unstacker_func,
value_columns=self.items[blk.mgr_locs.indexer]),
new_columns)
new_blocks.extend(blocks)
columns_mask.extend(mask)
new_columns = new_columns[columns_mask]
bm = BlockManager(new_blocks, [new_columns, new_index])
return bm
class SingleBlockManager(BlockManager):
""" manage a single block with """
ndim = 1
_is_consolidated = True
_known_consolidated = True
__slots__ = ()
def __init__(self, block, axis, do_integrity_check=False, fastpath=False):
if isinstance(axis, list):
if len(axis) != 1:
raise ValueError("cannot create SingleBlockManager with more "
"than 1 axis")
axis = axis[0]
# passed from constructor, single block, single axis
if fastpath:
self.axes = [axis]
if isinstance(block, list):
# empty block
if len(block) == 0:
block = [np.array([])]
elif len(block) != 1:
raise ValueError('Cannot create SingleBlockManager with '
'more than 1 block')
block = block[0]
else:
self.axes = [_ensure_index(axis)]
# create the block here
if isinstance(block, list):
# provide consolidation to the interleaved_dtype
if len(block) > 1:
dtype = _interleaved_dtype(block)
block = [b.astype(dtype) for b in block]
block = _consolidate(block)
if len(block) != 1:
raise ValueError('Cannot create SingleBlockManager with '
'more than 1 block')
block = block[0]
if not isinstance(block, Block):
block = make_block(block, placement=slice(0, len(axis)), ndim=1,
fastpath=True)
self.blocks = [block]
def _post_setstate(self):
pass
@property
def _block(self):
return self.blocks[0]
@property
def _values(self):
return self._block.values
@property
def _blknos(self):
""" compat with BlockManager """
return None
@property
def _blklocs(self):
""" compat with BlockManager """
return None
def reindex(self, new_axis, indexer=None, method=None, fill_value=None,
limit=None, copy=True):
# if we are the same and don't copy, just return
if self.index.equals(new_axis):
if copy:
return self.copy(deep=True)
else:
return self
values = self._block.get_values()
if indexer is None:
indexer = self.items.get_indexer_for(new_axis)
if fill_value is None:
fill_value = np.nan
new_values = algos.take_1d(values, indexer, fill_value=fill_value)
# fill if needed
if method is not None or limit is not None:
new_values = missing.interpolate_2d(new_values,
method=method,
limit=limit,
fill_value=fill_value)
if self._block.is_sparse:
make_block = self._block.make_block_same_class
block = make_block(new_values, copy=copy,
placement=slice(0, len(new_axis)))
mgr = SingleBlockManager(block, new_axis)
mgr._consolidate_inplace()
return mgr
def get_slice(self, slobj, axis=0):
if axis >= self.ndim:
raise IndexError("Requested axis not found in manager")
return self.__class__(self._block._slice(slobj),
self.index[slobj], fastpath=True)
@property
def index(self):
return self.axes[0]
def convert(self, **kwargs):
""" convert the whole block as one """
kwargs['by_item'] = False
return self.apply('convert', **kwargs)
@property
def dtype(self):
return self._block.dtype
@property
def array_dtype(self):
return self._block.array_dtype
@property
def ftype(self):
return self._block.ftype
def get_dtype_counts(self):
return {self.dtype.name: 1}
def get_ftype_counts(self):
return {self.ftype: 1}
def get_dtypes(self):
return np.array([self._block.dtype])
def get_ftypes(self):
return np.array([self._block.ftype])
def external_values(self):
return self._block.external_values()
def internal_values(self):
return self._block.internal_values()
def formatting_values(self):
"""Return the internal values used by the DataFrame/SeriesFormatter"""
return self._block.formatting_values()
def get_values(self):
""" return a dense type view """
return np.array(self._block.to_dense(), copy=False)
@property
def asobject(self):
"""
return a object dtype array. datetime/timedelta like values are boxed
to Timestamp/Timedelta instances.
"""
return self._block.get_values(dtype=object)
@property
def itemsize(self):
return self._block.values.itemsize
@property
def _can_hold_na(self):
return self._block._can_hold_na
def is_consolidated(self):
return True
def _consolidate_check(self):
pass
def _consolidate_inplace(self):
pass
def delete(self, item):
"""
Delete single item from SingleBlockManager.
Ensures that self.blocks doesn't become empty.
"""
loc = self.items.get_loc(item)
self._block.delete(loc)
self.axes[0] = self.axes[0].delete(loc)
def fast_xs(self, loc):
"""
fast path for getting a cross-section
return a view of the data
"""
return self._block.values[loc]
def construction_error(tot_items, block_shape, axes, e=None):
""" raise a helpful message about our construction """
passed = tuple(map(int, [tot_items] + list(block_shape)))
implied = tuple(map(int, [len(ax) for ax in axes]))
if passed == implied and e is not None:
raise e
if block_shape[0] == 0:
raise ValueError("Empty data passed with indices specified.")
raise ValueError("Shape of passed values is {0}, indices imply {1}".format(
passed, implied))
def create_block_manager_from_blocks(blocks, axes):
try:
if len(blocks) == 1 and not isinstance(blocks[0], Block):
# if blocks[0] is of length 0, return empty blocks
if not len(blocks[0]):
blocks = []
else:
# It's OK if a single block is passed as values, its placement
# is basically "all items", but if there're many, don't bother
# converting, it's an error anyway.
blocks = [make_block(values=blocks[0],
placement=slice(0, len(axes[0])))]
mgr = BlockManager(blocks, axes)
mgr._consolidate_inplace()
return mgr
except (ValueError) as e:
blocks = [getattr(b, 'values', b) for b in blocks]
tot_items = sum(b.shape[0] for b in blocks)
construction_error(tot_items, blocks[0].shape[1:], axes, e)
def create_block_manager_from_arrays(arrays, names, axes):
try:
blocks = form_blocks(arrays, names, axes)
mgr = BlockManager(blocks, axes)
mgr._consolidate_inplace()
return mgr
except ValueError as e:
construction_error(len(arrays), arrays[0].shape, axes, e)
def form_blocks(arrays, names, axes):
# put "leftover" items in float bucket, where else?
# generalize?
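    # Illustrative example (hypothetical columns): for arrays belonging to
    # columns ['a' (float64), 'b' (int64), 'c' (float64)], both float columns
    # are stacked into a single floating-point block and 'b' becomes its own
    # integer block; the original column positions are preserved through the
    # recorded block placements.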
float_items = []
complex_items = []
int_items = []
bool_items = []
object_items = []
sparse_items = []
datetime_items = []
datetime_tz_items = []
cat_items = []
extra_locs = []
names_idx = Index(names)
if names_idx.equals(axes[0]):
names_indexer = np.arange(len(names_idx))
else:
assert names_idx.intersection(axes[0]).is_unique
names_indexer = names_idx.get_indexer_for(axes[0])
for i, name_idx in enumerate(names_indexer):
if name_idx == -1:
extra_locs.append(i)
continue
k = names[name_idx]
v = arrays[name_idx]
if is_sparse(v):
sparse_items.append((i, k, v))
elif issubclass(v.dtype.type, np.floating):
float_items.append((i, k, v))
elif issubclass(v.dtype.type, np.complexfloating):
complex_items.append((i, k, v))
elif issubclass(v.dtype.type, np.datetime64):
if v.dtype != _NS_DTYPE:
v = tslib.cast_to_nanoseconds(v)
if is_datetimetz(v):
datetime_tz_items.append((i, k, v))
else:
datetime_items.append((i, k, v))
elif is_datetimetz(v):
datetime_tz_items.append((i, k, v))
elif issubclass(v.dtype.type, np.integer):
int_items.append((i, k, v))
elif v.dtype == np.bool_:
bool_items.append((i, k, v))
elif is_categorical(v):
cat_items.append((i, k, v))
else:
object_items.append((i, k, v))
blocks = []
if len(float_items):
float_blocks = _multi_blockify(float_items)
blocks.extend(float_blocks)
if len(complex_items):
complex_blocks = _multi_blockify(complex_items)
blocks.extend(complex_blocks)
if len(int_items):
int_blocks = _multi_blockify(int_items)
blocks.extend(int_blocks)
if len(datetime_items):
datetime_blocks = _simple_blockify(datetime_items, _NS_DTYPE)
blocks.extend(datetime_blocks)
if len(datetime_tz_items):
dttz_blocks = [make_block(array,
klass=DatetimeTZBlock,
fastpath=True,
placement=[i], )
for i, _, array in datetime_tz_items]
blocks.extend(dttz_blocks)
if len(bool_items):
bool_blocks = _simple_blockify(bool_items, np.bool_)
blocks.extend(bool_blocks)
if len(object_items) > 0:
object_blocks = _simple_blockify(object_items, np.object_)
blocks.extend(object_blocks)
if len(sparse_items) > 0:
sparse_blocks = _sparse_blockify(sparse_items)
blocks.extend(sparse_blocks)
if len(cat_items) > 0:
cat_blocks = [make_block(array, klass=CategoricalBlock, fastpath=True,
placement=[i])
for i, _, array in cat_items]
blocks.extend(cat_blocks)
if len(extra_locs):
shape = (len(extra_locs),) + tuple(len(x) for x in axes[1:])
# empty items -> dtype object
block_values = np.empty(shape, dtype=object)
block_values.fill(np.nan)
na_block = make_block(block_values, placement=extra_locs)
blocks.append(na_block)
return blocks
def _simple_blockify(tuples, dtype):
""" return a single array of a block that has a single dtype; if dtype is
not None, coerce to this dtype
"""
values, placement = _stack_arrays(tuples, dtype)
# CHECK DTYPE?
if dtype is not None and values.dtype != dtype: # pragma: no cover
values = values.astype(dtype)
block = make_block(values, placement=placement)
return [block]
def _multi_blockify(tuples, dtype=None):
""" return an array of blocks that potentially have different dtypes """
# group by dtype
grouper = itertools.groupby(tuples, lambda x: x[2].dtype)
new_blocks = []
for dtype, tup_block in grouper:
values, placement = _stack_arrays(list(tup_block), dtype)
block = make_block(values, placement=placement)
new_blocks.append(block)
return new_blocks
def _sparse_blockify(tuples, dtype=None):
""" return an array of blocks that potentially have different dtypes (and
are sparse)
"""
new_blocks = []
for i, names, array in tuples:
array = _maybe_to_sparse(array)
block = make_block(array, klass=SparseBlock, fastpath=True,
placement=[i])
new_blocks.append(block)
return new_blocks
def _stack_arrays(tuples, dtype):
# fml
def _asarray_compat(x):
if isinstance(x, ABCSeries):
return x._values
else:
return np.asarray(x)
def _shape_compat(x):
if isinstance(x, ABCSeries):
return len(x),
else:
return x.shape
placement, names, arrays = zip(*tuples)
first = arrays[0]
shape = (len(arrays),) + _shape_compat(first)
stacked = np.empty(shape, dtype=dtype)
for i, arr in enumerate(arrays):
stacked[i] = _asarray_compat(arr)
return stacked, placement
def _interleaved_dtype(blocks):
if not len(blocks):
return None
dtype = find_common_type([b.dtype for b in blocks])
# only numpy compat
if isinstance(dtype, ExtensionDtype):
dtype = np.object
return dtype
def _consolidate(blocks):
"""
Merge blocks having same dtype, exclude non-consolidating blocks
"""
# sort by _can_consolidate, dtype
gkey = lambda x: x._consolidate_key
grouper = itertools.groupby(sorted(blocks, key=gkey), gkey)
new_blocks = []
for (_can_consolidate, dtype), group_blocks in grouper:
merged_blocks = _merge_blocks(list(group_blocks), dtype=dtype,
_can_consolidate=_can_consolidate)
new_blocks = _extend_blocks(merged_blocks, new_blocks)
return new_blocks
def _merge_blocks(blocks, dtype=None, _can_consolidate=True):
if len(blocks) == 1:
return blocks[0]
if _can_consolidate:
if dtype is None:
if len(set([b.dtype for b in blocks])) != 1:
raise AssertionError("_merge_blocks are invalid!")
dtype = blocks[0].dtype
# FIXME: optimization potential in case all mgrs contain slices and
# combination of those slices is a slice, too.
new_mgr_locs = np.concatenate([b.mgr_locs.as_array for b in blocks])
new_values = _vstack([b.values for b in blocks], dtype)
argsort = np.argsort(new_mgr_locs)
new_values = new_values[argsort]
new_mgr_locs = new_mgr_locs[argsort]
return make_block(new_values, fastpath=True, placement=new_mgr_locs)
# no merge
return blocks
def _extend_blocks(result, blocks=None):
""" return a new extended blocks, givin the result """
if blocks is None:
blocks = []
if isinstance(result, list):
for r in result:
if isinstance(r, list):
blocks.extend(r)
else:
blocks.append(r)
elif isinstance(result, BlockManager):
blocks.extend(result.blocks)
else:
blocks.append(result)
return blocks
def _block_shape(values, ndim=1, shape=None):
""" guarantee the shape of the values to be at least 1 d """
if values.ndim < ndim:
if shape is None:
shape = values.shape
values = values.reshape(tuple((1, ) + shape))
return values
def _vstack(to_stack, dtype):
# work around NumPy 1.6 bug
if dtype == _NS_DTYPE or dtype == _TD_DTYPE:
new_values = np.vstack([x.view('i8') for x in to_stack])
return new_values.view(dtype)
else:
return np.vstack(to_stack)
def _maybe_compare(a, b, op):
is_a_array = isinstance(a, np.ndarray)
is_b_array = isinstance(b, np.ndarray)
    # numpy deprecation warning to have i8 vs integer comparisons
if is_datetimelike_v_numeric(a, b):
result = False
# numpy deprecation warning if comparing numeric vs string-like
elif is_numeric_v_string_like(a, b):
result = False
else:
result = op(a, b)
if is_scalar(result) and (is_a_array or is_b_array):
type_names = [type(a).__name__, type(b).__name__]
if is_a_array:
type_names[0] = 'ndarray(dtype=%s)' % a.dtype
if is_b_array:
type_names[1] = 'ndarray(dtype=%s)' % b.dtype
raise TypeError("Cannot compare types %r and %r" % tuple(type_names))
return result
def _concat_indexes(indexes):
return indexes[0].append(indexes[1:])
def _block2d_to_blocknd(values, placement, shape, labels, ref_items):
""" pivot to the labels shape """
panel_shape = (len(placement),) + shape
# TODO: lexsort depth needs to be 2!!
# Create observation selection vector using major and minor
# labels, for converting to panel format.
selector = _factor_indexer(shape[1:], labels)
mask = np.zeros(np.prod(shape), dtype=bool)
mask.put(selector, True)
if mask.all():
pvalues = np.empty(panel_shape, dtype=values.dtype)
else:
dtype, fill_value = maybe_promote(values.dtype)
pvalues = np.empty(panel_shape, dtype=dtype)
pvalues.fill(fill_value)
for i in range(len(placement)):
pvalues[i].flat[mask] = values[:, i]
return make_block(pvalues, placement=placement)
def _factor_indexer(shape, labels):
"""
given a tuple of shape and a list of Categorical labels, return the
expanded label indexer
"""
mult = np.array(shape)[::-1].cumprod()[::-1]
return _ensure_platform_int(
np.sum(np.array(labels).T * np.append(mult, [1]), axis=1).T)
def _get_blkno_placements(blknos, blk_count, group=True):
"""
Parameters
----------
blknos : array of int64
blk_count : int
group : bool
Returns
-------
iterator
yield (BlockPlacement, blkno)
"""
blknos = _ensure_int64(blknos)
# FIXME: blk_count is unused, but it may avoid the use of dicts in cython
for blkno, indexer in lib.get_blkno_indexers(blknos, group):
yield blkno, BlockPlacement(indexer)
def items_overlap_with_suffix(left, lsuffix, right, rsuffix):
"""
If two indices overlap, add suffixes to overlapping entries.
If corresponding suffix is empty, the entry is simply converted to string.
"""
to_rename = left.intersection(right)
if len(to_rename) == 0:
return left, right
else:
if not lsuffix and not rsuffix:
raise ValueError('columns overlap but no suffix specified: %s' %
to_rename)
def lrenamer(x):
if x in to_rename:
return '%s%s' % (x, lsuffix)
return x
def rrenamer(x):
if x in to_rename:
return '%s%s' % (x, rsuffix)
return x
return (_transform_index(left, lrenamer),
_transform_index(right, rrenamer))
def _safe_reshape(arr, new_shape):
"""
If possible, reshape `arr` to have shape `new_shape`,
with a couple of exceptions (see gh-13012):
1) If `arr` is a Categorical or Index, `arr` will be
returned as is.
2) If `arr` is a Series, the `_values` attribute will
be reshaped and returned.
Parameters
----------
arr : array-like, object to be reshaped
new_shape : int or tuple of ints, the new shape
"""
if isinstance(arr, ABCSeries):
arr = arr._values
if not isinstance(arr, Categorical):
arr = arr.reshape(new_shape)
return arr
def _transform_index(index, func, level=None):
"""
Apply function to all values found in index.
This includes transforming multiindex entries separately.
Only apply function to one level of the MultiIndex if level is specified.
"""
if isinstance(index, MultiIndex):
if level is not None:
items = [tuple(func(y) if i == level else y
for i, y in enumerate(x)) for x in index]
else:
items = [tuple(func(y) for y in x) for x in index]
return MultiIndex.from_tuples(items, names=index.names)
else:
items = [func(x) for x in index]
return Index(items, name=index.name)
def _putmask_smart(v, m, n):
"""
Return a new ndarray, try to preserve dtype if possible.
Parameters
----------
v : `values`, updated in-place (array like)
m : `mask`, applies to both sides (array like)
n : `new values` either scalar or an array like aligned with `values`
Returns
-------
values : ndarray with updated values
this *may* be a copy of the original
See Also
--------
ndarray.putmask
"""
# we cannot use np.asarray() here as we cannot have conversions
# that numpy does when numeric are mixed with strings
# n should be the length of the mask or a scalar here
if not is_list_like(n):
n = np.repeat(n, len(m))
elif isinstance(n, np.ndarray) and n.ndim == 0: # numpy scalar
n = np.repeat(np.array(n, ndmin=1), len(m))
    # see if we are only masking values that, if put,
# will work in the current dtype
try:
nn = n[m]
# make sure that we have a nullable type
# if we have nulls
if not _isna_compat(v, nn[0]):
raise ValueError
# we ignore ComplexWarning here
with catch_warnings(record=True):
nn_at = nn.astype(v.dtype)
# avoid invalid dtype comparisons
# between numbers & strings
# only compare integers/floats
# don't compare integers to datetimelikes
if (not is_numeric_v_string_like(nn, nn_at) and
(is_float_dtype(nn.dtype) or
is_integer_dtype(nn.dtype) and
is_float_dtype(nn_at.dtype) or
is_integer_dtype(nn_at.dtype))):
comp = (nn == nn_at)
if is_list_like(comp) and comp.all():
nv = v.copy()
nv[m] = nn_at
return nv
except (ValueError, IndexError, TypeError):
pass
n = np.asarray(n)
def _putmask_preserve(nv, n):
try:
nv[m] = n[m]
except (IndexError, ValueError):
nv[m] = n
return nv
# preserves dtype if possible
if v.dtype.kind == n.dtype.kind:
return _putmask_preserve(v, n)
# change the dtype if needed
dtype, _ = maybe_promote(n.dtype)
if is_extension_type(v.dtype) and is_object_dtype(dtype):
v = v.get_values(dtype)
else:
v = v.astype(dtype)
return _putmask_preserve(v, n)
def concatenate_block_managers(mgrs_indexers, axes, concat_axis, copy):
"""
Concatenate block managers into one.
Parameters
----------
mgrs_indexers : list of (BlockManager, {axis: indexer,...}) tuples
axes : list of Index
concat_axis : int
copy : bool
"""
concat_plan = combine_concat_plans(
[get_mgr_concatenation_plan(mgr, indexers)
for mgr, indexers in mgrs_indexers], concat_axis)
blocks = [make_block(
concatenate_join_units(join_units, concat_axis, copy=copy),
placement=placement) for placement, join_units in concat_plan]
return BlockManager(blocks, axes)
def get_empty_dtype_and_na(join_units):
"""
Return dtype and N/A values to use when concatenating specified units.
Returned N/A value may be None which means there was no casting involved.
Returns
-------
dtype
na
"""
if len(join_units) == 1:
blk = join_units[0].block
if blk is None:
return np.float64, np.nan
has_none_blocks = False
dtypes = [None] * len(join_units)
for i, unit in enumerate(join_units):
if unit.block is None:
has_none_blocks = True
else:
dtypes[i] = unit.dtype
upcast_classes = defaultdict(list)
null_upcast_classes = defaultdict(list)
for dtype, unit in zip(dtypes, join_units):
if dtype is None:
continue
if is_categorical_dtype(dtype):
upcast_cls = 'category'
elif is_datetimetz(dtype):
upcast_cls = 'datetimetz'
elif issubclass(dtype.type, np.bool_):
upcast_cls = 'bool'
elif issubclass(dtype.type, np.object_):
upcast_cls = 'object'
elif is_datetime64_dtype(dtype):
upcast_cls = 'datetime'
elif is_timedelta64_dtype(dtype):
upcast_cls = 'timedelta'
elif is_float_dtype(dtype) or is_numeric_dtype(dtype):
upcast_cls = dtype.name
else:
upcast_cls = 'float'
# Null blocks should not influence upcast class selection, unless there
# are only null blocks, when same upcasting rules must be applied to
# null upcast classes.
if unit.is_na:
null_upcast_classes[upcast_cls].append(dtype)
else:
upcast_classes[upcast_cls].append(dtype)
if not upcast_classes:
upcast_classes = null_upcast_classes
# create the result
if 'object' in upcast_classes:
return np.dtype(np.object_), np.nan
elif 'bool' in upcast_classes:
if has_none_blocks:
return np.dtype(np.object_), np.nan
else:
return np.dtype(np.bool_), None
elif 'category' in upcast_classes:
return np.dtype(np.object_), np.nan
elif 'datetimetz' in upcast_classes:
dtype = upcast_classes['datetimetz']
return dtype[0], tslib.iNaT
elif 'datetime' in upcast_classes:
return np.dtype('M8[ns]'), tslib.iNaT
elif 'timedelta' in upcast_classes:
return np.dtype('m8[ns]'), tslib.iNaT
else: # pragma
g = np.find_common_type(upcast_classes, [])
if is_float_dtype(g):
return g, g.type(np.nan)
elif is_numeric_dtype(g):
if has_none_blocks:
return np.float64, np.nan
else:
return g, None
msg = "invalid dtype determination in get_concat_dtype"
raise AssertionError(msg)
def concatenate_join_units(join_units, concat_axis, copy):
"""
Concatenate values from several join units along selected axis.
"""
if concat_axis == 0 and len(join_units) > 1:
# Concatenating join units along ax0 is handled in _merge_blocks.
raise AssertionError("Concatenating join units along axis0")
empty_dtype, upcasted_na = get_empty_dtype_and_na(join_units)
to_concat = [ju.get_reindexed_values(empty_dtype=empty_dtype,
upcasted_na=upcasted_na)
for ju in join_units]
if len(to_concat) == 1:
# Only one block, nothing to concatenate.
concat_values = to_concat[0]
if copy and concat_values.base is not None:
concat_values = concat_values.copy()
else:
concat_values = _concat._concat_compat(to_concat, axis=concat_axis)
return concat_values
def get_mgr_concatenation_plan(mgr, indexers):
"""
Construct concatenation plan for given block manager and indexers.
Parameters
----------
mgr : BlockManager
indexers : dict of {axis: indexer}
Returns
-------
plan : list of (BlockPlacement, JoinUnit) tuples
"""
    # Calculate the post-reindex shape, except for the item axis, which is
    # handled separately for each block anyway.
mgr_shape = list(mgr.shape)
for ax, indexer in indexers.items():
mgr_shape[ax] = len(indexer)
mgr_shape = tuple(mgr_shape)
if 0 in indexers:
ax0_indexer = indexers.pop(0)
blknos = algos.take_1d(mgr._blknos, ax0_indexer, fill_value=-1)
blklocs = algos.take_1d(mgr._blklocs, ax0_indexer, fill_value=-1)
else:
if mgr._is_single_block:
blk = mgr.blocks[0]
return [(blk.mgr_locs, JoinUnit(blk, mgr_shape, indexers))]
ax0_indexer = None
blknos = mgr._blknos
blklocs = mgr._blklocs
plan = []
for blkno, placements in _get_blkno_placements(blknos, len(mgr.blocks),
group=False):
assert placements.is_slice_like
join_unit_indexers = indexers.copy()
shape = list(mgr_shape)
shape[0] = len(placements)
shape = tuple(shape)
if blkno == -1:
unit = JoinUnit(None, shape)
else:
blk = mgr.blocks[blkno]
ax0_blk_indexer = blklocs[placements.indexer]
unit_no_ax0_reindexing = (len(placements) == len(blk.mgr_locs) and
# Fastpath detection of join unit not
# needing to reindex its block: no ax0
# reindexing took place and block
# placement was sequential before.
((ax0_indexer is None and
blk.mgr_locs.is_slice_like and
blk.mgr_locs.as_slice.step == 1) or
# Slow-ish detection: all indexer locs
# are sequential (and length match is
# checked above).
(np.diff(ax0_blk_indexer) == 1).all()))
# Omit indexer if no item reindexing is required.
if unit_no_ax0_reindexing:
join_unit_indexers.pop(0, None)
else:
join_unit_indexers[0] = ax0_blk_indexer
unit = JoinUnit(blk, shape, join_unit_indexers)
plan.append((placements, unit))
return plan
def combine_concat_plans(plans, concat_axis):
"""
Combine multiple concatenation plans into one.
existing_plan is updated in-place.
"""
if len(plans) == 1:
for p in plans[0]:
yield p[0], [p[1]]
elif concat_axis == 0:
offset = 0
for plan in plans:
last_plc = None
for plc, unit in plan:
yield plc.add(offset), [unit]
last_plc = plc
if last_plc is not None:
offset += last_plc.as_slice.stop
else:
num_ended = [0]
def _next_or_none(seq):
retval = next(seq, None)
if retval is None:
num_ended[0] += 1
return retval
plans = list(map(iter, plans))
next_items = list(map(_next_or_none, plans))
while num_ended[0] != len(next_items):
if num_ended[0] > 0:
raise ValueError("Plan shapes are not aligned")
placements, units = zip(*next_items)
lengths = list(map(len, placements))
min_len, max_len = min(lengths), max(lengths)
if min_len == max_len:
yield placements[0], units
next_items[:] = map(_next_or_none, plans)
else:
yielded_placement = None
yielded_units = [None] * len(next_items)
for i, (plc, unit) in enumerate(next_items):
yielded_units[i] = unit
if len(plc) > min_len:
# trim_join_unit updates unit in place, so only
# placement needs to be sliced to skip min_len.
next_items[i] = (plc[min_len:],
trim_join_unit(unit, min_len))
else:
yielded_placement = plc
next_items[i] = _next_or_none(plans[i])
yield yielded_placement, yielded_units
def trim_join_unit(join_unit, length):
"""
Reduce join_unit's shape along item axis to length.
Extra items that didn't fit are returned as a separate block.
"""
if 0 not in join_unit.indexers:
extra_indexers = join_unit.indexers
if join_unit.block is None:
extra_block = None
else:
extra_block = join_unit.block.getitem_block(slice(length, None))
join_unit.block = join_unit.block.getitem_block(slice(length))
else:
extra_block = join_unit.block
extra_indexers = copy.copy(join_unit.indexers)
extra_indexers[0] = extra_indexers[0][length:]
join_unit.indexers[0] = join_unit.indexers[0][:length]
extra_shape = (join_unit.shape[0] - length,) + join_unit.shape[1:]
join_unit.shape = (length,) + join_unit.shape[1:]
return JoinUnit(block=extra_block, indexers=extra_indexers,
shape=extra_shape)
class JoinUnit(object):
def __init__(self, block, shape, indexers=None):
# Passing shape explicitly is required for cases when block is None.
if indexers is None:
indexers = {}
self.block = block
self.indexers = indexers
self.shape = shape
def __repr__(self):
return '%s(%r, %s)' % (self.__class__.__name__, self.block,
self.indexers)
@cache_readonly
def needs_filling(self):
for indexer in self.indexers.values():
# FIXME: cache results of indexer == -1 checks.
if (indexer == -1).any():
return True
return False
@cache_readonly
def dtype(self):
if self.block is None:
raise AssertionError("Block is None, no dtype")
if not self.needs_filling:
return self.block.dtype
else:
return _get_dtype(maybe_promote(self.block.dtype,
self.block.fill_value)[0])
@cache_readonly
def is_na(self):
if self.block is None:
return True
if not self.block._can_hold_na:
return False
        # Usually it's enough to check only a small fraction of values to see
        # whether a block is NOT null; chunking should help in such cases. The
        # chunk size of 1000 was chosen rather arbitrarily.
values = self.block.values
if self.block.is_categorical:
values_flat = values.categories
elif self.block.is_sparse:
            # fill_value is not NaN and there are holes
if not values._null_fill_value and values.sp_index.ngaps > 0:
return False
values_flat = values.ravel(order='K')
else:
values_flat = values.ravel(order='K')
total_len = values_flat.shape[0]
chunk_len = max(total_len // 40, 1000)
for i in range(0, total_len, chunk_len):
if not isna(values_flat[i:i + chunk_len]).all():
return False
return True
def get_reindexed_values(self, empty_dtype, upcasted_na):
if upcasted_na is None:
# No upcasting is necessary
fill_value = self.block.fill_value
values = self.block.get_values()
else:
fill_value = upcasted_na
if self.is_na:
if getattr(self.block, 'is_object', False):
# we want to avoid filling with np.nan if we are
# using None; we already know that we are all
# nulls
values = self.block.values.ravel(order='K')
if len(values) and values[0] is None:
fill_value = None
if getattr(self.block, 'is_datetimetz', False):
pass
elif getattr(self.block, 'is_categorical', False):
pass
elif getattr(self.block, 'is_sparse', False):
pass
else:
missing_arr = np.empty(self.shape, dtype=empty_dtype)
missing_arr.fill(fill_value)
return missing_arr
if not self.indexers:
if not self.block._can_consolidate:
# preserve these for validation in _concat_compat
return self.block.values
if self.block.is_bool:
# External code requested filling/upcasting, bool values must
# be upcasted to object to avoid being upcasted to numeric.
values = self.block.astype(np.object_).values
elif self.block.is_categorical:
values = self.block.values
else:
# No dtype upcasting is done here, it will be performed during
# concatenation itself.
values = self.block.get_values()
if not self.indexers:
# If there's no indexing to be done, we want to signal outside
# code that this array must be copied explicitly. This is done
# by returning a view and checking `retval.base`.
values = values.view()
else:
for ax, indexer in self.indexers.items():
values = algos.take_nd(values, indexer, axis=ax,
fill_value=fill_value)
return values
def _fast_count_smallints(arr):
"""Faster version of set(arr) for sequences of small numbers."""
if len(arr) == 0:
# Handle empty arr case separately: numpy 1.6 chokes on that.
return np.empty((0, 2), dtype=arr.dtype)
else:
counts = np.bincount(arr.astype(np.int_))
nz = counts.nonzero()[0]
return np.c_[nz, counts[nz]]
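# Example (hypothetical input):
#   _fast_count_smallints(np.array([0, 2, 2, 5]))
# returns np.array([[0, 1], [2, 2], [5, 1]]), i.e. a (value, count) row for
# every value that occurs at least once.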
def _preprocess_slice_or_indexer(slice_or_indexer, length, allow_fill):
if isinstance(slice_or_indexer, slice):
return 'slice', slice_or_indexer, lib.slice_len(slice_or_indexer,
length)
elif (isinstance(slice_or_indexer, np.ndarray) and
slice_or_indexer.dtype == np.bool_):
return 'mask', slice_or_indexer, slice_or_indexer.sum()
else:
indexer = np.asanyarray(slice_or_indexer, dtype=np.int64)
if not allow_fill:
indexer = maybe_convert_indices(indexer, length)
return 'fancy', indexer, len(indexer)
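# Sketches of the three kinds this helper can return (hypothetical inputs):
#   _preprocess_slice_or_indexer(slice(0, 4), 10, False)
#       -> ('slice', slice(0, 4), 4)
#   _preprocess_slice_or_indexer(np.array([True, False, True]), 3, False)
#       -> ('mask', <the boolean array>, 2)
#   _preprocess_slice_or_indexer([3, 1], 10, False)
#       -> ('fancy', np.array([3, 1]), 2)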
|
bsd-3-clause
| 4,438,622,254,422,599,700
| 32.88472
| 79
| 0.539857
| false
| 4.2034
| false
| false
| false
|
MSLNZ/msl-equipment
|
msl/examples/equipment/energetiq/eq99.py
|
1
|
1074
|
"""
Example showing how to communicate with an EQ-99 Manager from Energetiq.
"""
import time
from msl.equipment import (
EquipmentRecord,
ConnectionRecord,
Backend,
)
record = EquipmentRecord(
manufacturer='Energetiq',
model='EQ-99',
connection=ConnectionRecord(
address='COM6', # update for your controller
backend=Backend.MSL,
)
)
# connect to the Manager
eq99 = record.connect()
# get the total number of running hours of the lamp
print('Lamp ON time is {} hours'.format(eq99.get_lamptime()))
# turn the output on
eq99.set_output(True)
# wait for the lamp to turn on
t0 = time.time()
while True:
value, bitmask = eq99.condition_register()
print('Elapsed time: {:3.0f} seconds, bitmask: {}'.format(time.time() - t0, bitmask))
if bitmask[5] == '1': # index 5 represents the "Lamp on" state
print('Lamp is on')
break
time.sleep(1)
# do other stuff while the lamp is on
time.sleep(10)
# turn the output off when done
eq99.set_output(False)
# disconnect from the Manager
eq99.disconnect()
|
mit
| -7,131,231,820,671,970,000
| 21.851064
| 89
| 0.676909
| false
| 3.215569
| false
| false
| false
|
btjhjeon/ConversationalQA
|
skipthoughts/decoding/train.py
|
2
|
7706
|
"""
Main trainer function
"""
import theano
import theano.tensor as tensor
import cPickle as pkl
import numpy
import copy
import os
import warnings
import sys
import time
import homogeneous_data
from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams
from collections import defaultdict
from utils import *
from layers import get_layer, param_init_fflayer, fflayer, param_init_gru, gru_layer
from optim import adam
from model import init_params, build_model, build_sampler
from vocab import load_dictionary
from search import gen_sample
# main trainer
def trainer(X, C, stmodel,
dimctx=4800, #vector dimensionality
dim_word=620, # word vector dimensionality
dim=1600, # the number of GRU units
encoder='gru',
decoder='gru',
doutput=False,
max_epochs=5,
dispFreq=1,
decay_c=0.,
grad_clip=5.,
n_words=40000,
maxlen_w=100,
optimizer='adam',
batch_size = 16,
saveto='/u/rkiros/research/semhash/models/toy.npz',
dictionary='/ais/gobi3/u/rkiros/bookgen/book_dictionary_large.pkl',
embeddings=None,
saveFreq=1000,
sampleFreq=100,
reload_=False):
# Model options
model_options = {}
model_options['dimctx'] = dimctx
model_options['dim_word'] = dim_word
model_options['dim'] = dim
model_options['encoder'] = encoder
model_options['decoder'] = decoder
model_options['doutput'] = doutput
model_options['max_epochs'] = max_epochs
model_options['dispFreq'] = dispFreq
model_options['decay_c'] = decay_c
model_options['grad_clip'] = grad_clip
model_options['n_words'] = n_words
model_options['maxlen_w'] = maxlen_w
model_options['optimizer'] = optimizer
model_options['batch_size'] = batch_size
model_options['saveto'] = saveto
model_options['dictionary'] = dictionary
model_options['embeddings'] = embeddings
model_options['saveFreq'] = saveFreq
model_options['sampleFreq'] = sampleFreq
model_options['reload_'] = reload_
print model_options
# reload options
if reload_ and os.path.exists(saveto):
print 'reloading...' + saveto
with open('%s.pkl'%saveto, 'rb') as f:
            model_options = pkl.load(f)
# load dictionary
print 'Loading dictionary...'
worddict = load_dictionary(dictionary)
# Load pre-trained embeddings, if applicable
if embeddings != None:
print 'Loading embeddings...'
with open(embeddings, 'rb') as f:
embed_map = pkl.load(f)
dim_word = len(embed_map.values()[0])
model_options['dim_word'] = dim_word
preemb = norm_weight(n_words, dim_word)
pz = defaultdict(lambda : 0)
for w in embed_map.keys():
pz[w] = 1
for w in worddict.keys()[:n_words-2]:
if pz[w] > 0:
preemb[worddict[w]] = embed_map[w]
else:
preemb = None
# Inverse dictionary
word_idict = dict()
for kk, vv in worddict.iteritems():
word_idict[vv] = kk
word_idict[0] = '<eos>'
word_idict[1] = 'UNK'
print 'Building model'
params = init_params(model_options, preemb=preemb)
# reload parameters
if reload_ and os.path.exists(saveto):
params = load_params(saveto, params)
tparams = init_tparams(params)
trng, inps, cost = build_model(tparams, model_options)
print 'Building sampler'
f_init, f_next = build_sampler(tparams, model_options, trng)
# before any regularizer
print 'Building f_log_probs...',
f_log_probs = theano.function(inps, cost, profile=False)
print 'Done'
# weight decay, if applicable
if decay_c > 0.:
decay_c = theano.shared(numpy.float32(decay_c), name='decay_c')
weight_decay = 0.
for kk, vv in tparams.iteritems():
weight_decay += (vv ** 2).sum()
weight_decay *= decay_c
cost += weight_decay
# after any regularizer
print 'Building f_cost...',
f_cost = theano.function(inps, cost, profile=False)
print 'Done'
print 'Done'
print 'Building f_grad...',
grads = tensor.grad(cost, wrt=itemlist(tparams))
f_grad_norm = theano.function(inps, [(g**2).sum() for g in grads], profile=False)
f_weight_norm = theano.function([], [(t**2).sum() for k,t in tparams.iteritems()], profile=False)
if grad_clip > 0.:
g2 = 0.
for g in grads:
g2 += (g**2).sum()
new_grads = []
for g in grads:
new_grads.append(tensor.switch(g2 > (grad_clip**2),
g / tensor.sqrt(g2) * grad_clip,
g))
grads = new_grads
lr = tensor.scalar(name='lr')
print 'Building optimizers...',
# (compute gradients), (updates parameters)
f_grad_shared, f_update = eval(optimizer)(lr, tparams, grads, inps, cost)
print 'Optimization'
    # Each sentence in the minibatch has the same length (for encoder)
train_iter = homogeneous_data.HomogeneousData([X,C], batch_size=batch_size, maxlen=maxlen_w)
uidx = 0
lrate = 0.01
for eidx in xrange(max_epochs):
n_samples = 0
print 'Epoch ', eidx
for x, c in train_iter:
n_samples += len(x)
uidx += 1
x, mask, ctx = homogeneous_data.prepare_data(x, c, worddict, stmodel, maxlen=maxlen_w, n_words=n_words)
if x == None:
print 'Minibatch with zero sample under length ', maxlen_w
uidx -= 1
continue
ud_start = time.time()
cost = f_grad_shared(x, mask, ctx)
f_update(lrate)
ud = time.time() - ud_start
if numpy.isnan(cost) or numpy.isinf(cost):
print 'NaN detected'
return 1., 1., 1.
if numpy.mod(uidx, dispFreq) == 0:
print 'Epoch ', eidx, 'Update ', uidx, 'Cost ', cost, 'UD ', ud
if numpy.mod(uidx, saveFreq) == 0:
print 'Saving...',
params = unzip(tparams)
numpy.savez(saveto, history_errs=[], **params)
pkl.dump(model_options, open('%s.pkl'%saveto, 'wb'))
print 'Done'
if numpy.mod(uidx, sampleFreq) == 0:
x_s = x
mask_s = mask
ctx_s = ctx
for jj in xrange(numpy.minimum(10, len(ctx_s))):
sample, score = gen_sample(tparams, f_init, f_next, ctx_s[jj].reshape(1, model_options['dimctx']), model_options,
trng=trng, k=1, maxlen=100, stochastic=False, use_unk=False)
print 'Truth ',jj,': ',
for vv in x_s[:,jj]:
if vv == 0:
break
if vv in word_idict:
print word_idict[vv],
else:
print 'UNK',
print
for kk, ss in enumerate([sample[0]]):
print 'Sample (', kk,') ', jj, ': ',
for vv in ss:
if vv == 0:
break
if vv in word_idict:
print word_idict[vv],
else:
print 'UNK',
print
print 'Seen %d samples'%n_samples
if __name__ == '__main__':
pass
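# A minimal, hypothetical invocation sketch (paths and data are placeholders;
# X is a list of target sentences, C the matching source sentences and stmodel
# a loaded skip-thoughts encoder, as expected by homogeneous_data.prepare_data):
#   trainer(X, C, stmodel,
#           saveto='/path/to/model.npz',
#           dictionary='/path/to/dictionary.pkl',
#           batch_size=64, max_epochs=5)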
|
mit
| -6,543,742,380,335,079,000
| 31.242678
| 133
| 0.536854
| false
| 3.773751
| false
| false
| false
|
sljrobin/dotfiles
|
dzen2/.dzen2/scripts/Music.py
|
1
|
6137
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Filename: Music.py
# Description: Functions for Music
# Author: Simon L. J. Robin | https://sljrobin.org
# Created: 2016-09-11 22:50:11
# Modified: 2016-09-25 23:50:25
#
################################################################################
import os
import subprocess
import sys
sys.path.insert(0, os.environ['HOME'] + "/.dzen2/lib")
import Colors
import Icons
################################################################################
class Music(object):
"""Functions for Music.
"""
def __format_metadata(self, color_artist, color_title, color_album,\
color_percentage, color_repeat, color_random):
"""Formats the song's metadata for printing.
Args:
color_artist: Artist's color.
color_title: Title's color.
color_album: Album's color.
color_percentage: Percentage's color.
color_repeat: Repeat's color.
color_random: Random's color.
"""
# Getting song's metadata
song_metadata = self.__get_metadata() # Metadata list
song_artist = song_metadata[0] # Artist
song_album = song_metadata[1] # Album
song_title = song_metadata[2] # Title
song_time = song_metadata[3] # Time
song_percentage = song_metadata[4] # Percentage
song_repeat = song_metadata[5] # Repeat
song_random = song_metadata[6] # Random
# Artist
sys.stdout.write("^fg(%s)[^fg()" % Colors.CL_BASE03)
sys.stdout.write("^fg(%s)%s^fg()" % (color_artist, song_artist))
sys.stdout.write("^fg(%s)][^fg()" % Colors.CL_BASE03)
# Title
sys.stdout.write("^fg(%s)%s^fg()" % (color_title, song_title))
sys.stdout.write("^fg(%s)][^fg()" % Colors.CL_BASE03)
# Album
sys.stdout.write("^fg(%s)%s^fg()" % (color_album, song_album))
sys.stdout.write("^fg(%s)][^fg()" % Colors.CL_BASE03)
# Time / Percentage
sys.stdout.write("^fg(%s)%s %s%%^fg()" % (color_percentage,\
song_time, song_percentage))
sys.stdout.write("^fg(%s)]^fg()" % Colors.CL_BASE03)
# Repeat
if song_repeat != "off":
sys.stdout.write("^fg(%s)[^fg()" % Colors.CL_BASE03)
sys.stdout.write("^fg(%s)R^fg()" % color_repeat)
sys.stdout.write("^fg(%s)]^fg()" % Colors.CL_BASE03)
# Random
if song_random != "off":
sys.stdout.write("^fg(%s)[^fg()" % Colors.CL_BASE03)
sys.stdout.write("^fg(%s)~^fg()" % color_random)
sys.stdout.write("^fg(%s)]^fg()" % Colors.CL_BASE03)
############################################################################
def __get_metadata(self):
"""Gets the song's metadata.
Returns:
Song's metadata.
"""
# Executing command and parsing output
metadata_format = '%artist%\\n%album%\\n%title%\\n%track%'
cmd = subprocess.Popen(['mpc', '--format', metadata_format],\
stdout=subprocess.PIPE)
cmd_out, cmd_err = cmd.communicate()
cmd_outparsed = cmd_out.split('\n')
# Getting status
status = self.__get_music_status()
# Getting Artist / Album / Title
artist = cmd_outparsed[0]
album = cmd_outparsed[1]
title = cmd_outparsed[2]
        # Getting Time / Percentage / Repeat / Random
for line in cmd_outparsed:
if "#" in line:
# Time
if status == "playing":
time = line.split(' ')[4]
elif status == "paused":
time = line.split(' ')[5]
# Percentage
if status == "playing":
percentage = line.split(' ')[5].translate(None, "()%")
elif status == "paused":
percentage = line.split(' ')[6].translate(None, "()%")
if "volume" in line:
# Repeat
repeat = line.split(' ')[5]
# Random
random = line.split(' ')[9]
# Parsing metadata
metadata = [artist, album, title,\
time, percentage,\
repeat, random]
return metadata
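    # The parsing above assumes `mpc --format ...` output shaped roughly like
    # the following (illustrative, not an exhaustive spec):
    #   Artist\nAlbum\nTitle\nTrack
    #   [playing] #3/12   1:23/4:56 (27%)
    #   volume: 80%   repeat: off   random: off   single: off   consume: off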
############################################################################
def __get_music_status(self):
"""Gets MPC status.
Returns:
MPC status.
"""
# Executing command and parsing output
cmd = subprocess.Popen(['mpc'], stdout=subprocess.PIPE)
cmd_out, cmd_err = cmd.communicate()
cmd_outparsed = cmd_out.split('\n')
# Looking for MPC status
status_line = cmd_outparsed[1]
for line in cmd_outparsed:
if "playing" in status_line:
status = "playing"
return status
elif "paused" in status_line:
status = "paused"
return status
else:
status = "stopped"
return status
############################################################################
def show_song(self):
"""Shows information about the current playing song.
"""
icon = Icons.Icons() # Icon
# Getting status
status = self.__get_music_status()
if status == "playing":
icon.show_icon("music_play")
self.__format_metadata(Colors.CL_BASE0B, Colors.CL_BASE0D,\
Colors.CL_BASE0A, Colors.CL_BASE08,\
Colors.CL_BASE09, Colors.CL_BASE0E)
elif status == "paused":
icon.show_icon("music_pause")
self.__format_metadata(Colors.CL_BASE04, Colors.CL_BASE04,\
Colors.CL_BASE04, Colors.CL_BASE04,\
Colors.CL_BASE04, Colors.CL_BASE04)
else:
icon.show_icon("music_stop")
|
gpl-2.0
| -8,595,044,930,207,759,000
| 36.193939
| 80
| 0.473032
| false
| 4.053501
| false
| false
| false
|
migasfree/migasfree
|
setup.py
|
1
|
6056
|
# -*- coding: UTF-8 -*-
# Copyright (c) 2011-2020 Jose Antonio Chavarría <jachavar@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
__author__ = 'Jose Antonio Chavarría'
__license__ = 'GPLv3'
# http://guide.python-distribute.org/
# python setup.py --help-commands
# python setup.py build
# python setup.py sdist
# python setup.py bdist --format=rpm
# python setup.py --command-packages=stdeb.command bdist_deb (python-stdeb)
# http://zetcode.com/articles/packageinpython/
# TODO https://wiki.ubuntu.com/PackagingGuide/Python
# TODO https://help.ubuntu.com/community/PythonRecipes/DebianPackage
import sys
if not hasattr(sys, 'version_info') or sys.version_info < (3, 5, 0, 'final'):
raise SystemExit('migasfree-server requires Python 3.5 or later.')
import os
from distutils.core import setup
from distutils.command.install_data import install_data
PATH = os.path.dirname(__file__)
README = open(os.path.join(PATH, 'README.md')).read()
VERSION = __import__('migasfree').__version__
class InstallData(install_data):
def _find_other_files(self):
data_files = []
for directory in ['packages']:
for root, _, files in os.walk(directory):
final_files = []
for archive in files:
final_files.append(os.path.join(root, archive))
data_files.append(
(
'/usr/share/%s' % os.path.join('migasfree-server', root),
final_files
)
)
return data_files
def _find_doc_files(self):
data_files = []
for root, _, files in os.walk('doc'):
# first level does not matter
if root == 'doc':
continue
final_files = []
for archive in files:
final_files.append(os.path.join(root, archive))
# remove doc directory from root
tmp_dir = root.replace('doc/', '', 1)
data_files.append(
(
'/usr/share/doc/%s' % os.path.join(
'migasfree-server',
tmp_dir
),
final_files
)
)
return data_files
def run(self):
self.data_files.extend(self._find_other_files())
self.data_files.extend(self._find_doc_files())
install_data.run(self)
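# _find_other_files() / _find_doc_files() produce (target_dir, [files]) tuples
# in the same shape as the literal data_files entry passed to setup() below,
# e.g. (hypothetical file names):
#   ('/usr/share/migasfree-server/packages', ['packages/launcher.deb'])
#   ('/usr/share/doc/migasfree-server/html', ['doc/html/index.html'])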
setup(
name='migasfree-server',
version=VERSION,
description='migasfree-server is a Django app to manage systems management',
long_description=README,
license='GPLv3',
author='Alberto Gacías',
author_email='alberto@migasfree.org',
url='http://www.migasfree.org/',
platforms=['Linux'],
packages=[
'migasfree',
'migasfree.server',
'migasfree.server.admin',
'migasfree.server.migrations',
'migasfree.server.models',
'migasfree.server.templatetags',
'migasfree.server.views',
'migasfree.catalog',
'migasfree.catalog.migrations',
'migasfree.settings',
'migasfree.stats',
'migasfree.stats.views',
],
package_dir={
'migasfree': 'migasfree',
'migasfree.server': 'migasfree/server',
'migasfree.server.admin': 'migasfree/server/admin',
'migasfree.server.migrations': 'migasfree/server/migrations',
'migasfree.server.models': 'migasfree/server/models',
'migasfree.server.templatetags': 'migasfree/server/templatetags',
'migasfree.server.views': 'migasfree/server/views',
'migasfree.catalog': 'migasfree/catalog',
'migasfree.catalog.migrations': 'migasfree/catalog/migrations',
'migasfree.stats': 'migasfree/stats',
'migasfree.stats.views': 'migasfree/stats/views',
},
cmdclass={
'install_data': InstallData,
},
package_data={
'migasfree': [
'i18n/*/LC_MESSAGES/*.mo',
'server/fixtures/*',
'server/static/ajax-select/*.css',
'server/static/ajax-select/*.js',
'server/static/ajax-select/images/*',
'server/static/css/*',
'server/static/img/*',
'server/static/js/*.js',
'server/static/js/d3/*',
'server/static/fonts/*',
'server/templates/*.html',
'server/templates/*/*.html',
'server/templates/*/*/*.html',
'server/templates/*/*/*/*.html',
'catalog/static/css/*',
'catalog/static/img/*',
'catalog/static/js/*.js',
'catalog/static/js/locales/*.js',
],
},
data_files=[
('/usr/share/doc/migasfree-server', [
'AUTHORS',
'COPYING',
'INSTALL',
'MANIFEST.in',
'README.md',
]),
],
# http://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Framework :: Django',
'License :: OSI Approved :: GNU General Public License (GPL)',
'Natural Language :: English',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries :: Python Modules',
],
)
|
gpl-3.0
| -6,168,221,606,599,370,000
| 32.076503
| 81
| 0.577895
| false
| 3.843175
| false
| false
| false
|
ganga-devs/ganga
|
ganga/GangaCore/Utility/execute.py
|
1
|
13130
|
import os
import base64
import subprocess
import threading
import pickle as pickle
import signal
from copy import deepcopy
from GangaCore.Core.exceptions import GangaException
from GangaCore.Utility.logging import getLogger
logger = getLogger()
def bytes2string(obj):
if isinstance(obj, bytes):
return obj.decode("utf-8")
if isinstance(obj, dict):
return {bytes2string(key): bytes2string(value) for key, value in obj.items()}
if isinstance(obj, list):
return [bytes2string(item) for item in obj]
if isinstance(obj, tuple):
return tuple(bytes2string(item) for item in obj)
return obj
def env_update_script(indent=''):
""" This function creates an extension to a python script, or just a python script to be run at the end of the
piece of code we're interested in.
    This piece of code will dump the environment after the execution has taken place into a pipe.
    This returns a tuple of the script it has generated and the (read, write) pipe file descriptors used to pass the env back in memory
Args:
indent (str): This is the indent to apply to the script if this script is to be appended to a python file
"""
fdread, fdwrite = os.pipe()
os.set_inheritable(fdwrite, True)
this_script = '''
import os
import pickle as pickle
with os.fdopen(###FD_WRITE###,'wb') as envpipe:
pickle.dump(dict(os.environ), envpipe, 2)
'''
from GangaCore.GPIDev.Lib.File.FileUtils import indentScript
script = indentScript(this_script, '###INDENT###')
script = script.replace('###INDENT###' , indent )\
.replace('###FD_READ###' , str(fdread) )\
.replace('###FD_WRITE###', str(fdwrite))
return script, (fdread, fdwrite)
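# The generated snippet relies on a plain os.pipe + pickle hand-off; a minimal
# standalone sketch of that pattern (illustrative only):
#   r, w = os.pipe()
#   os.set_inheritable(w, True)
#   # child writes:  pickle.dump(dict(os.environ), os.fdopen(w, 'wb'), 2)
#   # parent reads:  env = pickle.load(os.fdopen(r, 'rb'))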
def python_wrapper(command, python_setup='', update_env=False, indent=''):
""" This section of code wraps the given python command inside a small wrapper class to allow us to control the output.
Optionally we can also append to the end of this file a script to allow us to extract the environment after we've
finished executing our command.
Args:
command (str): This is the python code to be executed (can be multi-line)
        python_setup (str): This is some python code to be executed before the python code in question (aka a script header).
        update_env (bool): Control whether we want to capture the env after running
indent (str): This allows for an indent to be applied to the script so it can be placed inside other python scripts
This returns the file handler objects for the env_update_script, the python wrapper itself and the script which has been generated to be run
"""
fdread, fdwrite = os.pipe()
os.set_inheritable(fdwrite, True)
this_script = '''
from __future__ import print_function
import os, sys, traceback
import pickle as pickle
with os.fdopen(###PKL_FDWRITE###, 'wb') as PICKLE_STREAM:
def output(data):
pickle.dump(data, PICKLE_STREAM, 2)
local_ns = {'pickle' : pickle,
'PICKLE_STREAM' : PICKLE_STREAM,
'output' : output}
try:
full_command = """###SETUP### """
full_command += """ \n###COMMAND### """
exec(full_command, local_ns)
except:
pickle.dump(traceback.format_exc(), PICKLE_STREAM, 2)
'''
from GangaCore.GPIDev.Lib.File.FileUtils import indentScript
script = indentScript(this_script, '###INDENT###')
script = script.replace('###INDENT###' , indent )\
.replace('###SETUP###' , python_setup.strip())\
.replace('###COMMAND###' , command.strip() )\
.replace('###PKL_FDREAD###' , str(fdread) )\
.replace('###PKL_FDWRITE###', str(fdwrite) )
env_file_pipes = None
if update_env:
update_script, env_file_pipes = env_update_script()
script += update_script
return script, (fdread, fdwrite), env_file_pipes
def __reader(pipes, output_ns, output_var, require_output):
""" This function un-pickles a pickle from a file and return it as an element in a dictionary
Args:
pipes (tuple): This is a tuple containing the (read_pipe, write_pipe) from os.pipes containing the pickled object
output_ns (dict): This is the dictionary we should put the un-pickled object
output_var (str): This is the key we should use to determine where to put the object in the output_ns
require_output (bool): Should the reader give a warning if the pickle stream is not readable
"""
os.close(pipes[1])
with os.fdopen(pipes[0], 'rb') as read_file:
try:
# rcurrie this deepcopy hides a strange bug that the wrong dict is sometimes returned from here. Remove at your own risk
output_ns[output_var] = deepcopy(pickle.load(read_file))
except UnicodeDecodeError:
output_ns[output_var] = deepcopy(bytes2string(pickle.load(read_file, encoding="bytes")))
except Exception as err:
if require_output:
logger.error('Error getting output stream from command: %s', err)
def __timeout_func(process, timed_out):
""" This function is used to kill functions which are timing out behind the scenes and taking longer than a
threshold time to execute.
Args:
process (class): This is a subprocess class which knows of the pid of wrapping thread around the command we want to kill
timed_out (Event): A threading event to be set when the command has timed out
"""
if process.returncode is None:
timed_out.set()
try:
os.killpg(process.pid, signal.SIGKILL)
except Exception as e:
logger.error("Exception trying to kill process: %s" % e)
def start_timer(p, timeout):
""" Function to construct and return the timer thread and timed_out
Args:
p (object): This is the subprocess object which will be used to run the command of interest
timeout (int): This is the timeout in seconds after which the command will be killed
"""
# Start the background thread to catch timeout events
timed_out = threading.Event()
timer = threading.Timer(timeout, __timeout_func, args=(p, timed_out))
timer.daemon = True
if timeout is not None:
timer.start()
return timer, timed_out
def update_thread(pipes, thread_output, output_key, require_output):
""" Function to construct and return background thread used to read a pickled object into the thread_output for updating
    the environment after executing a user's code
    Args:
pipes (tuple): Tuple containing (read_pipe, write_pipe) which is the pipe the pickled obj is written to
thread_output (dict): Dictionary containing the thread outputs which are used after executing the command
output_key (str): Used to know where in the thread_output to store the output of this thread
require_output (bool): Does the reader require valid pickled output.
"""
ev = threading.Thread(target=__reader, args=(pipes, thread_output, output_key, require_output))
ev.daemon = True
ev.start()
return ev
def execute(command,
timeout=None,
env=None,
cwd=None,
shell=True,
python_setup='',
eval_includes=None,
update_env=False,
):
"""
Execute an external command.
This will execute an external python command when shell=False or an external bash command when shell=True
Args:
command (str): This is the command that we want to execute in string format
timeout (int): This is the timeout which we want to assign to a function and it will be killed if it runs for longer than n seconds
env (dict): This is the environment to use for launching the new command
cwd (str): This is the cwd the command is to be executed within.
shell (bool): True for a bash command to be executed, False for a command to be executed within Python
        python_setup (str): A python command to be executed before the main command is
        eval_includes (str): A string used to construct an environment which, if passed, is used to eval the stdout into a python object
update_env (bool): Should we update the env being passed to what the env was after the command finished running
"""
if update_env and env is None:
raise GangaException('Cannot update the environment if None given.')
if not shell:
# We want to run a python command inside a small Python wrapper
stream_command = 'python -'
command, pkl_file_pipes, env_file_pipes = python_wrapper(command, python_setup, update_env)
else:
# We want to run a shell command inside a _NEW_ shell environment.
# i.e. What I run here I expect to behave in the same way from the command line after I exit Ganga
stream_command = "bash "
if update_env:
# note the exec gets around the problem of indent and base64 gets
# around the \n
command_update, env_file_pipes = env_update_script()
command += ''';python -c "import base64;exec(base64.b64decode(%s))"''' % base64.b64encode(command_update.encode("utf-8"))
# Some minor changes to cleanup the getting of the env
if env is None:
env = os.environ
# Construct the object which will contain the environment we want to run the command in
p = subprocess.Popen(stream_command, shell=True, env=env, cwd=cwd, preexec_fn=os.setsid,
stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
universal_newlines=True, close_fds=False)
# This is where we store the output
thread_output = {}
# Start the timer thread used to kill commands which have likely stalled
timer, timed_out = start_timer(p, timeout)
if update_env:
env_output_key = 'env_output'
update_env_thread = update_thread(env_file_pipes, thread_output, env_output_key, require_output=True)
if not shell:
pkl_output_key = 'pkl_output'
update_pkl_thread = update_thread(pkl_file_pipes, thread_output, pkl_output_key, require_output=False)
# Execute the main command of interest
logger.debug("Executing Command:\n'%s'" % str(command))
stdout, stderr = p.communicate(command)
# Close the timeout watching thread
logger.debug("stdout: %s" % stdout)
logger.debug("stderr: %s" % stderr)
timer.cancel()
if timeout is not None:
timer.join()
# Finish up and decide what to return
if stderr != '':
        # this is still debug as using the environment from dirac default_env made a stderr message dump out
# even though it works
logger.debug(stderr)
if timed_out.isSet():
return 'Command timed out!'
    # Decode any pickled objects from the pipes
if update_env:
update_env_thread.join()
if env_output_key in thread_output:
env.update(thread_output[env_output_key])
else:
logger.error("Expected to find the updated env after running a command")
logger.error("Command: %s" % command)
logger.error("stdout: %s" % stdout)
logger.error("stderr: %s" % stderr)
raise RuntimeError("Missing update env after running command")
if not shell and not eval_includes:
update_pkl_thread.join()
if pkl_output_key in thread_output:
return thread_output[pkl_output_key]
stdout_temp = None
try:
# If output
if stdout:
if isinstance(stdout, bytes):
stdout_temp = pickle.loads(stdout)
else:
try:
stdout_temp = pickle.loads(stdout.encode("utf-8"))
except pickle.UnpicklingError:
stdout_temp = pickle.loads(stdout.encode("latin1"))
# Downsides to wanting to be explicit in how this failed is you need to know all the ways it can!
except (pickle.UnpicklingError, EOFError, ValueError) as err:
if not shell:
log = logger.error
else:
log = logger.debug
log("Command Failed to Execute:\n%s" % command)
log("Command Output is:\n%s" % stdout)
log("Error received:\n%s" % err)
if not stdout_temp:
local_ns = locals()
if isinstance(eval_includes, str):
try:
exec(eval_includes, {}, local_ns)
except:
logger.debug("Failed to eval the env, can't eval stdout")
pass
if isinstance(stdout, str) and stdout:
try:
stdout_temp = eval(stdout, {}, local_ns)
except Exception as err2:
logger.debug("Err2: %s" % str(err2))
pass
if stdout_temp:
stdout = stdout_temp
return stdout
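# Minimal, hypothetical usage sketches:
#   env = dict(os.environ)
#   execute('export FOO=bar; true', shell=True, env=env, update_env=True)
#   # env now also contains FOO=bar, captured from the finished shell
#
#   result = execute('output({"answer": 42})', shell=False)
#   # result == {'answer': 42}, unpickled from the wrapper's pipe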
|
gpl-2.0
| -4,757,166,225,924,737,000
| 42.190789
| 144
| 0.640823
| false
| 4.168254
| false
| false
| false
|
valmynd/MediaFetcher
|
src/plugins/youtube_dl/youtube_dl/extractor/hitbox.py
|
1
|
5692
|
# coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
clean_html,
parse_iso8601,
float_or_none,
int_or_none,
compat_str,
determine_ext,
)
class HitboxIE(InfoExtractor):
IE_NAME = 'hitbox'
_VALID_URL = r'https?://(?:www\.)?(?:hitbox|smashcast)\.tv/(?:[^/]+/)*videos?/(?P<id>[0-9]+)'
_TESTS = [{
'url': 'http://www.hitbox.tv/video/203213',
'info_dict': {
'id': '203213',
'title': 'hitbox @ gamescom, Sub Button Hype extended, Giveaway - hitbox News Update with Oxy',
'alt_title': 'hitboxlive - Aug 9th #6',
'description': '',
'ext': 'mp4',
'thumbnail': r're:^https?://.*\.jpg$',
'duration': 215.1666,
'resolution': 'HD 720p',
'uploader': 'hitboxlive',
'view_count': int,
'timestamp': 1407576133,
'upload_date': '20140809',
'categories': ['Live Show'],
},
'params': {
# m3u8 download
'skip_download': True,
},
}, {
'url': 'https://www.smashcast.tv/hitboxlive/videos/203213',
'only_matching': True,
}]
def _extract_metadata(self, url, video_id):
thumb_base = 'https://edge.sf.hitbox.tv'
metadata = self._download_json(
'%s/%s' % (url, video_id), video_id, 'Downloading metadata JSON')
date = 'media_live_since'
media_type = 'livestream'
if metadata.get('media_type') == 'video':
media_type = 'video'
date = 'media_date_added'
video_meta = metadata.get(media_type, [])[0]
title = video_meta.get('media_status')
alt_title = video_meta.get('media_title')
description = clean_html(
video_meta.get('media_description') or
video_meta.get('media_description_md'))
duration = float_or_none(video_meta.get('media_duration'))
uploader = video_meta.get('media_user_name')
views = int_or_none(video_meta.get('media_views'))
timestamp = parse_iso8601(video_meta.get(date), ' ')
categories = [video_meta.get('category_name')]
thumbs = [{
'url': thumb_base + video_meta.get('media_thumbnail'),
'width': 320,
'height': 180
}, {
'url': thumb_base + video_meta.get('media_thumbnail_large'),
'width': 768,
'height': 432
}]
return {
'id': video_id,
'title': title,
'alt_title': alt_title,
'description': description,
'ext': 'mp4',
'thumbnails': thumbs,
'duration': duration,
'uploader': uploader,
'view_count': views,
'timestamp': timestamp,
'categories': categories,
}
def _real_extract(self, url):
video_id = self._match_id(url)
player_config = self._download_json(
'https://www.smashcast.tv/api/player/config/video/%s' % video_id,
video_id, 'Downloading video JSON')
formats = []
for video in player_config['clip']['bitrates']:
label = video.get('label')
if label == 'Auto':
continue
video_url = video.get('url')
if not video_url:
continue
bitrate = int_or_none(video.get('bitrate'))
if determine_ext(video_url) == 'm3u8':
if not video_url.startswith('http'):
continue
formats.append({
'url': video_url,
'ext': 'mp4',
'tbr': bitrate,
'format_note': label,
'protocol': 'm3u8_native',
})
else:
formats.append({
'url': video_url,
'tbr': bitrate,
'format_note': label,
})
self._sort_formats(formats)
metadata = self._extract_metadata(
'https://www.smashcast.tv/api/media/video', video_id)
metadata['formats'] = formats
return metadata
class HitboxLiveIE(HitboxIE):
IE_NAME = 'hitbox:live'
_VALID_URL = r'https?://(?:www\.)?(?:hitbox|smashcast)\.tv/(?P<id>[^/?#&]+)'
_TESTS = [{
'url': 'http://www.hitbox.tv/dimak',
'info_dict': {
'id': 'dimak',
'ext': 'mp4',
'description': 'md5:c9f80fa4410bc588d7faa40003fc7d0e',
'timestamp': int,
'upload_date': compat_str,
'title': compat_str,
'uploader': 'Dimak',
},
'params': {
# live
'skip_download': True,
},
}, {
'url': 'https://www.smashcast.tv/dimak',
'only_matching': True,
}]
@classmethod
def suitable(cls, url):
return False if HitboxIE.suitable(url) else super(HitboxLiveIE, cls).suitable(url)
def _real_extract(self, url):
video_id = self._match_id(url)
player_config = self._download_json(
'https://www.smashcast.tv/api/player/config/live/%s' % video_id,
video_id)
formats = []
cdns = player_config.get('cdns')
servers = []
for cdn in cdns:
# Subscribe URLs are not playable
if cdn.get('rtmpSubscribe') is True:
continue
base_url = cdn.get('netConnectionUrl')
host = re.search(r'.+\.([^\.]+\.[^\./]+)/.+', base_url).group(1)
if base_url not in servers:
servers.append(base_url)
for stream in cdn.get('bitrates'):
label = stream.get('label')
if label == 'Auto':
continue
stream_url = stream.get('url')
if not stream_url:
continue
bitrate = int_or_none(stream.get('bitrate'))
if stream.get('provider') == 'hls' or determine_ext(stream_url) == 'm3u8':
if not stream_url.startswith('http'):
continue
formats.append({
'url': stream_url,
'ext': 'mp4',
'tbr': bitrate,
'format_note': label,
'rtmp_live': True,
})
else:
formats.append({
'url': '%s/%s' % (base_url, stream_url),
'ext': 'mp4',
'tbr': bitrate,
'rtmp_live': True,
'format_note': host,
'page_url': url,
'player_url': 'http://www.hitbox.tv/static/player/flowplayer/flowplayer.commercial-3.2.16.swf',
})
self._sort_formats(formats)
metadata = self._extract_metadata(
'https://www.smashcast.tv/api/media/live', video_id)
metadata['formats'] = formats
metadata['is_live'] = True
metadata['title'] = self._live_title(metadata.get('title'))
return metadata
|
gpl-3.0
| -2,053,292,684,659,356,200
| 25.598131
| 102
| 0.608046
| false
| 2.767137
| true
| false
| false
|
RedFantom/GSF-Parser
|
frames/strategies.py
|
1
|
14275
|
"""
Author: RedFantom
Contributors: Daethyra (Naiii) and Sprigellania (Zarainia)
License: GNU GPLv3 as in LICENSE
Copyright (C) 2016-2018 RedFantom
"""
# Standard Library
from ast import literal_eval
import sys
# UI Libraries
import tkinter as tk
from tkinter import ttk
from tkinter import messagebox
# Project Modules
from widgets.strategy.list import StrategiesList
from widgets.strategy.map import Map
from toplevels.strategy.settings import SettingsToplevel
from toplevels.strategy.map import MapToplevel
class StrategiesFrame(ttk.Frame):
"""
Frame to display a StrategiesList and Map widget to allow the user
    to create and edit Strategies with custom items in them to visualize
their tactics. An interface to allow real-time Strategy editing is
also provided.
"""
def __init__(self, *args, **kwargs):
ttk.Frame.__init__(self, *args, **kwargs)
"""
The two core widgets of this frame, with lots of callbacks to support
the different functionality. Not all functionality is provided through
callbacks, and providing any other widget than the StrategiesFrame as a
master widget is inadvisable. This is the result of bad coding practices.
"""
self.list = StrategiesList(self, callback=self._set_phase, settings_callback=self.open_settings, frame=self)
self.map = Map(self, moveitem_callback=self.list.move_item_phase, additem_callback=self.list.add_item_to_phase,
canvasheight=385, canvaswidth=385)
self.large = None
self.settings = None
self.in_map = self.map
# Create the widgets to support the description section on the right of the frame.
self.description_header = ttk.Label(self, text="Description", font=("default", 12), justify=tk.LEFT)
self.description = tk.Text(self, width=20 if sys.platform != "linux" else 30, height=23, wrap=tk.WORD)
# Bind the KeyPress event to a callback. A KeyPress is fired when *any* key is pressed on the keyboard.
self.description.bind("<KeyPress>", self.set_description_callback)
self.description_scroll = ttk.Scrollbar(self, orient=tk.VERTICAL, command=self.description.yview)
self.description.config(yscrollcommand=self.description_scroll.set)
self.client = None
self.description_update_task = None
# This frame calls grid_widgets in its __init__ function
self.grid_widgets()
def open_settings(self, *args):
"""
Callback for the Settings button to open a SettingsToplevel.
Only one SettingsToplevel is allowed to be open at any given
time, to prevent any problems with the Client/Server
functionality. If a SettingsToplevel is already open, lifts the
SettingsToplevel to the front so it is visible to the user.
"""
if self.settings:
self.settings.lift()
return
"""
The StrategiesFrame instance is passed as an argument because
not all functionality is provided through callbacks, but some
code is directly executed on the StrategiesFrame instance. Bad
coding practices yet again.
"""
self.settings = SettingsToplevel(master=self, disconnect_callback=self.disconnect_callback)
def grid_widgets(self):
"""It is pretty obvious what this does"""
self.list.grid(column=0, row=1, sticky="nswe", rowspan=2)
self.map.grid(column=1, row=1, sticky="nswe", pady=5, rowspan=2)
self.description_header.grid(column=3, columnspan=2, sticky="w", pady=(5, 0), padx=5, row=1)
self.description.grid(column=3, row=2, sticky="nwe", padx=5, pady=(0, 5))
self.description_scroll.grid(column=4, row=2, sticky="ns")
def _set_phase(self, phase):
"""
Callback for the StrategiesList widget to call when a new Phase
is selected.
:param phase: Phase name
"""
for map in self.maps:
map.update_map(self.list.db[self.list.selected_strategy][phase])
return
def set_description_callback(self, *args):
"""Delay for issue #142"""
self.after(5, self.set_description)
def set_description(self):
"""
Update the description of a certain item in the database. Also
immediately saves the database, so the description is
automatically saved when updated.
"""
if self.client and self.settings.client_permissions[self.client.name][1] is False:
self.description.delete("1.0", tk.END)
self.description.insert("1.0",
self.list.db[self.list.selected_strategy][self.list.selected_phase].description)
if self.list.selected_phase is not None:
self.list.db[self.list.selected_strategy][self.list.selected_phase]. \
description = self.description.get("1.0", tk.END)
self.list.db.save_database()
else:
self.list.db[self.list.selected_strategy].description = self.description.get("1.0", tk.END)
self.list.db.save_database()
if self.settings is not None:
allowed = self.settings.client_permissions[self.client.name][1]
if self.client and (allowed is True or allowed == "True" or allowed == "Master"):
self.send_description()
def send_description(self):
"""
Function to make sure that the description only gets sent two
seconds after stopping typing when editing it, to lower
bandwidth requirements.
"""
if self.description_update_task:
self.after_cancel(self.description_update_task)
self.description_update_task = self.after(
2000, lambda: self.client.update_description(
self.list.selected_strategy, self.list.selected_phase,
self.description.get("1.0", tk.END)))
def show_large(self):
"""
Callback for the Edit (large map)-Button of the StrategiesList
widget to open a larger map in a Toplevel (the MapToplevel from
toplevels.strategy_toplevels)
"""
self.large = MapToplevel(frame=self)
if self.list.selected_phase is None:
return
self.large.map.update_map(self.list.db[self.list.selected_strategy][self.list.selected_phase])
# If the instance is connected to a network, then the Map in the MapToplevel should know about it.
if self.client:
self.large.map.client = self.client
def client_connected(self, client):
"""
Callback for the SettingsToplevel (when open) to call when a
Client object is connected to a network. Sets the client
attribute for this instance, calls another callback, sets the
client attribute for the Map instance and *starts the Client
Thread to start the functionality of the Client*.
"""
self.client = client
self.list.client_connected(client)
self.map.client = self.client
if self.in_map:
self.in_map.client = self.client
self.client.start()
def insert_callback(self, command, args):
"""
Callback that has numerous functions:
- Before doing anything checks if the Client object is valid for
operations to be performed
- Inserts a log entry for the command received into the
ServerToplevel widget if the client is a master client
- Executes the command of the network on the Map widgets with
the given arguments
* add_item
* move_item
* del_item
:param command: command received from the network
:param args: arguments to perform this command
:return: None
:raises: ValueError when the Client is not set or not logged in
:raises: ValueError when the command received is unknown
"""
print("Insert callback received: ", command, args)
# If the command is a login, then only a log should be created, and *all* Strategies in the database
# are sent to the new client to ensure smooth editing of the Strategies
# These are the commands with which the master can control the Server and its Clients
if command == "readonly":
target, allowed = args
if target != self.client.name:
return
allowed = literal_eval(allowed)
for map in self.maps:
map.set_readonly(allowed)
if allowed:
messagebox.showinfo("Info", "You are now allowed to edit the maps.")
else:
messagebox.showinfo("Info", "You are no longer allowed to edit the maps.")
elif command == "kicked":
messagebox.showerror("Info", "You were kicked from the Server.")
self.settings.disconnect_client()
return
elif command == "banned":
messagebox.showerror("Info", "You were banned from the Server.")
self.settings.disconnect_client()
return
elif command == "allowshare":
if not isinstance(args, list):
args = literal_eval(args)
_, name, allowed = args
if not isinstance(allowed, bool):
allowed = literal_eval(allowed)
self.settings.update_share(name, allowed)
if name != self.client.name:
return
if allowed:
messagebox.showinfo("Info", "You are now allowed by the Master of the Server to share your Strategies.")
else:
messagebox.showinfo("Info", "You are now no longer allowed by the Master of the Server to share your "
"Strategies.")
return
elif command == "allowedit":
_, name, allowed = args
if not isinstance(allowed, bool):
allowed = literal_eval(allowed)
if name == self.client.name:
if allowed:
messagebox.showinfo("Info", "You are now allowed by the Master of the Server to edit the "
"Strategies you have available. These edits will be shared with the "
"other users.")
for map in self.maps:
map.set_readonly(False)
else:
messagebox.showinfo("Info", "You are now no longer allowed by the Master of the Server to edit the "
"Strategies you have available.")
for map in self.maps:
map.set_readonly(True)
self.settings.update_edit(name, allowed)
return
elif command == "master":
name = args
if name == self.client.name:
messagebox.showinfo("Info", "You are now the Master of the Server.")
self.settings.update_master()
else:
self.settings.new_master(name)
return
elif command == "master_login":
name = args
self.settings._login_callback(name, "master")
elif command == "client_login":
name = args
self.settings._login_callback(name, "client")
elif command == "logout":
name = args
self.settings._logout_callback(name)
elif command == "description":
_, strategy, phase, description = args
if phase == "None":
phase = None
self.list.db[strategy][phase].description = description
if strategy == self.list.selected_strategy:
self.description.delete("1.0", tk.END)
self.description.insert("1.0", description)
# The arguments *always* include the Strategy name and Phase name for
# the operations to be performed on if these do not match the selected
# Strategy and Phase, then no visible changes occur on the Map widgets.
# However, the saving of the changes happen before this code is reached,
# and thus if the user moves to the other Strategy and Phase that the
# operations were performed on, the user will still see the changed
# elements
elif self.list.selected_strategy != args[0] or self.list.selected_phase != args[1]:
return
# Perform the operations on the Map instances to make the visual changes
elif command == "add_item":
_, _, text, font, color = args
for map in self.maps:
map.add_item(text, font=font, color=color)
elif command == "del_item":
_, _, text = args
for map in self.maps:
map.canvas.delete(map.items[text][0], map.items[text][1])
elif command == "move_item":
_, _, text, x, y = args
for map in self.maps:
rectangle, item = map.items[text]
if map is self.in_map:
coords = (int(int(x) / 768 * 385), int(int(y) / 768 * 385))
map.canvas.coords(item, *coords)
else:
map.canvas.coords(item, int(x), int(y))
map.canvas.coords(rectangle, map.canvas.bbox(item))
else:
raise ValueError("Unknown command: {0} with args {1}".format(command, args))
def disconnect_callback(self):
"""
Callback that is called when the Client is disconnected from the
Server, for whatever reason. All changes the master Client made
are already saved, so this code only resets the state of the
widgets in the StrategiesFrame instance.
"""
self.map.client = None
if self.in_map:
self.in_map.client = None
self.client = None
self.list.client = None
self.map.set_readonly(False)
@property
def maps(self):
"""Return list of Map objects available in StrategiesFrame instance"""
if self.in_map is not self.map:
return [self.map, self.in_map]
return [self.map]
|
gpl-3.0
| -627,416,942,596,403,500
| 44.31746
| 120
| 0.605044
| false
| 4.418137
| false
| false
| false
|
intip/aldryn-bootstrap3
|
aldryn_bootstrap3/south_migrations/0022_auto__add_field_boostrap3alertplugin_icon.py
|
1
|
25863
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Boostrap3AlertPlugin.icon'
db.add_column(u'aldryn_bootstrap3_boostrap3alertplugin', 'icon',
self.gf(u'django.db.models.fields.CharField')(default=u'', max_length=255, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Boostrap3AlertPlugin.icon'
db.delete_column(u'aldryn_bootstrap3_boostrap3alertplugin', 'icon')
models = {
u'aldryn_bootstrap3.boostrap3alertplugin': {
'Meta': {'object_name': 'Boostrap3AlertPlugin', '_ormbases': ['cms.CMSPlugin']},
'classes': (u'django.db.models.fields.TextField', [], {'default': "u''", 'blank': 'True'}),
'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "u'+'", 'unique': 'True', 'primary_key': 'True', 'to': "orm['cms.CMSPlugin']"}),
'context': (u'django.db.models.fields.CharField', [], {'default': "u'default'", 'max_length': '255'}),
'icon': (u'django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255', 'blank': 'True'})
},
u'aldryn_bootstrap3.boostrap3blockquoteplugin': {
'Meta': {'object_name': 'Boostrap3BlockquotePlugin', '_ormbases': ['cms.CMSPlugin']},
'classes': (u'django.db.models.fields.TextField', [], {'default': "u''", 'blank': 'True'}),
'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "u'+'", 'unique': 'True', 'primary_key': 'True', 'to': "orm['cms.CMSPlugin']"}),
'reverse': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
u'aldryn_bootstrap3.boostrap3buttonplugin': {
'Meta': {'object_name': 'Boostrap3ButtonPlugin', '_ormbases': ['cms.CMSPlugin']},
'anchor': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'btn_block': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'btn_context': (u'django.db.models.fields.CharField', [], {'default': "u'default'", 'max_length': '255', 'blank': 'True'}),
'btn_size': (u'django.db.models.fields.CharField', [], {'default': "u'md'", 'max_length': '255', 'blank': 'True'}),
'classes': (u'django.db.models.fields.TextField', [], {'default': "u''", 'blank': 'True'}),
'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "u'+'", 'unique': 'True', 'primary_key': 'True', 'to': "orm['cms.CMSPlugin']"}),
'file': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['filer.File']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'icon_left': (u'django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255', 'blank': 'True'}),
'icon_right': (u'django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255', 'blank': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '256', 'blank': 'True'}),
'mailto': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'page_link': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Page']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True', 'blank': 'True'}),
'target': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'txt_context': (u'django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255', 'blank': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'default': "u'lnk'", 'max_length': '10'}),
'url': ('django.db.models.fields.URLField', [], {'default': "u''", 'max_length': '200', 'blank': 'True'})
},
u'aldryn_bootstrap3.boostrap3iconplugin': {
'Meta': {'object_name': 'Boostrap3IconPlugin', '_ormbases': ['cms.CMSPlugin']},
'classes': (u'django.db.models.fields.TextField', [], {'default': "u''", 'blank': 'True'}),
'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "u'+'", 'unique': 'True', 'primary_key': 'True', 'to': "orm['cms.CMSPlugin']"}),
'icon': (u'django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255'})
},
u'aldryn_bootstrap3.boostrap3imageplugin': {
'Meta': {'object_name': 'Boostrap3ImagePlugin', '_ormbases': ['cms.CMSPlugin']},
'alt': ('django.db.models.fields.TextField', [], {'default': "u''", 'blank': 'True'}),
'aspect_ratio': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '10', 'blank': 'True'}),
'classes': (u'django.db.models.fields.TextField', [], {'default': "u''", 'blank': 'True'}),
'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "u'+'", 'unique': 'True', 'primary_key': 'True', 'to': "orm['cms.CMSPlugin']"}),
'file': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'+'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['filer.Image']"}),
'shape': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '64', 'blank': 'True'}),
'thumbnail': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'title': ('django.db.models.fields.TextField', [], {'default': "u''", 'blank': 'True'})
},
u'aldryn_bootstrap3.boostrap3labelplugin': {
'Meta': {'object_name': 'Boostrap3LabelPlugin', '_ormbases': ['cms.CMSPlugin']},
'classes': (u'django.db.models.fields.TextField', [], {'default': "u''", 'blank': 'True'}),
'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "u'+'", 'unique': 'True', 'primary_key': 'True', 'to': "orm['cms.CMSPlugin']"}),
'context': (u'django.db.models.fields.CharField', [], {'default': "u'default'", 'max_length': '255'}),
'label': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '256', 'blank': 'True'})
},
u'aldryn_bootstrap3.boostrap3panelbodyplugin': {
'Meta': {'object_name': 'Boostrap3PanelBodyPlugin', '_ormbases': ['cms.CMSPlugin']},
'classes': (u'django.db.models.fields.TextField', [], {'default': "u''", 'blank': 'True'}),
'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "u'+'", 'unique': 'True', 'primary_key': 'True', 'to': "orm['cms.CMSPlugin']"})
},
u'aldryn_bootstrap3.boostrap3panelfooterplugin': {
'Meta': {'object_name': 'Boostrap3PanelFooterPlugin', '_ormbases': ['cms.CMSPlugin']},
'classes': (u'django.db.models.fields.TextField', [], {'default': "u''", 'blank': 'True'}),
'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "u'+'", 'unique': 'True', 'primary_key': 'True', 'to': "orm['cms.CMSPlugin']"})
},
u'aldryn_bootstrap3.boostrap3panelheadingplugin': {
'Meta': {'object_name': 'Boostrap3PanelHeadingPlugin', '_ormbases': ['cms.CMSPlugin']},
'classes': (u'django.db.models.fields.TextField', [], {'default': "u''", 'blank': 'True'}),
'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "u'+'", 'unique': 'True', 'primary_key': 'True', 'to': "orm['cms.CMSPlugin']"}),
'title': ('django.db.models.fields.TextField', [], {'default': "u''", 'blank': 'True'})
},
u'aldryn_bootstrap3.boostrap3panelplugin': {
'Meta': {'object_name': 'Boostrap3PanelPlugin', '_ormbases': ['cms.CMSPlugin']},
'classes': (u'django.db.models.fields.TextField', [], {'default': "u''", 'blank': 'True'}),
'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "u'+'", 'unique': 'True', 'primary_key': 'True', 'to': "orm['cms.CMSPlugin']"}),
'context': (u'django.db.models.fields.CharField', [], {'default': "u'default'", 'max_length': '255'})
},
u'aldryn_bootstrap3.boostrap3wellplugin': {
'Meta': {'object_name': 'Boostrap3WellPlugin', '_ormbases': ['cms.CMSPlugin']},
'classes': (u'django.db.models.fields.TextField', [], {'default': "u''", 'blank': 'True'}),
'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "u'+'", 'unique': 'True', 'primary_key': 'True', 'to': "orm['cms.CMSPlugin']"}),
'size': (u'django.db.models.fields.CharField', [], {'default': "u'md'", 'max_length': '255', 'blank': 'True'})
},
u'aldryn_bootstrap3.bootstrap3columnplugin': {
'Meta': {'object_name': 'Bootstrap3ColumnPlugin', '_ormbases': ['cms.CMSPlugin']},
'classes': (u'django.db.models.fields.TextField', [], {'default': "u''", 'blank': 'True'}),
u'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
u'lg_col': (u'django.db.models.fields.IntegerField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
u'lg_offset': (u'django.db.models.fields.IntegerField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
u'lg_pull': (u'django.db.models.fields.IntegerField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
u'lg_push': (u'django.db.models.fields.IntegerField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
u'md_col': (u'django.db.models.fields.IntegerField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
u'md_offset': (u'django.db.models.fields.IntegerField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
u'md_pull': (u'django.db.models.fields.IntegerField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
u'md_push': (u'django.db.models.fields.IntegerField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
u'sm_col': (u'django.db.models.fields.IntegerField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
u'sm_offset': (u'django.db.models.fields.IntegerField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
u'sm_pull': (u'django.db.models.fields.IntegerField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
u'sm_push': (u'django.db.models.fields.IntegerField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'tag': ('django.db.models.fields.SlugField', [], {'default': "u'div'", 'max_length': '50'}),
u'xs_col': (u'django.db.models.fields.IntegerField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
u'xs_offset': (u'django.db.models.fields.IntegerField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
u'xs_pull': (u'django.db.models.fields.IntegerField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
u'xs_push': (u'django.db.models.fields.IntegerField', [], {'default': 'None', 'null': 'True', 'blank': 'True'})
},
u'aldryn_bootstrap3.bootstrap3rowplugin': {
'Meta': {'object_name': 'Bootstrap3RowPlugin', '_ormbases': ['cms.CMSPlugin']},
'classes': (u'django.db.models.fields.TextField', [], {'default': "u''", 'blank': 'True'}),
u'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'})
},
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'cms.cmsplugin': {
'Meta': {'object_name': 'CMSPlugin'},
'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.CMSPlugin']", 'null': 'True', 'blank': 'True'}),
'placeholder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
'plugin_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'cms.page': {
'Meta': {'ordering': "('tree_id', 'lft')", 'unique_together': "(('publisher_is_draft', 'application_namespace'), ('reverse_id', 'site', 'publisher_is_draft'))", 'object_name': 'Page'},
'application_namespace': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'application_urls': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '200', 'null': 'True', 'blank': 'True'}),
'changed_by': ('django.db.models.fields.CharField', [], {'max_length': '70'}),
'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'created_by': ('django.db.models.fields.CharField', [], {'max_length': '70'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_navigation': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'is_home': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'languages': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'limit_visibility_in_menu': ('django.db.models.fields.SmallIntegerField', [], {'default': 'None', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'login_required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'navigation_extenders': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '80', 'null': 'True', 'blank': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['cms.Page']"}),
'placeholders': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['cms.Placeholder']", 'symmetrical': 'False'}),
'publication_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'publication_end_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'publisher_is_draft': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'publisher_public': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'publisher_draft'", 'unique': 'True', 'null': 'True', 'to': "orm['cms.Page']"}),
'reverse_id': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '40', 'null': 'True', 'blank': 'True'}),
'revision_id': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'djangocms_pages'", 'to': u"orm['sites.Site']"}),
'soft_root': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'template': ('django.db.models.fields.CharField', [], {'default': "'INHERIT'", 'max_length': '100'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'xframe_options': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'cms.placeholder': {
'Meta': {'object_name': 'Placeholder'},
'default_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slot': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'filer.file': {
'Meta': {'object_name': 'File'},
'_file_size': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'file': ('django.db.models.fields.files.FileField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'folder': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'all_files'", 'null': 'True', 'to': u"orm['filer.Folder']"}),
'has_all_mandatory_data': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255', 'blank': 'True'}),
'original_filename': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'owned_files'", 'null': 'True', 'to': u"orm['auth.User']"}),
'polymorphic_ctype': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'polymorphic_filer.file_set'", 'null': 'True', 'to': u"orm['contenttypes.ContentType']"}),
'sha1': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '40', 'blank': 'True'}),
'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
u'filer.folder': {
'Meta': {'ordering': "(u'name',)", 'unique_together': "((u'parent', u'name'),)", 'object_name': 'Folder'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
u'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
u'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'filer_owned_folders'", 'null': 'True', 'to': u"orm['auth.User']"}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'children'", 'null': 'True', 'to': u"orm['filer.Folder']"}),
u'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
u'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
'filer.image': {
'Meta': {'object_name': 'Image'},
'_height': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'_width': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'author': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'date_taken': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'default_alt_text': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'default_caption': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
u'file_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['filer.File']", 'unique': 'True', 'primary_key': 'True'}),
'must_always_publish_author_credit': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'must_always_publish_copyright': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'subject_location': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '64', 'null': 'True', 'blank': 'True'})
},
u'sites.site': {
'Meta': {'ordering': "(u'domain',)", 'object_name': 'Site', 'db_table': "u'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['aldryn_bootstrap3']
|
bsd-3-clause
| -6,558,731,824,654,015,000
| 92.710145
| 196
| 0.556819
| false
| 3.465032
| false
| false
| false
|
domanova/highres-cortex
|
bin/od_column-regionsMain.py
|
1
|
18809
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright CEA (2014).
# Copyright Université Paris XI (2014).
#
# Contributor: Olga Domanova <olga.domanova@cea.fr>.
#
# This file is part of highres-cortex, a collection of software designed
# to process high-resolution magnetic resonance images of the cerebral
# cortex.
#
# This software is governed by the CeCILL licence under French law and
# abiding by the rules of distribution of free software. You can use,
# modify and/or redistribute the software under the terms of the CeCILL
# licence as circulated by CEA, CNRS and INRIA at the following URL:
# <http://www.cecill.info/>.
#
# As a counterpart to the access to the source code and rights to copy,
# modify and redistribute granted by the licence, users are provided only
# with a limited warranty and the software's author, the holder of the
# economic rights, and the successive licensors have only limited
# liability.
#
# In this respect, the user's attention is drawn to the risks associated
# with loading, using, modifying and/or developing or reproducing the
# software by the user in light of its specific status of scientific
# software, that may mean that it is complicated to manipulate, and that
# also therefore means that it is reserved for developers and experienced
# professionals having in-depth computer knowledge. Users are therefore
# encouraged to load and test the software's suitability as regards their
# requirements in conditions enabling the security of their systems and/or
# data to be ensured and, more generally, to use and operate it in the
# same conditions as regards security.
#
# The fact that you are presently reading this means that you have had
# knowledge of the CeCILL licence and that you accept its terms.
# an example how to run this script
# time od_column-regionsMain.py -i /volatile/od243208/brainvisa_manual/ml140175/dist/classif_with_outer_boundaries_ml140175_L.nii.gz -d /volatile/od243208/brainvisa_manual/ad140157_columns/ -k ad140157_L
# od_column-regionsMain.py -i /neurospin/lnao/dysbrain/testBatchColumnsExtrProfiles/af140169/af140169_T1inT2_ColumnsCutNew20It/dist/classif_with_outer_boundaries_af140169_R_cut_noSulci_extended.nii.gz -d /neurospin/lnao/dysbrain/testBatchColumnsExtrProfiles/af140169/af140169_T1inT2_ColumnsCutNew20It/ -k af140169_R_cut_noSulci_extended
from soma import aims, aimsalgo
import sys, glob, os, subprocess, sys, time
import numpy as np
from optparse import OptionParser
import highres_cortex.cortex_topo, highres_cortex.div_gradn, highres_cortex.od_get_exchanged_propvol, highres_cortex.od_relabel_conjunction, highres_cortex.od_relabel, highres_cortex.od_randomize_labels
#read in the path and the directory
pathToClassifFile = None
pathToClassifFileWithoutBorders = None
data_directory = None
result_directory = None
heat_directory = None
keyWord = None
parser = OptionParser('Calculate column-regions in a cortical region')
parser.add_option('-i', dest='pathToClassifFile', help='Path to the volume with labeled cortex (100), and white matter (200), as well as the borders (50 and 150)') # if nothing is given: exit
parser.add_option('-j', dest='pathToClassifFileWithoutBorders', help='Path to the volume with labeled cortex (100), and white matter (200)') # if nothing is given: exit
parser.add_option('-d', dest='data_directory', help='directory for the results') # if nothing is given exit
parser.add_option('-k', dest='keyWord', help='KeyWord for the result files (including the patient ID and the hemisphere)') # if nothing is given exit
options, args = parser.parse_args(sys.argv)
print options
print args
if options.pathToClassifFile is None:
print >> sys.stderr, 'New: exit. No classification volume was given'
sys.exit(0)
else:
pathToClassifFile = options.pathToClassifFile
if options.pathToClassifFileWithoutBorders is None:
print >> sys.stderr, 'New: exit. No pathToClassifFileWithoutBorders volume was given'
sys.exit(0)
else:
pathToClassifFileWithoutBorders = options.pathToClassifFileWithoutBorders
if options.data_directory is None:
print >> sys.stderr, 'New: exit. No directory for results was given'
sys.exit(0)
else:
data_directory = options.data_directory
result_directory = data_directory + 'column_regions/'
heat_directory = data_directory + 'heat/'
iso_directory = data_directory + 'isovolume/'
if options.keyWord is None:
print >> sys.stderr, 'New: exit. No keyword for results was given'
sys.exit(0)
else:
keyWord = options.keyWord
# in the given directory create the subdirectory for the results
if not os.path.exists(result_directory):
os.makedirs(result_directory)
#AimsThreshold -b -m eq -t 50 \
#-i /volatile/od243208/brainvisa_manual/ml140175/classif_with_outer_boundaries_ml140175_L.nii.gz \
#-o /volatile/od243208/brainvisa_manual/ml140175/CSF_interface_ml140175_L.nii
volClassif = aims.read(pathToClassifFile)
arrSurfCSF = np.array(volClassif, copy = False)
arrSurfCSF[np.where(arrSurfCSF != 50)] = 0
arrSurfCSF[np.where(arrSurfCSF == 50)] = 32767
aims.write(volClassif, result_directory + 'CSF_interface_%s.nii' % (keyWord)) # OK
#AimsThreshold -b -m eq -t 150 \
#-i ../classif_with_outer_boundaries.nii.gz \
#-o white_interface.nii
volClassif = aims.read(pathToClassifFile)
arrSurfWhite = np.array(volClassif, copy = False)
arrSurfWhite[np.where(arrSurfWhite != 150)] = 0
arrSurfWhite[np.where(arrSurfWhite == 150)] = 32767
aims.write(volClassif, result_directory + 'white_interface_%s.nii' % (keyWord)) # OK
#ylLabelEachVoxel --verbose \
#-i CSF_interface.nii.gz \
#-o CSF_labelled_interface.nii \
#--first-label 100000001
subprocess.check_call(['ylLabelEachVoxel', '--verbose', '-i', result_directory + 'CSF_interface_%s.nii' % (keyWord), '-o', result_directory + 'CSF_labelled_interface_%s.nii' % (keyWord), '--first-label', '100000001']) # OK
#ylLabelEachVoxel --verbose \
#-i white_interface.nii.gz \
#-o white_labelled_interface.nii \
#--first-label 200000001
subprocess.check_call(['ylLabelEachVoxel', '--verbose', '-i', result_directory + 'white_interface_%s.nii' % (keyWord), '-o', result_directory + 'white_labelled_interface_%s.nii' % (keyWord), '--first-label', '200000001']) # OK
#AimsThreshold -b --fg -1 -m di -t 100 \
#-i ../classif.nii.gz \ # can take the classif with outer boundaries! as cortex is the same there
#-o negative_outside_cortex.nii
volClassif = aims.read(pathToClassifFile)
arrNegOutCortex = np.array(volClassif, copy = False)
arrNegOutCortex[np.where(arrNegOutCortex != 100)] = -1
arrNegOutCortex[np.where(arrNegOutCortex == 100)] = 0
aims.write(volClassif, result_directory + 'negative_outside_cortex_%s.nii' % (keyWord)) # OK
#AimsFileConvert -t S32 \
#-i negative_outside_cortex.nii \
#-o negative_outside_cortex_S32.nii
c = aims.Converter(intype=volClassif, outtype=aims.Volume('S32'))
volNegOutCortex = c(volClassif)
aims.write(volNegOutCortex, result_directory + 'negative_outside_cortex_S32_%s.nii' % (keyWord)) # OK
#AimsMerge -m sv \
#-i negative_outside_cortex_S32.nii \
#-M CSF_labelled_interface.nii \
#-o CSF_labelled_interface_negative_outside.nii
arrNegOutCortex = np.array(volNegOutCortex, copy = False)
volCSFLabelInt = aims.read(result_directory + 'CSF_labelled_interface_%s.nii' % (keyWord))
arrCSFLabelInt = np.array(volCSFLabelInt, copy = False)
arrNegOutCortex[arrCSFLabelInt != 0] = arrCSFLabelInt[arrCSFLabelInt != 0]
aims.write(volNegOutCortex, result_directory + 'CSF_labelled_interface_negative_outside_%s.nii' % (keyWord)) # OK
#AimsMerge -m ao -v 200000000 \
#-i CSF_labelled_interface_negative_outside.nii \
#-M white_labelled_interface.nii \
#-o propvol_CSF_labels.nii.gz
volWhiteLabInt = aims.read(result_directory + 'white_labelled_interface_%s.nii' % (keyWord))
arrWhiteLabInt = np.array(volWhiteLabInt, copy = False)
arrNegOutCortex[arrWhiteLabInt != 0] = 200000000
aims.write(volNegOutCortex, result_directory + 'propvol_CSF_labels_%s.nii.gz' % (keyWord)) # OK
#AimsMerge -m sv \
#-i negative_outside_cortex_S32.nii \
#-M white_labelled_interface.nii \
#-o white_labelled_interface_negative_outside.nii
volNegOutCortex = aims.read(result_directory + 'negative_outside_cortex_S32_%s.nii' % (keyWord))
arrNegOutCortex = np.array(volNegOutCortex, copy = False)
arrNegOutCortex[arrWhiteLabInt != 0] = arrWhiteLabInt[arrWhiteLabInt != 0]
aims.write(volNegOutCortex, result_directory + 'white_labelled_interface_negative_outside_%s.nii' % (keyWord)) # OK
#AimsMerge -m ao -v 100000000 \
#-i white_labelled_interface_negative_outside.nii \
#-M CSF_labelled_interface.nii \
#-o propvol_white_labels.nii.gz
arrNegOutCortex[np.where(arrCSFLabelInt != 0)] = 100000000
aims.write(volNegOutCortex, result_directory + 'propvol_white_labels_%s.nii.gz' % (keyWord)) # OK
subprocess.check_call(['time', 'ylPropagateAlongField', '--verbose', '--grad-field', heat_directory + 'heat_%s.nii.gz' % (keyWord), '--seeds', result_directory + 'propvol_CSF_labels_%s.nii.gz' % (keyWord), '--step', '-0.05', '--target-label', '200000000', '--output', result_directory + 'heat_CSF_labels_on_white_%s.nii.gz' % (keyWord)]) # OK
#ylPropagateAlongField --verbose \
#--grad-field ../heat/heat.nii.gz \
#--seeds propvol_CSF_labels.nii.gz \
#--step -0.05 \
#--target-label 200000000 \
#--output heat_CSF_labels_on_white.nii.gz
#time for the whole cortex 1:27.7
subprocess.check_call(['time', 'ylPropagateAlongField', '--verbose', '--grad-field', heat_directory + 'heat_%s.nii.gz' % (keyWord), '--seeds', result_directory + 'propvol_white_labels_%s.nii.gz' % (keyWord), '--step', '0.05', '--target-label', '100000000', '--output', result_directory + 'heat_white_labels_on_CSF_%s.nii.gz' % (keyWord)]) # OK
#ylPropagateAlongField --verbose \
#--grad-field ../heat/heat.nii.gz \
#--seeds propvol_white_labels.nii.gz \
#--step 0.05 \
#--target-label 100000000 \
#--output heat_white_labels_on_CSF.nii.gz
#time for the whole cortex 1:43.87
volCSF_labels_on_white = aims.read(result_directory + 'heat_CSF_labels_on_white_%s.nii.gz' % (keyWord))
volwhite_labels_on_CSF = aims.read(result_directory + 'heat_white_labels_on_CSF_%s.nii.gz' % (keyWord))
volClassif = aims.read(pathToClassifFile)
volExchangedPropVol = highres_cortex.od_get_exchanged_propvol.getExchangedPropagationVolume(volCSF_labels_on_white, volwhite_labels_on_CSF, volClassif, result_directory, keyWord)
aims.write(volExchangedPropVol, result_directory + "exchanged_propvol_%s.nii.gz" %(keyWord))
#python get_exchanged_propvol.py # -> exchanged_propvol.nii.gz
# Why is the previous step necessary?
#
# The obvious alternative is to do exactly as described in the OHBM paper: do
# the projections on the original labels of each voxel.
#
# The previous case aggregates the adjacent voxels of one interface that point
# towards the same voxel on the other interface. This reduces
# over-segmentation.
#
# Another way of reducing over-segmentation would be to aggregate together
# voxels that have one projection in common, instead of both (see conjunction
# step later on). But this introduces the problem of transitivity. This was
# investigated previously on the ferret data (under the name Billiard), but was
# considered a dead-end and the above solution seems to solve this problem most
# efficiently.
# There is a problem with the propagation of labels: the step size is fixed,
# which means that sometimes the point can skip the corner of a voxel, and thus
# go directly from a bulk voxel to an outside voxel. In this case it is
# recorded as a "dead-end" advection path, no resulting label is recorded and
# it appears as zero in the result.
#
# This problem also appears in the previous "exchange" step, but is mitigated
# by the subsequent connected-component detection (each failed propagation is
# assigned a different label).
#
# Quick fix: fix the conjunction step to not aggregate zeros.
#
# TODO: the proper way to fix this would be to force the advection path to
# respect the boundaries of voxels, so that the corner of voxels cannot be
# skipped over. This would also prevent the advection path from crossing the
# thin CSF surface within the sulcus (comes from skeleton).
# I could take into account the fake cortex–CSF interface that exists at the
# cut plane, by assigning it a special label (e.g. 500000000) in the
# exchanged_propvol label. It would then need to be treated specially: any
# voxel that projects onto this label would be excluded from the region list,
# and thus would not take part in the merging step. This would prevent the
# creation of regions that connect to this spurious surface, but this would not
# prevent the nearby regions from being deformed by the perturbation of the
# field. It would thus probably be overkill to implement this special case.
# Care is needed when dealing with regions close to the cut plane anyway.
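# --- Illustrative sketch (added commentary, not part of the original pipeline) ---
# The conjunction idea discussed above can be pictured as follows: each bulk voxel
# carries a pair (CSF-side label, white-side label); voxels sharing the same pair are
# merged into one region, and any pair containing 0 (a failed, dead-end advection path)
# is skipped so that such voxels are not aggregated together. The real implementation
# used below is highres_cortex.od_relabel_conjunction.relabel_conjunctions; the helper
# here is only a hypothetical, self-contained illustration and is never called.
def _sketch_conjunction_labels(arr_csf_labels, arr_white_labels):
    out = np.zeros(arr_csf_labels.shape, dtype=np.int32)  # np is imported at the top of this script
    pair_to_label = {}
    next_label = 1
    it = np.nditer(arr_csf_labels, flags=['multi_index'])
    while not it.finished:
        idx = it.multi_index
        csf_val = int(it[0])
        white_val = int(arr_white_labels[idx])
        if csf_val != 0 and white_val != 0:  # quick fix: do not aggregate zeros
            pair = (csf_val, white_val)
            if pair not in pair_to_label:
                pair_to_label[pair] = next_label
                next_label += 1
            out[idx] = pair_to_label[pair]
        it.iternext()
    return out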
#AimsMerge -m oo -l 150 -v 0 \
#-i exchanged_propvol.nii.gz \
#-M ../classif_with_outer_boundaries.nii.gz \
#-o ./exchanged_labels_on_CSF.nii
arrExchangedPropVol = np.array(volExchangedPropVol, copy = False)
arrClassif = np.array(volClassif, copy = False)
arrExchangedPropVol[arrClassif == 150] = 0
aims.write(volExchangedPropVol, result_directory + 'exchanged_labels_on_CSF_%s.nii' %(keyWord)) # OK
#AimsMerge -m oo -l 50 -v 0 \
#-i ./exchanged_propvol.nii.gz \
#-M ../classif_with_outer_boundaries.nii.gz \
#-o ./exchanged_labels_on_white.nii
volExchangedPropVol = aims.read(result_directory + "exchanged_propvol_%s.nii.gz" %(keyWord))
arrExchangedPropVol = np.array(volExchangedPropVol, copy = False)
arrExchangedPropVol[arrClassif == 50] = 0
aims.write(volExchangedPropVol, result_directory + 'exchanged_labels_on_white_%s.nii' %(keyWord)) # OK
#ylPropagateAlongField --verbose \
#--grad-field ../heat/heat.nii.gz \
#--seeds exchanged_labels_on_CSF.nii \
#--step -0.05 \
#--target-label 0 \
#--output heat_CSF_on_bulk.nii.gz \
#--dest-points heat_CSF_points_on_bulk.nii.gz
subprocess.check_call(['time', 'ylPropagateAlongField', '--verbose', '--grad-field', heat_directory + 'heat_%s.nii.gz' % (keyWord), '--seeds',result_directory + 'exchanged_labels_on_CSF_%s.nii' %(keyWord), '--step', '-0.05', '--target-label', '0', '--output', result_directory + 'heat_CSF_on_bulk_%s.nii.gz' % (keyWord), '--dest-points', result_directory + 'heat_CSF_points_on_bulk_%s.nii.gz' % (keyWord)])
# time for the full cortex: 4:56.95
#ylPropagateAlongField --verbose \
#--grad-field ../heat/heat.nii.gz \
#--seeds exchanged_labels_on_white.nii \
#--step 0.05 \
#--target-label 0 \
#--output heat_white_on_bulk.nii.gz \
#--dest-points heat_white_points_on_bulk.nii.gz
subprocess.check_call(['time', 'ylPropagateAlongField', '--verbose', '--grad-field', heat_directory + 'heat_%s.nii.gz' % (keyWord), '--seeds',result_directory + 'exchanged_labels_on_white_%s.nii' %(keyWord), '--step', '0.05', '--target-label', '0', '--output', result_directory + 'heat_white_on_bulk_%s.nii.gz' % (keyWord), '--dest-points', result_directory + 'heat_white_points_on_bulk_%s.nii.gz' % (keyWord)])
# time for the full cortex: 5:59.33
#python relabel_conjunction.py # -> ./conjunction.nii.gz
vol1 = aims.read(result_directory + 'heat_CSF_on_bulk_%s.nii.gz' % (keyWord))
vol2 = aims.read(result_directory + 'heat_white_on_bulk_%s.nii.gz' % (keyWord))
volRelabeledConj = highres_cortex.od_relabel_conjunction.relabel_conjunctions(vol1, vol2)
aims.write(volRelabeledConj, result_directory + 'conjunction_%s.nii.gz' % (keyWord))
# Yann added to ensure cortical columns traverse the cortex:
#AimsConnectComp -c 26 \
#-i conjunction.nii.gz \
#-o conjunction_connected.nii.gz
subprocess.check_call(['AimsConnectComp', '-c', '26', '-i', result_directory + 'conjunction_%s.nii.gz' % (keyWord), '-o', result_directory + 'conjunction_connected_%s.nii.gz' % (keyWord)])
#ylMergeCortexColumnRegions --verbose 2 \
#-i conjunction.nii.gz \
#-o merged.nii \
#--proj-csf heat_CSF_points_on_bulk.nii.gz \
#--proj-white heat_white_points_on_bulk.nii.gz \
#--goal-diameter 1
# Yann changed!! to ensure cortical columns traverse the cortex and various diameters are allowed:
#ylMergeCortexColumnRegions --verbose 2 \
#-i conjunction_connected.nii.gz \
#-o merged.nii \
#--proj-csf heat_CSF_points_on_bulk.nii.gz \
#--proj-white heat_white_points_on_bulk.nii.gz \
#--classif ../classif.nii.gz \
#--goal-diameter 1
subprocess.check_call(['time', 'ylMergeCortexColumnRegions', '--verbose', '2', '-i', result_directory + 'conjunction_connected_%s.nii.gz' % (keyWord), '-o',result_directory + 'merged_%s.nii' %(keyWord), '--proj-csf', result_directory + 'heat_CSF_points_on_bulk_%s.nii.gz' % (keyWord), '--proj-white', result_directory + 'heat_white_points_on_bulk_%s.nii.gz' % (keyWord), '--classif', pathToClassifFileWithoutBorders, '--goal-diameter', '1'])
# time for the full cortex : 0:58.83
#python relabel.py
vol1 = aims.read(result_directory + 'merged_%s.nii' %(keyWord))
vol2 = highres_cortex.od_relabel.relabel(vol1)
aims.write(vol2, result_directory + 'merged_relabelled_%s.nii.gz' % (keyWord))
#python randomize_labels.py
vol1 = highres_cortex.od_randomize_labels.relabel(vol2)
aims.write(vol1, result_directory + 'merged_randomized_%s.nii.gz' %(keyWord))
print np.max(np.array(vol1)) # number of different columns 111067
## test for another diameter of cortical columns. E.g. of 3 mm, and 5 mm, and 9mm
#diams = [3, 5, 7, 9]
#diams = [9]
diams = [3, 5, 7, 9]
for diam in diams:
subprocess.check_call(['ylMergeCortexColumnRegions', '--verbose', '2', '-i', result_directory + 'conjunction_connected_%s.nii.gz' % (keyWord), '-o',result_directory + 'merged_%s_diam%s.nii' %(keyWord, diam), '--proj-csf', result_directory + 'heat_CSF_points_on_bulk_%s.nii.gz' % (keyWord), '--proj-white', result_directory + 'heat_white_points_on_bulk_%s.nii.gz' % (keyWord), '--classif', pathToClassifFileWithoutBorders, '--goal-diameter', str(diam)])
#python relabel.py
vol1 = aims.read(result_directory + 'merged_%s_diam%s.nii' %(keyWord, diam))
vol2 = highres_cortex.od_relabel.relabel(vol1)
aims.write(vol2, result_directory + 'merged_relabelled_%s_diam%s.nii.gz' % (keyWord, diam))
#python randomize_labels.py
vol1 = highres_cortex.od_randomize_labels.relabel(vol2)
aims.write(vol1, result_directory + 'merged_randomized_%s_diam%s.nii.gz' %(keyWord, diam))
print np.max(np.array(vol1)) # number of different columns
|
gpl-3.0
| -1,059,299,137,137,612,500
| 50.103261
| 461
| 0.718281
| false
| 2.953667
| false
| false
| false
|
demisto/content
|
Packs/ApiModules/Scripts/MicrosoftApiModule/MicrosoftApiModule.py
|
1
|
23628
|
import traceback
import demistomock as demisto
from CommonServerPython import *
from CommonServerUserPython import *
import requests
import re
import base64
from cryptography.hazmat.primitives.ciphers.aead import AESGCM
from typing import Dict, Tuple, List, Optional
class Scopes:
graph = 'https://graph.microsoft.com/.default'
security_center = 'https://api.securitycenter.windows.com/.default'
# authorization types
OPROXY_AUTH_TYPE = 'oproxy'
SELF_DEPLOYED_AUTH_TYPE = 'self_deployed'
# grant types in self-deployed authorization
CLIENT_CREDENTIALS = 'client_credentials'
AUTHORIZATION_CODE = 'authorization_code'
REFRESH_TOKEN = 'refresh_token' # guardrails-disable-line
DEVICE_CODE = 'urn:ietf:params:oauth:grant-type:device_code'
REGEX_SEARCH_URL = r'(?P<url>https?://[^\s]+)'
SESSION_STATE = 'session_state'
class MicrosoftClient(BaseClient):
def __init__(self, tenant_id: str = '',
auth_id: str = '',
enc_key: str = '',
token_retrieval_url: str = 'https://login.microsoftonline.com/{tenant_id}/oauth2/v2.0/token',
app_name: str = '',
refresh_token: str = '',
auth_code: str = '',
scope: str = 'https://graph.microsoft.com/.default',
grant_type: str = CLIENT_CREDENTIALS,
redirect_uri: str = 'https://localhost/myapp',
resource: Optional[str] = '',
multi_resource: bool = False,
resources: List[str] = None,
verify: bool = True,
self_deployed: bool = False,
azure_ad_endpoint: str = 'https://login.microsoftonline.com',
*args, **kwargs):
"""
Microsoft Client class that implements logic to authenticate with oproxy or self deployed applications.
It also provides common logic to handle responses from Microsoft.
Args:
tenant_id: If self deployed it's the tenant for the app url, otherwise (oproxy) it's the token
auth_id: If self deployed it's the client id, otherwise (oproxy) it's the auth id and may also
contain the token url
enc_key: If self deployed it's the client secret, otherwise (oproxy) it's the encryption key
scope: The scope of the application (only if self deployed)
resource: The resource of the application (only if self deployed)
            multi_resource: Whether or not the module uses multiple resources (self-deployed, auth_code grant type only)
resources: Resources of the application (for multi-resource mode)
verify: Demisto insecure parameter
self_deployed: Indicates whether the integration mode is self deployed or oproxy
"""
super().__init__(verify=verify, *args, **kwargs) # type: ignore[misc]
if not self_deployed:
auth_id_and_token_retrieval_url = auth_id.split('@')
auth_id = auth_id_and_token_retrieval_url[0]
if len(auth_id_and_token_retrieval_url) != 2:
self.token_retrieval_url = 'https://oproxy.demisto.ninja/obtain-token' # guardrails-disable-line
else:
self.token_retrieval_url = auth_id_and_token_retrieval_url[1]
self.app_name = app_name
self.auth_id = auth_id
self.enc_key = enc_key
self.tenant_id = tenant_id
self.refresh_token = refresh_token
else:
self.token_retrieval_url = token_retrieval_url.format(tenant_id=tenant_id)
self.client_id = auth_id
self.client_secret = enc_key
self.tenant_id = tenant_id
self.auth_code = auth_code
self.grant_type = grant_type
self.resource = resource
self.scope = scope
self.redirect_uri = redirect_uri
self.auth_type = SELF_DEPLOYED_AUTH_TYPE if self_deployed else OPROXY_AUTH_TYPE
self.verify = verify
self.azure_ad_endpoint = azure_ad_endpoint
self.multi_resource = multi_resource
if self.multi_resource:
self.resources = resources if resources else []
self.resource_to_access_token: Dict[str, str] = {}
def http_request(
self, *args, resp_type='json', headers=None,
return_empty_response=False, scope: Optional[str] = None,
resource: str = '', **kwargs):
"""
Overrides Base client request function, retrieves and adds to headers access token before sending the request.
Args:
            resp_type: Type of response to return. Will be ignored if `return_empty_response` is True.
headers: Headers to add to the request.
return_empty_response: Return the response itself if the return_code is 206.
scope: A scope to request. Currently will work only with self-deployed app.
resource (str): The resource identifier for which the generated token will have access to.
Returns:
Response from api according to resp_type. The default is `json` (dict or list).
"""
if 'ok_codes' not in kwargs:
kwargs['ok_codes'] = (200, 201, 202, 204, 206, 404)
token = self.get_access_token(resource=resource, scope=scope)
default_headers = {
'Authorization': f'Bearer {token}',
'Content-Type': 'application/json',
'Accept': 'application/json'
}
if headers:
default_headers.update(headers)
response = super()._http_request( # type: ignore[misc]
*args, resp_type="response", headers=default_headers, **kwargs)
# 206 indicates Partial Content, reason will be in the warning header.
# In that case, logs with the warning header will be written.
if response.status_code == 206:
demisto.debug(str(response.headers))
is_response_empty_and_successful = (response.status_code == 204)
if is_response_empty_and_successful and return_empty_response:
return response
# Handle 404 errors instead of raising them as exceptions:
if response.status_code == 404:
try:
error_message = response.json()
except Exception:
error_message = 'Not Found - 404 Response'
raise NotFoundError(error_message)
try:
if resp_type == 'json':
return response.json()
if resp_type == 'text':
return response.text
if resp_type == 'content':
return response.content
if resp_type == 'xml':
                ET.fromstring(response.text)  # fromstring parses the XML payload directly (ET.parse expects a file path)
return response
except ValueError as exception:
raise DemistoException('Failed to parse json object from response: {}'.format(response.content), exception)
def get_access_token(self, resource: str = '', scope: Optional[str] = None) -> str:
"""
Obtains access and refresh token from oproxy server or just a token from a self deployed app.
Access token is used and stored in the integration context
until expiration time. After expiration, new refresh token and access token are obtained and stored in the
integration context.
Args:
resource (str): The resource identifier for which the generated token will have access to.
scope (str): A scope to get instead of the default on the API.
Returns:
str: Access token that will be added to authorization header.
"""
integration_context = get_integration_context()
refresh_token = integration_context.get('current_refresh_token', '')
# Set keywords. Default without the scope prefix.
access_token_keyword = f'{scope}_access_token' if scope else 'access_token'
valid_until_keyword = f'{scope}_valid_until' if scope else 'valid_until'
if self.multi_resource:
access_token = integration_context.get(resource)
else:
access_token = integration_context.get(access_token_keyword)
valid_until = integration_context.get(valid_until_keyword)
if access_token and valid_until:
if self.epoch_seconds() < valid_until:
return access_token
auth_type = self.auth_type
if auth_type == OPROXY_AUTH_TYPE:
if self.multi_resource:
for resource_str in self.resources:
access_token, expires_in, refresh_token = self._oproxy_authorize(resource_str)
self.resource_to_access_token[resource_str] = access_token
self.refresh_token = refresh_token
else:
access_token, expires_in, refresh_token = self._oproxy_authorize(scope=scope)
else:
access_token, expires_in, refresh_token = self._get_self_deployed_token(
refresh_token, scope, integration_context)
time_now = self.epoch_seconds()
time_buffer = 5 # seconds by which to shorten the validity period
if expires_in - time_buffer > 0:
# err on the side of caution with a slightly shorter access token validity period
expires_in = expires_in - time_buffer
valid_until = time_now + expires_in
integration_context.update({
access_token_keyword: access_token,
valid_until_keyword: valid_until,
'current_refresh_token': refresh_token
})
# Add resource access token mapping
if self.multi_resource:
integration_context.update(self.resource_to_access_token)
set_integration_context(integration_context)
if self.multi_resource:
return self.resource_to_access_token[resource]
return access_token
def _oproxy_authorize(self, resource: str = '', scope: Optional[str] = None) -> Tuple[str, int, str]:
"""
Gets a token by authorizing with oproxy.
Args:
scope: A scope to add to the request. Do not use it.
resource: Resource to get.
Returns:
tuple: An access token, its expiry and refresh token.
"""
content = self.refresh_token or self.tenant_id
headers = self._add_info_headers()
oproxy_response = requests.post(
self.token_retrieval_url,
headers=headers,
json={
'app_name': self.app_name,
'registration_id': self.auth_id,
'encrypted_token': self.get_encrypted(content, self.enc_key),
'scope': scope
},
verify=self.verify
)
if not oproxy_response.ok:
msg = 'Error in authentication. Try checking the credentials you entered.'
try:
demisto.info('Authentication failure from server: {} {} {}'.format(
oproxy_response.status_code, oproxy_response.reason, oproxy_response.text))
err_response = oproxy_response.json()
server_msg = err_response.get('message')
if not server_msg:
title = err_response.get('title')
detail = err_response.get('detail')
if title:
server_msg = f'{title}. {detail}'
elif detail:
server_msg = detail
if server_msg:
msg += ' Server message: {}'.format(server_msg)
except Exception as ex:
demisto.error('Failed parsing error response - Exception: {}'.format(ex))
raise Exception(msg)
try:
gcloud_function_exec_id = oproxy_response.headers.get('Function-Execution-Id')
demisto.info(f'Google Cloud Function Execution ID: {gcloud_function_exec_id}')
parsed_response = oproxy_response.json()
except ValueError:
raise Exception(
'There was a problem in retrieving an updated access token.\n'
'The response from the Oproxy server did not contain the expected content.'
)
return (parsed_response.get('access_token', ''), parsed_response.get('expires_in', 3595),
parsed_response.get('refresh_token', ''))
def _get_self_deployed_token(self,
refresh_token: str = '',
scope: Optional[str] = None,
integration_context: Optional[dict] = None
) -> Tuple[str, int, str]:
if self.grant_type == AUTHORIZATION_CODE:
if not self.multi_resource:
return self._get_self_deployed_token_auth_code(refresh_token, scope=scope)
else:
expires_in = -1 # init variable as an int
for resource in self.resources:
access_token, expires_in, refresh_token = self._get_self_deployed_token_auth_code(refresh_token,
resource)
self.resource_to_access_token[resource] = access_token
return '', expires_in, refresh_token
elif self.grant_type == DEVICE_CODE:
return self._get_token_device_code(refresh_token, scope, integration_context)
else:
# by default, grant_type is CLIENT_CREDENTIALS
return self._get_self_deployed_token_client_credentials(scope=scope)
def _get_self_deployed_token_client_credentials(self, scope: Optional[str] = None) -> Tuple[str, int, str]:
"""
Gets a token by authorizing a self deployed Azure application in client credentials grant type.
Args:
            scope: A scope to add to the token request. If not given, self.scope is used.
Returns:
tuple: An access token and its expiry.
"""
data = {
'client_id': self.client_id,
'client_secret': self.client_secret,
'grant_type': CLIENT_CREDENTIALS
}
# Set scope.
if self.scope or scope:
data['scope'] = scope if scope else self.scope
if self.resource:
data['resource'] = self.resource
response_json: dict = {}
try:
response = requests.post(self.token_retrieval_url, data, verify=self.verify)
if response.status_code not in {200, 201}:
return_error(f'Error in Microsoft authorization. Status: {response.status_code},'
f' body: {self.error_parser(response)}')
response_json = response.json()
except Exception as e:
return_error(f'Error in Microsoft authorization: {str(e)}')
access_token = response_json.get('access_token', '')
expires_in = int(response_json.get('expires_in', 3595))
return access_token, expires_in, ''
def _get_self_deployed_token_auth_code(
self, refresh_token: str = '', resource: str = '', scope: Optional[str] = None) -> Tuple[str, int, str]:
"""
Gets a token by authorizing a self deployed Azure application.
Returns:
tuple: An access token, its expiry and refresh token.
"""
data = assign_params(
client_id=self.client_id,
client_secret=self.client_secret,
resource=self.resource if not resource else resource,
redirect_uri=self.redirect_uri
)
if scope:
data['scope'] = scope
refresh_token = refresh_token or self._get_refresh_token_from_auth_code_param()
if refresh_token:
data['grant_type'] = REFRESH_TOKEN
data['refresh_token'] = refresh_token
else:
if SESSION_STATE in self.auth_code:
raise ValueError('Malformed auth_code parameter: Please copy the auth code from the redirected uri '
'without any additional info and without the "session_state" query parameter.')
data['grant_type'] = AUTHORIZATION_CODE
data['code'] = self.auth_code
response_json: dict = {}
try:
response = requests.post(self.token_retrieval_url, data, verify=self.verify)
if response.status_code not in {200, 201}:
return_error(f'Error in Microsoft authorization. Status: {response.status_code},'
f' body: {self.error_parser(response)}')
response_json = response.json()
except Exception as e:
return_error(f'Error in Microsoft authorization: {str(e)}')
access_token = response_json.get('access_token', '')
expires_in = int(response_json.get('expires_in', 3595))
refresh_token = response_json.get('refresh_token', '')
return access_token, expires_in, refresh_token
def _get_token_device_code(
self, refresh_token: str = '', scope: Optional[str] = None, integration_context: Optional[dict] = None
) -> Tuple[str, int, str]:
"""
Gets a token by authorizing a self deployed Azure application.
Returns:
tuple: An access token, its expiry and refresh token.
"""
data = {
'client_id': self.client_id,
'scope': scope
}
if refresh_token:
data['grant_type'] = REFRESH_TOKEN
data['refresh_token'] = refresh_token
else:
data['grant_type'] = DEVICE_CODE
if integration_context:
data['code'] = integration_context.get('device_code')
response_json: dict = {}
try:
response = requests.post(self.token_retrieval_url, data, verify=self.verify)
if response.status_code not in {200, 201}:
return_error(f'Error in Microsoft authorization. Status: {response.status_code},'
f' body: {self.error_parser(response)}')
response_json = response.json()
except Exception as e:
return_error(f'Error in Microsoft authorization: {str(e)}')
access_token = response_json.get('access_token', '')
expires_in = int(response_json.get('expires_in', 3595))
refresh_token = response_json.get('refresh_token', '')
return access_token, expires_in, refresh_token
def _get_refresh_token_from_auth_code_param(self) -> str:
refresh_prefix = "refresh_token:"
if self.auth_code.startswith(refresh_prefix): # for testing we allow setting the refresh token directly
demisto.debug("Using refresh token set as auth_code")
return self.auth_code[len(refresh_prefix):]
return ''
@staticmethod
def error_parser(error: requests.Response) -> str:
"""
Args:
error (requests.Response): response with error
Returns:
str: string of error
"""
try:
response = error.json()
demisto.error(str(response))
inner_error = response.get('error', {})
if isinstance(inner_error, dict):
err_str = f"{inner_error.get('code')}: {inner_error.get('message')}"
else:
err_str = inner_error
if err_str:
return err_str
# If no error message
raise ValueError
except ValueError:
return error.text
@staticmethod
def epoch_seconds(d: datetime = None) -> int:
"""
Return the number of seconds for given date. If no date, return current.
Args:
d (datetime): timestamp
Returns:
int: timestamp in epoch
"""
if not d:
d = MicrosoftClient._get_utcnow()
return int((d - MicrosoftClient._get_utcfromtimestamp(0)).total_seconds())
@staticmethod
def _get_utcnow() -> datetime:
return datetime.utcnow()
@staticmethod
def _get_utcfromtimestamp(_time) -> datetime:
return datetime.utcfromtimestamp(_time)
@staticmethod
def get_encrypted(content: str, key: str) -> str:
"""
Encrypts content with encryption key.
Args:
content: Content to encrypt
key: encryption key from oproxy
Returns:
            str: Timestamped and encrypted content
"""
def create_nonce():
return os.urandom(12)
def encrypt(string, enc_key):
"""
Encrypts string input with encryption key.
Args:
string: String to encrypt
enc_key: Encryption key
Returns:
bytes: Encrypted value
"""
# String to bytes
try:
enc_key = base64.b64decode(enc_key)
except Exception as err:
return_error(f"Error in Microsoft authorization: {str(err)}"
f" Please check authentication related parameters.", error=traceback.format_exc())
# Create key
aes_gcm = AESGCM(enc_key)
# Create nonce
nonce = create_nonce()
# Create ciphered data
data = string.encode()
ct = aes_gcm.encrypt(nonce, data, None)
return base64.b64encode(nonce + ct)
now = MicrosoftClient.epoch_seconds()
encrypted = encrypt(f'{now}:{content}', key).decode('utf-8')
return encrypted
@staticmethod
def _add_info_headers() -> Dict[str, str]:
# pylint: disable=no-member
headers = {}
try:
headers = get_x_content_info_headers()
except Exception as e:
demisto.error('Failed getting integration info: {}'.format(str(e)))
return headers
def device_auth_request(self) -> dict:
response_json = {}
try:
response = requests.post(
url=f'{self.azure_ad_endpoint}/organizations/oauth2/v2.0/devicecode',
data={
'client_id': self.client_id,
'scope': self.scope
},
verify=self.verify
)
if not response.ok:
return_error(f'Error in Microsoft authorization. Status: {response.status_code},'
f' body: {self.error_parser(response)}')
response_json = response.json()
except Exception as e:
return_error(f'Error in Microsoft authorization: {str(e)}')
set_integration_context({'device_code': response_json.get('device_code')})
return response_json
def start_auth(self, complete_command: str) -> str:
response = self.device_auth_request()
message = response.get('message', '')
re_search = re.search(REGEX_SEARCH_URL, message)
url = re_search.group('url') if re_search else None
user_code = response.get('user_code')
return f"""### Authorization instructions
1. To sign in, use a web browser to open the page [{url}]({url})
and enter the code **{user_code}** to authenticate.
2. Run the **{complete_command}** command in the War Room."""
class NotFoundError(Exception):
"""Exception raised for 404 - Not Found errors.
Attributes:
message -- explanation of the error
"""
def __init__(self, message):
self.message = message
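# --- Hypothetical usage sketch (added commentary, not part of the original module) ---
# A minimal illustration of how an integration might construct a self-deployed client
# and issue a Graph request. The tenant/client/secret values and the base_url/url_suffix
# below are placeholders chosen for the example, not values defined by this module.
if __name__ == '__main__':
    _example_client = MicrosoftClient(
        self_deployed=True,
        tenant_id='<tenant-id>',
        auth_id='<client-id>',
        enc_key='<client-secret>',
        grant_type=CLIENT_CREDENTIALS,
        scope=Scopes.graph,
        base_url='https://graph.microsoft.com/v1.0',
        verify=True,
    )
    # http_request() fetches (and caches) an access token before delegating to BaseClient:
    # users = _example_client.http_request(method='GET', url_suffix='/users')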
|
mit
| 5,221,532,628,645,901,000
| 40.163763
| 119
| 0.577874
| false
| 4.367468
| false
| false
| false
|
kopringo/Scarky2
|
Scarky2/builder/models.py
|
1
|
3520
|
#-*- coding: utf-8 -*-
from django.db import models, IntegrityError
from django.contrib.auth.models import User
#from sphere_engine import ProblemsClientV3
from django.conf import settings
from django.utils import timezone
import json
import uuid
import code
from logging import Logger
logger = Logger(__file__)
# Create your models here.
class Language(models.Model):
label = models.CharField(max_length=32)
version = models.CharField(max_length=32)
remote_id = models.IntegerField()
visible = models.BooleanField(default=True)
def __unicode__(self):
return u'%s' % self.label
@staticmethod
def sync_languages():
        from sphere_engine import ProblemsClientV3  # deferred import: the module-level import above is commented out
        client = ProblemsClientV3(settings.SPHERE_ENGINE_TOKEN)
        languages = client.problems.languages()
languages = json.loads(languages)
for language in languages:
l = Language()
l.label = language['name']
l.version = language['ver']
l.remote_id = language['id']
l.save()
PROBLEM_RANK = (
('bin-date', 'Binary by date'),
('bin-time', 'Binary by time'),
('bin-source', 'Binary by length of source code'),
)
class Problem(models.Model):
code = models.CharField(max_length=8, unique=True)
date = models.DateTimeField()
remote_code = models.CharField(max_length=32)
user = models.ForeignKey(User, blank=True, null=True)
secret = models.CharField(max_length=40)
saved = models.BooleanField(default=False)
name = models.CharField(max_length=128)
content = models.TextField()
input = models.FileField(upload_to='uploaded')
output = models.FileField(upload_to='uploaded')
rank = models.CharField(max_length=16, choices=PROBLEM_RANK)
languages = models.ManyToManyField('Language')
date_start = models.DateTimeField(blank=True, null=True)
date_stop = models.DateTimeField(blank=True, null=True)
website = models.URLField(blank=True)
resource = models.CharField(max_length=128, blank=True)
email = models.EmailField(blank=True)
stats_visits = models.IntegerField(default=0)
stats_submissions = models.IntegerField(default=0)
@staticmethod
def create_problem(user=None):
i = 0
while True:
code = str(uuid.uuid1())[0:8]
secret = str(uuid.uuid1())
try:
problem = Problem()
problem.code = code
problem.secret = secret
problem.date = timezone.now()
problem.user = user
problem.save()
return problem
except IntegrityError as e:
logger.exception(e)
i = i + 1
if i > 10:
raise Exception('create_problem exception')
def __unicode__(self):
return u'%s. %s' % (str(self.id), self.name)
class ProblemFile(models.Model):
name = models.CharField(max_length=128)
oname = models.CharField(max_length=128)
problem = models.ForeignKey('Problem')
class Submission(models.Model):
date = models.DateTimeField()
problem = models.ForeignKey(Problem)
language = models.ForeignKey('Language')
status = models.IntegerField(default=0)
time = models.FloatField(default=0.0)
mem = models.IntegerField(default=0)
remote_id = models.IntegerField(default=0)
def __unicode__(self):
return u'%s' % str(self.id)
#
|
mit
| 6,629,684,123,329,770,000
| 29.608696
| 64
| 0.618466
| false
| 4.0553
| false
| false
| false
|
krishnatray/data_science_project_portfolio
|
galvanize/TechnicalExcercise/q1.py
|
1
|
2695
|
# Q1 Technical Challenge
# Author: Sushil K Sharma
# -----------------------
"""
Problem Statement:
Create a text content analyzer. This is a tool used by writers to find statistics such as word and sentence count on essays or articles they are writing. Write a Python program that analyzes input from a file and compiles statistics on it.
The program should output:
1. The total word count
2. The count of unique words
3. The number of sentences
"""
# Assumptions:
#-------------
# 1. I have assumed that sentences are terminated by a period.
# 2. This program is case insensitive i.e. ignored the case for counting words.
def content_analyzer(input_text):
    # assumptions: this program is case insensitive i.e. "Word", "WORD", "wOrd", etc. are considered the same.
arr = input_text.lower().split()
lines=input_text.split(". ")
# dictionary to keep track of unique words
unique_words = {}
# Initialize Counters
    word_count = 0
    unique_word_count = 0
    sentences_count = 0
    sentences_length_sum = 0
for word in arr:
word_count +=1
if word in unique_words:
unique_words[word] += 1
else:
unique_words[word] = 1
unique_word_count += 1
for sentence in lines:
sentences_count += 1
sentences_length_sum += len(sentence)
avg_sentence_length=0
if sentences_count > 0:
avg_sentence_length = sentences_length_sum / sentences_count
# Print Results
print ("Results:")
print ("-------")
print ("Total word count:", word_count)
print ("Unique Words:", unique_word_count)
print ("Sentences:",sentences_count)
# Brownie points
# --------------
    # 1. The ability to calculate the average sentence length (measured here in characters per sentence)
    print ("Avg. Sentence Length:", avg_sentence_length)
# 2. A list of words used, in order of descending frequency
print ("A list of words used, in order of descending frequency:")
print("--------------------------------------------------------")
unique_words_sorted = sorted(unique_words, key=unique_words.get, reverse=True)
for word in unique_words_sorted:
print(f"{word} {unique_words[word]}" )
# Brownie point
# 4 : The ability to accept input from STDIN, or from a file specified on the command line.
print("**************************")
print("**** Content Analyzer ****")
print("**************************\n")
input_text= input("Please enter a few sentences: ")
content_analyzer(input_text)
print("*************************************")
print("**** Completed: Content Analyzer ****")
print("*************************************")
|
mit
| -2,832,477,704,902,959,000
| 33.460526
| 239
| 0.589239
| false
| 4.064857
| false
| false
| false
|
maltsev/LatexWebOffice
|
app/views/auth.py
|
1
|
8497
|
# -*- coding: utf-8 -*-
"""
* Purpose : managing user account registration and login
* Creation Date : 22-10-2014
* Last Modified : Mo 02 Mär 2015 15:23:28 CET
* Author : maltsev
* Coauthors : mattis, christian
* Sprintnumber : 1
* Backlog entry : RUA1, RUA4
"""
import re
import urllib
import datetime
from django.shortcuts import redirect, render_to_response
from django.contrib import messages, auth
from django.contrib.auth.decorators import login_required
from django.views.decorators.csrf import csrf_exempt
from django.template import RequestContext
from django.core.mail import EmailMessage
from django.core.urlresolvers import reverse
from django.core.exceptions import ObjectDoesNotExist
from django.contrib.auth import get_user_model
User = get_user_model()
from app.common.constants import ERROR_MESSAGES
import settings
from app.models.recoverkey import RecoverKey
# see
# https://docs.djangoproject.com/en/dev/topics/auth/default/#django.contrib.auth.login
## Default handler for login requests; sends the client the login page.
# If correct login details were sent with the request (over POST data), the user will be redirected to a success page.
# Otherwise an error message will be inserted into the django messages queue.
# @param request The HttpRequest Object
def login(request):
if request.user.is_authenticated():
return redirect('projekt')
email = ''
if request.session.has_key('email'):
email=request.session.get('email')
del request.session['email']
if request.method == 'POST' and 'action' in request.POST and 'email' in request.POST:
email = request.POST['email']
if request.POST['action']=='login':
password = request.POST['password']
# Email is case-insensitive, but login is case-sensitive
user = auth.authenticate(username=email.lower(), password=password)
if user is not None:
if user.is_active:
auth.login(request, user)
return redirect('projekt')
else:
messages.error(request, ERROR_MESSAGES['INACTIVEACCOUNT'] % email)
else:
messages.error(request, ERROR_MESSAGES['WRONGLOGINCREDENTIALS'])
elif request.POST['action'] == 'password-lost':
try:
user = User.objects.get(email__iexact=email)
recoverKey = RecoverKey.objects.getByUser(user)
subject="Latexweboffice Passwortreset"
url = request.build_absolute_uri(reverse('recoverpw'))+'?'+urllib.urlencode({'email': email, 'key': recoverKey.key})
body=u"""
Hallo!
Jemand hat einen Link zur Passwortwiederherstellung angefordert: %s
Falls dies nicht von Ihnen angefordert wurde, ignorieren Sie bitte diese Email.
Mit freundlichen Grüßen,
Ihr LatexWebOfficeteam
"""
emailsend=EmailMessage(subject, body % url)
emailsend.to=[email]
emailsend.send()
except ObjectDoesNotExist:
pass
messages.success(request,ERROR_MESSAGES['EMAILPWRECOVERSEND']% email)
sso_url = ''
if 'SSO_URL' in dir(settings):
sso_url = settings.SSO_URL
params = {'email': email, 'IS_SSO_ENABLED': settings.IS_SSO_ENABLED, 'SSO_URL': sso_url}
return render_to_response('login.html', params, context_instance=RequestContext(request))
def lostPwHandler(request):
if request.method == 'GET' and 'email' in request.GET and 'key' in request.GET:
email = request.GET['email']
key = request.GET['key']
try:
user = User.objects.get(email__iexact=email)
if RecoverKey.objects.isValid(user, key):
return render_to_response('passwordrecover.html', {'email':email,'key':key}, context_instance=RequestContext(request))
except ObjectDoesNotExist:
pass
elif request.method == 'POST' and 'email' in request.POST and 'key' in request.POST and 'password1' in request.POST:
email=request.POST['email']
key=request.POST['key']
try:
user=User.objects.get(email__iexact=email)
if RecoverKey.objects.isValid(user, key):
user.set_password(request.POST['password1'])
RecoverKey.objects.get(key=key).delete()
user.save()
messages.success(request,ERROR_MESSAGES['PASSWORDCHANGED'])
request.session['email'] = email
return redirect('login')
except ObjectDoesNotExist:
pass
return render_to_response('passwordrecoverwrong.html',context_instance=RequestContext(request))
## Logout
# @param request The HttpRequest Object
@login_required
def logout(request):
auth.logout(request)
if 'SSO_LOGOUT_URL' in dir(settings) and request.build_absolute_uri().find('https://sso.') == 0:
return redirect(settings.SSO_LOGOUT_URL)
else:
return redirect('login')
## Default handler for registration requests; sends the user the registration page.
# If correct registration details were sent with the request (over POST data), the user will be logged in
# and redirected to the start page
# Otherwise an error message will be inserted into the django messages queue.
# @param request The HttpRequest Object
def registration(request):
if request.user.is_authenticated():
return redirect('projekt')
email = ''
first_name = ''
if request.method == 'POST':
first_name = request.POST['first_name']
email = request.POST['email'].lower()
password1 = request.POST['password1']
password2 = request.POST['password2']
# boolean, true if there are errors in the user data
foundErrors = False
# validation checks
# no empty fields
if first_name == '' or email == '' or password1 == '':
messages.error(request, ERROR_MESSAGES['NOEMPTYFIELDS'])
foundErrors = True
# email already registered
if User.objects.filter(username__iexact=email).count() != 0:
messages.error(request, ERROR_MESSAGES['EMAILALREADYEXISTS'])
foundErrors = True
# no valid email format
if not validEmail(email):
messages.error(request, ERROR_MESSAGES['INVALIDEMAIL'])
foundErrors = True
# passwords may not contain any spaces
if ' ' in password1:
messages.error((request), ERROR_MESSAGES['NOSPACESINPASSWORDS'])
foundErrors = True
# passwords do not match
if password1 != password2:
messages.error(request, ERROR_MESSAGES['PASSWORDSDONTMATCH'])
foundErrors = True
# if all validation checks pass, create new user
if not foundErrors:
new_user = User.objects.create_user(email, email, password=password1)
new_user.first_name = first_name
new_user.save()
# user login and redirection to start page
user = auth.authenticate(username=email, password=password1)
if user is not None:
if user.is_active:
auth.login(request, user)
return redirect('projekt')
else:
messages.error(request, ERROR_MESSAGES['LOGINORREGFAILED'])
sso_url = ''
if 'SSO_URL' in dir(settings):
sso_url = settings.SSO_URL
return render_to_response('registration.html',
{'first_name': first_name, 'IS_SSO_ENABLED': settings.IS_SSO_ENABLED, 'SSO_URL': sso_url, 'email': email},
context_instance=RequestContext(request))
@csrf_exempt
# Checks whether an email address is already registered. If it is registered, "false" is returned; otherwise "true".
def userexists(request):
from django.http import HttpResponse
if request.method=='POST' and request.POST.get('email'):
if User.objects.filter(username=request.POST.get('email')).exists():
return HttpResponse("false")
return HttpResponse('true')
# Helper function to check if an email address is valid
def validEmail(email):
regex_email=re.compile("^[a-zA-Z0-9.!#$%&'*+\/=?^_`{|}~-]+@[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?(?:\.[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?)*$")
return regex_email.match(email)
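# Usage note (comment added for clarity): validEmail returns a regex match object for
# syntactically valid addresses and None otherwise, so callers use it in a boolean
# context, e.g.
#     if not validEmail(email):
#         messages.error(request, ERROR_MESSAGES['INVALIDEMAIL'])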
|
gpl-3.0
| 1,439,265,107,998,456,800
| 35.286325
| 163
| 0.641032
| false
| 3.923752
| false
| false
| false
|
ContinuumIO/dask
|
dask/array/random.py
|
2
|
19970
|
import numbers
import warnings
from itertools import product
from numbers import Integral
from operator import getitem
import numpy as np
from .core import (
normalize_chunks,
Array,
slices_from_chunks,
asarray,
broadcast_shapes,
broadcast_to,
)
from .creation import arange
from ..base import tokenize
from ..highlevelgraph import HighLevelGraph
from ..utils import ignoring, random_state_data, derived_from, skip_doctest
def doc_wraps(func):
""" Copy docstring from one function to another """
warnings.warn(
"dask.array.random.doc_wraps is deprecated and will be removed in a future version",
FutureWarning,
)
def _(func2):
if func.__doc__ is not None:
func2.__doc__ = skip_doctest(func.__doc__)
return func2
return _
class RandomState(object):
"""
Mersenne Twister pseudo-random number generator
This object contains state to deterministically generate pseudo-random
numbers from a variety of probability distributions. It is identical to
``np.random.RandomState`` except that all functions also take a ``chunks=``
keyword argument.
Parameters
----------
seed: Number
Object to pass to RandomState to serve as deterministic seed
RandomState: Callable[seed] -> RandomState
A callable that, when provided with a ``seed`` keyword provides an
object that operates identically to ``np.random.RandomState`` (the
default). This might also be a function that returns a
``randomgen.RandomState``, ``mkl_random``, or
``cupy.random.RandomState`` object.
Examples
--------
>>> import dask.array as da
>>> state = da.random.RandomState(1234) # a seed
>>> x = state.normal(10, 0.1, size=3, chunks=(2,))
>>> x.compute()
array([10.01867852, 10.04812289, 9.89649746])
See Also
--------
np.random.RandomState
"""
def __init__(self, seed=None, RandomState=None):
self._numpy_state = np.random.RandomState(seed)
self._RandomState = RandomState
def seed(self, seed=None):
self._numpy_state.seed(seed)
def _wrap(
self, funcname, *args, size=None, chunks="auto", extra_chunks=(), **kwargs
):
""" Wrap numpy random function to produce dask.array random function
extra_chunks should be a chunks tuple to append to the end of chunks
"""
if size is not None and not isinstance(size, (tuple, list)):
size = (size,)
args_shapes = {ar.shape for ar in args if isinstance(ar, (Array, np.ndarray))}
args_shapes.union(
{ar.shape for ar in kwargs.values() if isinstance(ar, (Array, np.ndarray))}
)
shapes = list(args_shapes)
if size is not None:
shapes.extend([size])
# broadcast to the final size(shape)
size = broadcast_shapes(*shapes)
chunks = normalize_chunks(
chunks,
size, # ideally would use dtype here
dtype=kwargs.get("dtype", np.float64),
)
slices = slices_from_chunks(chunks)
def _broadcast_any(ar, shape, chunks):
if isinstance(ar, Array):
return broadcast_to(ar, shape).rechunk(chunks)
if isinstance(ar, np.ndarray):
return np.ascontiguousarray(np.broadcast_to(ar, shape))
# Broadcast all arguments, get tiny versions as well
# Start adding the relevant bits to the graph
dsk = {}
dsks = []
lookup = {}
small_args = []
dependencies = []
for i, ar in enumerate(args):
if isinstance(ar, (np.ndarray, Array)):
res = _broadcast_any(ar, size, chunks)
if isinstance(res, Array):
dependencies.append(res)
dsks.append(res.dask)
lookup[i] = res.name
elif isinstance(res, np.ndarray):
name = "array-{}".format(tokenize(res))
lookup[i] = name
dsk[name] = res
small_args.append(ar[tuple(0 for _ in ar.shape)])
else:
small_args.append(ar)
small_kwargs = {}
for key, ar in kwargs.items():
if isinstance(ar, (np.ndarray, Array)):
res = _broadcast_any(ar, size, chunks)
if isinstance(res, Array):
dependencies.append(res)
dsks.append(res.dask)
lookup[key] = res.name
elif isinstance(res, np.ndarray):
name = "array-{}".format(tokenize(res))
lookup[key] = name
dsk[name] = res
small_kwargs[key] = ar[tuple(0 for _ in ar.shape)]
else:
small_kwargs[key] = ar
sizes = list(product(*chunks))
seeds = random_state_data(len(sizes), self._numpy_state)
token = tokenize(seeds, size, chunks, args, kwargs)
name = "{0}-{1}".format(funcname, token)
keys = product(
[name], *([range(len(bd)) for bd in chunks] + [[0]] * len(extra_chunks))
)
blocks = product(*[range(len(bd)) for bd in chunks])
vals = []
for seed, size, slc, block in zip(seeds, sizes, slices, blocks):
arg = []
for i, ar in enumerate(args):
if i not in lookup:
arg.append(ar)
else:
if isinstance(ar, Array):
dependencies.append(ar)
arg.append((lookup[i],) + block)
else: # np.ndarray
arg.append((getitem, lookup[i], slc))
kwrg = {}
for k, ar in kwargs.items():
if k not in lookup:
kwrg[k] = ar
else:
if isinstance(ar, Array):
dependencies.append(ar)
kwrg[k] = (lookup[k],) + block
else: # np.ndarray
kwrg[k] = (getitem, lookup[k], slc)
vals.append(
(_apply_random, self._RandomState, funcname, seed, size, arg, kwrg)
)
meta = _apply_random(
self._RandomState,
funcname,
seed,
(0,) * len(size),
small_args,
small_kwargs,
)
dsk.update(dict(zip(keys, vals)))
graph = HighLevelGraph.from_collections(name, dsk, dependencies=dependencies)
return Array(graph, name, chunks + extra_chunks, meta=meta)
@derived_from(np.random.RandomState, skipblocks=1)
def beta(self, a, b, size=None, chunks="auto", **kwargs):
return self._wrap("beta", a, b, size=size, chunks=chunks, **kwargs)
@derived_from(np.random.RandomState, skipblocks=1)
def binomial(self, n, p, size=None, chunks="auto", **kwargs):
return self._wrap("binomial", n, p, size=size, chunks=chunks, **kwargs)
@derived_from(np.random.RandomState, skipblocks=1)
def chisquare(self, df, size=None, chunks="auto", **kwargs):
return self._wrap("chisquare", df, size=size, chunks=chunks, **kwargs)
with ignoring(AttributeError):
@derived_from(np.random.RandomState, skipblocks=1)
def choice(self, a, size=None, replace=True, p=None, chunks="auto"):
dependencies = []
# Normalize and validate `a`
if isinstance(a, Integral):
# On windows the output dtype differs if p is provided or
# absent, see https://github.com/numpy/numpy/issues/9867
dummy_p = np.array([1]) if p is not None else p
dtype = np.random.choice(1, size=(), p=dummy_p).dtype
len_a = a
if a < 0:
raise ValueError("a must be greater than 0")
else:
a = asarray(a)
a = a.rechunk(a.shape)
dtype = a.dtype
if a.ndim != 1:
raise ValueError("a must be one dimensional")
len_a = len(a)
dependencies.append(a)
a = a.__dask_keys__()[0]
# Normalize and validate `p`
if p is not None:
if not isinstance(p, Array):
# If p is not a dask array, first check the sum is close
# to 1 before converting.
p = np.asarray(p)
if not np.isclose(p.sum(), 1, rtol=1e-7, atol=0):
raise ValueError("probabilities do not sum to 1")
p = asarray(p)
else:
p = p.rechunk(p.shape)
if p.ndim != 1:
raise ValueError("p must be one dimensional")
if len(p) != len_a:
raise ValueError("a and p must have the same size")
dependencies.append(p)
p = p.__dask_keys__()[0]
if size is None:
size = ()
elif not isinstance(size, (tuple, list)):
size = (size,)
chunks = normalize_chunks(chunks, size, dtype=np.float64)
if not replace and len(chunks[0]) > 1:
err_msg = (
"replace=False is not currently supported for "
"dask.array.choice with multi-chunk output "
"arrays"
)
raise NotImplementedError(err_msg)
sizes = list(product(*chunks))
state_data = random_state_data(len(sizes), self._numpy_state)
name = "da.random.choice-%s" % tokenize(
state_data, size, chunks, a, replace, p
)
keys = product([name], *(range(len(bd)) for bd in chunks))
dsk = {
k: (_choice, state, a, size, replace, p)
for k, state, size in zip(keys, state_data, sizes)
}
graph = HighLevelGraph.from_collections(
name, dsk, dependencies=dependencies
)
return Array(graph, name, chunks, dtype=dtype)
# @derived_from(np.random.RandomState, skipblocks=1)
# def dirichlet(self, alpha, size=None, chunks="auto"):
@derived_from(np.random.RandomState, skipblocks=1)
def exponential(self, scale=1.0, size=None, chunks="auto", **kwargs):
return self._wrap("exponential", scale, size=size, chunks=chunks, **kwargs)
@derived_from(np.random.RandomState, skipblocks=1)
def f(self, dfnum, dfden, size=None, chunks="auto", **kwargs):
return self._wrap("f", dfnum, dfden, size=size, chunks=chunks, **kwargs)
@derived_from(np.random.RandomState, skipblocks=1)
def gamma(self, shape, scale=1.0, size=None, chunks="auto", **kwargs):
return self._wrap("gamma", shape, scale, size=size, chunks=chunks, **kwargs)
@derived_from(np.random.RandomState, skipblocks=1)
def geometric(self, p, size=None, chunks="auto", **kwargs):
return self._wrap("geometric", p, size=size, chunks=chunks, **kwargs)
@derived_from(np.random.RandomState, skipblocks=1)
def gumbel(self, loc=0.0, scale=1.0, size=None, chunks="auto", **kwargs):
return self._wrap("gumbel", loc, scale, size=size, chunks=chunks, **kwargs)
@derived_from(np.random.RandomState, skipblocks=1)
def hypergeometric(self, ngood, nbad, nsample, size=None, chunks="auto", **kwargs):
return self._wrap(
"hypergeometric", ngood, nbad, nsample, size=size, chunks=chunks, **kwargs
)
@derived_from(np.random.RandomState, skipblocks=1)
def laplace(self, loc=0.0, scale=1.0, size=None, chunks="auto", **kwargs):
return self._wrap("laplace", loc, scale, size=size, chunks=chunks, **kwargs)
@derived_from(np.random.RandomState, skipblocks=1)
def logistic(self, loc=0.0, scale=1.0, size=None, chunks="auto", **kwargs):
return self._wrap("logistic", loc, scale, size=size, chunks=chunks, **kwargs)
@derived_from(np.random.RandomState, skipblocks=1)
def lognormal(self, mean=0.0, sigma=1.0, size=None, chunks="auto", **kwargs):
return self._wrap("lognormal", mean, sigma, size=size, chunks=chunks, **kwargs)
@derived_from(np.random.RandomState, skipblocks=1)
def logseries(self, p, size=None, chunks="auto", **kwargs):
return self._wrap("logseries", p, size=size, chunks=chunks, **kwargs)
@derived_from(np.random.RandomState, skipblocks=1)
def multinomial(self, n, pvals, size=None, chunks="auto", **kwargs):
return self._wrap(
"multinomial",
n,
pvals,
size=size,
chunks=chunks,
extra_chunks=((len(pvals),),),
)
@derived_from(np.random.RandomState, skipblocks=1)
def negative_binomial(self, n, p, size=None, chunks="auto", **kwargs):
return self._wrap("negative_binomial", n, p, size=size, chunks=chunks, **kwargs)
@derived_from(np.random.RandomState, skipblocks=1)
def noncentral_chisquare(self, df, nonc, size=None, chunks="auto", **kwargs):
return self._wrap(
"noncentral_chisquare", df, nonc, size=size, chunks=chunks, **kwargs
)
@derived_from(np.random.RandomState, skipblocks=1)
def noncentral_f(self, dfnum, dfden, nonc, size=None, chunks="auto", **kwargs):
return self._wrap(
"noncentral_f", dfnum, dfden, nonc, size=size, chunks=chunks, **kwargs
)
@derived_from(np.random.RandomState, skipblocks=1)
def normal(self, loc=0.0, scale=1.0, size=None, chunks="auto", **kwargs):
return self._wrap("normal", loc, scale, size=size, chunks=chunks, **kwargs)
@derived_from(np.random.RandomState, skipblocks=1)
def pareto(self, a, size=None, chunks="auto", **kwargs):
return self._wrap("pareto", a, size=size, chunks=chunks, **kwargs)
@derived_from(np.random.RandomState, skipblocks=1)
def permutation(self, x):
from .slicing import shuffle_slice
if isinstance(x, numbers.Number):
x = arange(x, chunks="auto")
index = np.arange(len(x))
self._numpy_state.shuffle(index)
return shuffle_slice(x, index)
@derived_from(np.random.RandomState, skipblocks=1)
def poisson(self, lam=1.0, size=None, chunks="auto", **kwargs):
return self._wrap("poisson", lam, size=size, chunks=chunks, **kwargs)
@derived_from(np.random.RandomState, skipblocks=1)
def power(self, a, size=None, chunks="auto", **kwargs):
return self._wrap("power", a, size=size, chunks=chunks, **kwargs)
@derived_from(np.random.RandomState, skipblocks=1)
def randint(self, low, high=None, size=None, chunks="auto", dtype="l", **kwargs):
return self._wrap(
"randint", low, high, size=size, chunks=chunks, dtype=dtype, **kwargs
)
@derived_from(np.random.RandomState, skipblocks=1)
def random_integers(self, low, high=None, size=None, chunks="auto", **kwargs):
return self._wrap(
"random_integers", low, high, size=size, chunks=chunks, **kwargs
)
@derived_from(np.random.RandomState, skipblocks=1)
def random_sample(self, size=None, chunks="auto", **kwargs):
return self._wrap("random_sample", size=size, chunks=chunks, **kwargs)
random = random_sample
@derived_from(np.random.RandomState, skipblocks=1)
def rayleigh(self, scale=1.0, size=None, chunks="auto", **kwargs):
return self._wrap("rayleigh", scale, size=size, chunks=chunks, **kwargs)
@derived_from(np.random.RandomState, skipblocks=1)
def standard_cauchy(self, size=None, chunks="auto", **kwargs):
return self._wrap("standard_cauchy", size=size, chunks=chunks, **kwargs)
@derived_from(np.random.RandomState, skipblocks=1)
def standard_exponential(self, size=None, chunks="auto", **kwargs):
return self._wrap("standard_exponential", size=size, chunks=chunks, **kwargs)
@derived_from(np.random.RandomState, skipblocks=1)
def standard_gamma(self, shape, size=None, chunks="auto", **kwargs):
return self._wrap("standard_gamma", shape, size=size, chunks=chunks, **kwargs)
@derived_from(np.random.RandomState, skipblocks=1)
def standard_normal(self, size=None, chunks="auto", **kwargs):
return self._wrap("standard_normal", size=size, chunks=chunks, **kwargs)
@derived_from(np.random.RandomState, skipblocks=1)
def standard_t(self, df, size=None, chunks="auto", **kwargs):
return self._wrap("standard_t", df, size=size, chunks=chunks, **kwargs)
@derived_from(np.random.RandomState, skipblocks=1)
def tomaxint(self, size=None, chunks="auto", **kwargs):
return self._wrap("tomaxint", size=size, chunks=chunks, **kwargs)
@derived_from(np.random.RandomState, skipblocks=1)
def triangular(self, left, mode, right, size=None, chunks="auto", **kwargs):
return self._wrap(
"triangular", left, mode, right, size=size, chunks=chunks, **kwargs
)
@derived_from(np.random.RandomState, skipblocks=1)
def uniform(self, low=0.0, high=1.0, size=None, chunks="auto", **kwargs):
return self._wrap("uniform", low, high, size=size, chunks=chunks, **kwargs)
@derived_from(np.random.RandomState, skipblocks=1)
def vonmises(self, mu, kappa, size=None, chunks="auto", **kwargs):
return self._wrap("vonmises", mu, kappa, size=size, chunks=chunks, **kwargs)
@derived_from(np.random.RandomState, skipblocks=1)
def wald(self, mean, scale, size=None, chunks="auto", **kwargs):
return self._wrap("wald", mean, scale, size=size, chunks=chunks, **kwargs)
@derived_from(np.random.RandomState, skipblocks=1)
def weibull(self, a, size=None, chunks="auto", **kwargs):
return self._wrap("weibull", a, size=size, chunks=chunks, **kwargs)
@derived_from(np.random.RandomState, skipblocks=1)
def zipf(self, a, size=None, chunks="auto", **kwargs):
return self._wrap("zipf", a, size=size, chunks=chunks, **kwargs)
def _choice(state_data, a, size, replace, p):
state = np.random.RandomState(state_data)
return state.choice(a, size=size, replace=replace, p=p)
def _apply_random(RandomState, funcname, state_data, size, args, kwargs):
"""Apply RandomState method with seed"""
if RandomState is None:
RandomState = np.random.RandomState
state = RandomState(state_data)
func = getattr(state, funcname)
return func(*args, size=size, **kwargs)
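# Note: the module-level functions below are bound methods of a single shared
# RandomState instance, so ``da.random.normal(...)`` behaves like
# ``RandomState().normal(...)`` except that the seed/state is shared module-wide.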
_state = RandomState()
seed = _state.seed
beta = _state.beta
binomial = _state.binomial
chisquare = _state.chisquare
if hasattr(_state, "choice"):
choice = _state.choice
exponential = _state.exponential
f = _state.f
gamma = _state.gamma
geometric = _state.geometric
gumbel = _state.gumbel
hypergeometric = _state.hypergeometric
laplace = _state.laplace
logistic = _state.logistic
lognormal = _state.lognormal
logseries = _state.logseries
multinomial = _state.multinomial
negative_binomial = _state.negative_binomial
noncentral_chisquare = _state.noncentral_chisquare
noncentral_f = _state.noncentral_f
normal = _state.normal
pareto = _state.pareto
permutation = _state.permutation
poisson = _state.poisson
power = _state.power
rayleigh = _state.rayleigh
random_sample = _state.random_sample
random = random_sample
randint = _state.randint
random_integers = _state.random_integers
triangular = _state.triangular
uniform = _state.uniform
vonmises = _state.vonmises
wald = _state.wald
weibull = _state.weibull
zipf = _state.zipf
"""
Standard distributions
"""
standard_cauchy = _state.standard_cauchy
standard_exponential = _state.standard_exponential
standard_gamma = _state.standard_gamma
standard_normal = _state.standard_normal
standard_t = _state.standard_t
|
bsd-3-clause
| 2,665,391,172,766,181,000
| 37.330134
| 92
| 0.595143
| false
| 3.77077
| false
| false
| false
|
odahoda/noisicaa
|
noisicaa/builtin_nodes/sample_track/node_description.py
|
1
|
1608
|
#!/usr/bin/python3
# @begin:license
#
# Copyright (c) 2015-2019, Benjamin Niemann <pink@odahoda.de>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @end:license
from noisicaa import node_db
SampleTrackDescription = node_db.NodeDescription(
uri='builtin://sample-track',
display_name='Sample Track',
type=node_db.NodeDescription.PROCESSOR,
node_ui=node_db.NodeUIDescription(
type='builtin://sample-track',
),
processor=node_db.ProcessorDescription(
type='builtin://sample-script',
),
builtin_icon='track-type-sample',
ports=[
node_db.PortDescription(
name='out:left',
direction=node_db.PortDescription.OUTPUT,
types=[node_db.PortDescription.AUDIO],
),
node_db.PortDescription(
name='out:right',
direction=node_db.PortDescription.OUTPUT,
types=[node_db.PortDescription.AUDIO],
),
]
)
|
gpl-2.0
| 120,325,731,447,951,020
| 31.816327
| 73
| 0.688433
| false
| 3.893462
| false
| false
| false
|
SqueezeStudioAnimation/omtk
|
python/omtk/modules/rigLimb.py
|
1
|
14788
|
import pymel.core as pymel
import collections
from omtk import constants
from omtk.core.classModule import Module
from omtk.core.classCtrl import BaseCtrl
from omtk.core.utils import decorator_uiexpose
from omtk.modules import rigIK
from omtk.modules import rigFK
from omtk.modules import rigTwistbone
from omtk.libs import libRigging
from omtk.libs import libCtrlShapes
from omtk.libs import libAttr
from omtk.libs import libPython
class BaseAttHolder(BaseCtrl):
def __createNode__(self, size=None, refs=None, **kwargs):
# Resolve size automatically if refs are provided.
ref = next(iter(refs), None) if isinstance(refs, collections.Iterable) else refs
if size is None and ref is not None:
size = libRigging.get_recommended_ctrl_size(ref)
else:
size = 1.0
node = libCtrlShapes.create_shape_attrholder(size=size, **kwargs)
# Hide default keyable attributes
node.t.set(channelBox=False)
node.r.set(channelBox=False)
node.s.set(channelBox=False)
return node
class CtrlElbow(BaseCtrl):
def __createNode__(self, size=None, refs=None, *args, **kwargs):
# Resolve size automatically if refs are provided
ref = next(iter(refs), None) if isinstance(refs, collections.Iterable) else refs
if size is None and ref is not None:
size = libRigging.get_recommended_ctrl_size(ref) * 1.25
else:
size = 1.0
return libCtrlShapes.create_shape_cross(size=size, **kwargs)
class Limb(Module):
"""
Generic IK/FK setup. Twistbones are included.
"""
kAttrName_State = 'fkIk' # The name of the IK/FK attribute
_CLASS_SYS_IK = rigIK.IK
_CLASS_SYS_FK = rigFK.FK
_CLASS_CTRL_ATTR = BaseAttHolder
_CLASS_CTRL_ELBOW = CtrlElbow
_CLASS_SYS_TWIST = rigTwistbone.Twistbone
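    # Note (descriptive; the exact subclassing conventions are an assumption): the
    # _CLASS_* attributes above act as injection points, so production rigs can
    # subclass Limb and swap in their own IK/FK/twist or ctrl classes without
    # overriding build().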
def __init__(self, *args, **kwargs):
super(Limb, self).__init__(*args, **kwargs)
self.sysIK = None
self.sysFK = None
self.sys_twist = []
self.create_twist = True
self.ctrl_elbow = None
self.attState = None
self.offset_ctrl_ik = None
self.ctrl_attrs = None
self.STATE_IK = 1.0
self.STATE_FK = 0.0
def build(self, *args, **kwargs):
super(Limb, self).build(*args, **kwargs)
nomenclature_anm = self.get_nomenclature_anm()
nomenclature_rig = self.get_nomenclature_rig()
# Resolve IK system name
# Create IK system
self.sysIK = self.init_module(
self._CLASS_SYS_IK,
self.sysIK,
inputs=self.chain_jnt,
suffix='ik',
)
self.sysIK.build(constraint=False, **kwargs)
# Create FK system
self.sysFK = self.init_module(
self._CLASS_SYS_FK,
self.sysFK,
inputs=self.chain_jnt,
suffix='fk',
)
# We want to keep the name of the input on the fk
self.sysFK._FORCE_INPUT_NAME = True
self.sysFK.build(constraint=False, **kwargs)
# Create twistbone system if needed
if self.create_twist:
num_twist_sys = self.sysIK.iCtrlIndex
            # Ensure the twistbone list has the proper size
            libPython.resize_list(self.sys_twist, num_twist_sys)
            # If the IK system is a quad, we need to have two twist systems
for i, sys_twist in enumerate(self.sys_twist):
# Resolve module name
# todo: validate name
twist_nomenclature = self.get_nomenclature().copy()
twist_nomenclature.add_tokens('bend')
twist_nomenclature += self.rig.nomenclature(self.chain_jnt[i].stripNamespace().nodeName())
# twist_nomenclature = self.get_nomenclature() + self.rig.nomenclature(self.chain_jnt[i].name())
sys_twist = self.init_module(
self._CLASS_SYS_TWIST,
sys_twist,
inputs=self.chain_jnt[i:(i + 2)],
# suffix='bend'
)
self.sys_twist[i] = sys_twist
sys_twist.name = twist_nomenclature.resolve()
sys_twist.build(num_twist=3, create_bend=True, **kwargs)
# Lock X and Y axis on the elbow/knee ctrl
if self.rig.DEFAULT_UPP_AXIS == constants.Axis.y:
libAttr.lock_hide_rotation(self.sysFK.ctrls[1], z=False)
elif self.rig.DEFAULT_UPP_AXIS == constants.Axis.z:
libAttr.lock_hide_rotation(self.sysFK.ctrls[1], y=False)
        # Store the offset between the ik ctrl and its joint equivalent.
# Useful when they don't match for example on a leg setup.
self.offset_ctrl_ik = self.sysIK.ctrl_ik.getMatrix(worldSpace=True) * self.chain_jnt[self.iCtrlIndex].getMatrix(
worldSpace=True).inverse()
# Add attributes to the attribute holder.
# Add ikFk state attribute on the grp_rig.
# This is currently controlled by self.ctrl_attrs.
pymel.addAttr(self.grp_rig, longName=self.kAttrName_State, hasMinValue=True, hasMaxValue=True, minValue=0,
maxValue=1, defaultValue=1, k=True)
attr_ik_weight = self.grp_rig.attr(self.kAttrName_State)
attr_fk_weight = libRigging.create_utility_node('reverse', inputX=attr_ik_weight).outputX
# Create attribute holder (this is where the IK/FK attribute will be stored)
# Note that this is production specific and should be defined in a sub-class implementation.
jnt_hand = self.chain_jnt[self.sysIK.iCtrlIndex]
ctrl_attrs_name = nomenclature_anm.resolve('atts')
if not isinstance(self.ctrl_attrs, self._CLASS_CTRL_ATTR):
self.ctrl_attrs = self._CLASS_CTRL_ATTR()
self.ctrl_attrs.build(name=ctrl_attrs_name, refs=jnt_hand)
self.ctrl_attrs.setParent(self.grp_anm)
pymel.parentConstraint(jnt_hand, self.ctrl_attrs.offset)
pymel.addAttr(self.ctrl_attrs, longName=self.kAttrName_State, hasMinValue=True, hasMaxValue=True, minValue=0,
maxValue=1, defaultValue=1, k=True)
pymel.connectAttr(self.ctrl_attrs.attr(self.kAttrName_State), self.grp_rig.attr(self.kAttrName_State))
# Create a chain for blending ikChain and fkChain
chain_blend = pymel.duplicate(list(self.chain_jnt), renameChildren=True, parentOnly=True)
for input_, node in zip(self.chain_jnt, chain_blend):
blend_nomenclature = nomenclature_rig.rebuild(input_.stripNamespace().nodeName())
node.rename(blend_nomenclature.resolve('blend'))
# Blend ikChain with fkChain
constraint_ik_chain = self.sysIK._chain_ik
if getattr(self.sysIK, '_chain_quad_ik', None):
constraint_ik_chain = self.sysIK._chain_quad_ik
# Note: We need to set the parent of the chain_blend BEFORE creating the constraint.
        # Otherwise we might expose ourselves to evaluation issues (happened on maya 2018.2).
        # The symptom is the chain_blend rotation being aligned to the world and the rig being built on top.
# At first the scene would seem ok, however doing a dgdirty or reloading the scene would introduce flipping.
chain_blend[0].setParent(self.grp_rig)
for blend, oIk, oFk in zip(chain_blend, constraint_ik_chain, self.sysFK.ctrls):
            # Note that maintainOffset should not be necessary, however the rigLegQuad IK can be flipped in some
            # rare cases. For now, since production needs it, we'll activate the flag (see Task #70938), however it would
            # be appreciated if the ugliness of the rigLegQuad module didn't bleed into the rigLimb module.
constraint = pymel.parentConstraint(oIk, oFk, blend, maintainOffset=True)
attr_weight_ik, attr_weight_fk = constraint.getWeightAliasList()
pymel.connectAttr(attr_ik_weight, attr_weight_ik)
pymel.connectAttr(attr_fk_weight, attr_weight_fk)
#
# Create elbow chain
        # This provides the elbow ctrl, an animator-friendly way of cheating the elbow on top of the blend chain.
#
        # Create a chain that provides the elbow controller and overrides the blend chain
        # (which should only be nodes already)
chain_elbow = pymel.duplicate(self.chain_jnt[:self.sysIK.iCtrlIndex + 1], renameChildren=True, parentOnly=True)
for input_, node in zip(self.chain_jnt, chain_elbow):
nomenclature_elbow = nomenclature_rig.rebuild(input_.stripNamespace().nodeName())
node.rename(nomenclature_elbow.resolve('elbow')) # todo: find a better name???
chain_elbow[0].setParent(self.grp_rig)
# Create elbow ctrl
# Note that this only affect the chain until @iCtrlIndex
for i in range(1, self.sysIK.iCtrlIndex):
ctrl_elbow_name = nomenclature_anm.resolve('elbow{:02}'.format(i))
ctrl_elbow_parent = chain_blend[i]
if not isinstance(self.ctrl_elbow, self._CLASS_CTRL_ELBOW):
self.ctrl_elbow = self._CLASS_CTRL_ELBOW(create_offset=True)
ctrl_elbow_ref = self.chain_jnt[i] # jnt_elbow
self.ctrl_elbow.build(refs=ctrl_elbow_ref)
self.ctrl_elbow.rename(ctrl_elbow_name)
self.ctrl_elbow.setParent(self.grp_anm)
pymel.parentConstraint(ctrl_elbow_parent, self.ctrl_elbow.offset, maintainOffset=False)
pymel.pointConstraint(chain_blend[0], chain_elbow[0], maintainOffset=False)
pymel.aimConstraint(self.ctrl_elbow, chain_elbow[i - 1], worldUpType=2,
worldUpObject=chain_blend[i - 1]) # Object Rotation Up
pymel.aimConstraint(chain_blend[i + 1], chain_elbow[i], worldUpType=2,
worldUpObject=chain_blend[i]) # Object Rotation Up
pymel.pointConstraint(self.ctrl_elbow, chain_elbow[i], maintainOffset=False)
# Constraint the last elbow joint on the blend joint at the ctrl index
pymel.parentConstraint(chain_blend[self.sysIK.iCtrlIndex], chain_elbow[self.sysIK.iCtrlIndex])
# Constraint input chain
# Note that we only constraint to the elbow chain until @iCtrlIndex.
# Afterward we constraint to the blend chain.
for i in range(self.sysIK.iCtrlIndex):
inn = self.chain_jnt[i]
ref = chain_elbow[i]
pymel.parentConstraint(ref, inn, maintainOffset=True) # todo: set to maintainOffset=False?
for i in range(self.sysIK.iCtrlIndex, len(self.chain_jnt)):
inn = self.chain_jnt[i]
ref = chain_blend[i]
pymel.parentConstraint(ref, inn, maintainOffset=True) # todo: set to maintainOffset=False?
# Connect visibility
pymel.connectAttr(attr_ik_weight, self.sysIK.grp_anm.visibility)
pymel.connectAttr(attr_fk_weight, self.sysFK.grp_anm.visibility)
# Connect globalScale
pymel.connectAttr(self.grp_rig.globalScale, self.sysIK.grp_rig.globalScale, force=True)
        self.globalScale = self.grp_rig.globalScale  # Expose the attribute, the rig will recognize it.
# Parent sub-modules so they are affected by displayLayer assignment and such.
self.sysIK.grp_anm.setParent(self.grp_anm)
self.sysIK.grp_rig.setParent(self.grp_rig)
self.sysFK.grp_anm.setParent(self.grp_anm)
# Patch in case twist network exist, but twist are set to false
if self.create_twist:
for sys_twist in self.sys_twist:
if sys_twist.create_bend:
sys_twist.grp_anm.setParent(self.grp_anm)
sys_twist.grp_rig.setParent(self.grp_rig)
self.attState = attr_ik_weight # Expose state
def unbuild(self):
for twist_sys in self.sys_twist:
twist_sys.unbuild()
if self.sysIK and self.sysIK.is_built():
self.sysIK.unbuild()
if self.sysFK and self.sysFK.is_built():
self.sysFK.unbuild()
super(Limb, self).unbuild()
self.attState = None
def parent_to(self, parent):
# Do nothing as everything is handled by the sysIK and sysFK modules.
pass
#
# Functions called for IK/FK switch (animation tools)
#
def snap_ik_to_fk(self):
# Position ikCtrl
ctrl_ik_tm = self.chain_jnt[self.sysIK.iCtrlIndex].getMatrix(worldSpace=True)
self.sysIK.ctrl_ik.node.setMatrix(self.offset_ctrl_ik * ctrl_ik_tm, worldSpace=True)
# Position swivel
# pos_ref = self.sysFK.ctrls[self.sysIK.iCtrlIndex - 1].getTranslation(space='world')
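        # Descriptive note: the swivel (pole vector) target is rebuilt from the FK pose by
        # finding the point on the start-to-end segment that matches the upper/lower limb
        # length ratio, then pushing out from that point toward the middle joint by
        # self.sysIK.swivelDistance.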
pos_s = self.sysFK.ctrls[0].getTranslation(space='world')
pos_m = self.sysFK.ctrls[self.sysIK.iCtrlIndex - 1].getTranslation(space='world')
pos_e = self.sysFK.ctrls[self.sysIK.iCtrlIndex].getTranslation(space='world')
length_start = pos_m.distanceTo(pos_s)
length_end = pos_m.distanceTo(pos_e)
length_ratio = length_start / (length_start + length_end)
pos_middle = (pos_e - pos_s) * length_ratio + pos_s
dir_swivel = pos_m - pos_middle
dir_swivel.normalize()
pos_swivel = (dir_swivel * self.sysIK.swivelDistance) + pos_middle
self.sysIK.ctrl_swivel.node.setTranslation(pos_swivel, space='world')
def snap_fk_to_ik(self):
for ctrl, jnt in zip(self.sysFK.ctrls, self.chain_jnt):
ctrl.node.setMatrix(jnt.getMatrix(worldSpace=True), worldSpace=True)
def switch_to_ik(self):
self.snap_ik_to_fk()
attr_state = libAttr.get_settable_attr(self.attState)
if attr_state:
attr_state.set(self.STATE_IK)
def switch_to_fk(self):
self.snap_fk_to_ik()
attr_state = libAttr.get_settable_attr(self.attState)
if attr_state:
attr_state.set(self.STATE_FK)
def iter_ctrls(self):
for ctrl in super(Limb, self).iter_ctrls():
yield ctrl
if self.sysIK:
for ctrl in self.sysIK.iter_ctrls():
yield ctrl
if self.sysFK:
for ctrl in self.sysFK.iter_ctrls():
yield ctrl
yield self.ctrl_attrs
yield self.ctrl_elbow
@decorator_uiexpose()
def assign_twist_weights(self):
for module in self.sys_twist:
if module.__class__.__name__ == self._CLASS_SYS_TWIST.__name__:
module.assign_twist_weights()
@decorator_uiexpose()
def unassign_twist_weights(self):
for module in self.sys_twist:
if module.__class__.__name__ == self._CLASS_SYS_TWIST.__name__:
module.unassign_twist_weights()
def register_plugin():
return Limb
|
mit
| -8,440,778,741,539,320,000
| 43.275449
| 120
| 0.632675
| false
| 3.383208
| false
| false
| false
|