content stringlengths 5 1.05M |
|---|
# -*- coding: utf-8 -*-
#
# packaging.py
#
# Copyright (C) 2012, 2013 Steve Canny scanny@cisco.com
#
# This module is part of python-pptx and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
'''
The :mod:`pptx.packaging` module coheres around the concerns of reading and
writing presentations to and from a .pptx file. In doing so, it hides the
complexities of the package "directory" structure, reading and writing parts
to and from the package, zip file manipulation, and traversing relationship
items.
The main API class is :class:`pptx.packaging.Package` which provides the
methods :meth:`open`, :meth:`marshal`, and :meth:`save`.
'''
import os
import posixpath
import re
from StringIO import StringIO
from lxml import etree
from zipfile import ZipFile, is_zipfile, ZIP_DEFLATED
import pptx.spec
from pptx.exceptions import (
CorruptedPackageError, DuplicateKeyError, NotXMLError,
PackageNotFoundError)
from pptx.spec import qtag
from pptx.spec import PTS_HASRELS_NEVER, PTS_HASRELS_OPTIONAL
# import logging
# log = logging.getLogger('pptx.packaging')
PKG_BASE_URI = '/'
# ============================================================================
# API Classes
# ============================================================================
class Package(object):
    """
    Return a new package instance. Package is initially empty, call
    :meth:`open` to open an on-disk package or ``marshal()`` followed by
    ``save()`` to save an in-memory Office document.
    """
    # URI of the package-level relationships item, fixed by the OPC spec
    PKG_RELSITEM_URI = '/_rels/.rels'

    def __init__(self):
        super(Package, self).__init__()
        # package-level Relationship instances (package -> top-level parts)
        self.__relationships = []

    @property
    def parts(self):
        """
        Return a list of :class:`pptx.packaging.Part` corresponding to the
        parts in this package.
        """
        return [part for part in self.__walkparts(self.relationships)]

    @property
    def relationships(self):
        """
        A tuple of :class:`pptx.packaging.Relationship` containing the package
        relationships for this package. Note these are not all the
        relationships in the package, just those from the package to top-level
        parts such as ``/ppt/presentation.xml`` and ``/docProps/core.xml``.
        These are useful primarily as the starting point to walk the part
        graph via its relationships.
        """
        return tuple(self.__relationships)

    def open(self, file):
        """
        Load the package contained in *file*, where *file* can be a path to a
        file or directory (a string), or a file-like object. If *file* is a
        path to a directory, the directory must contain an expanded package
        such as is produced by unzipping an OPC package file.
        """
        fs = FileSystem(file)
        cti = _ContentTypesItem().load(fs)
        self.__relationships = []  # discard any rels from prior load
        parts_dict = {}  # track loaded parts, graph is cyclic
        pkg_rel_elms = fs.getelement(Package.PKG_RELSITEM_URI)\
                         .findall(qtag('pr:Relationship'))
        for rel_elm in pkg_rel_elms:
            rId = rel_elm.get('Id')
            reltype = rel_elm.get('Type')
            # package-level rel targets are relative to the package root
            partname = '/%s' % rel_elm.get('Target')
            part = Part()
            # register part before loading so cyclic references resolve
            parts_dict[partname] = part
            part._load(fs, partname, cti, parts_dict)
            rel = Relationship(rId, self, reltype, part)
            self.__relationships.append(rel)
        fs.close()
        return self

    def marshal(self, model_pkg):
        """
        Load the contents of a model-side package such that it can be saved to
        a package file.
        """
        part_dict = {}  # keep track of marshaled parts, graph is cyclic
        for rel in model_pkg._relationships:
            # unpack working values for target part and relationship
            rId = rel._rId
            reltype = rel._reltype
            model_part = rel._target
            partname = model_part.partname
            # create package-part for target
            part = Part()
            part_dict[partname] = part
            part._marshal(model_part, part_dict)
            # create marshaled version of relationship
            marshaled_rel = Relationship(rId, self, reltype, part)
            self.__relationships.append(marshaled_rel)
        return self

    def save(self, file):
        """
        Save this package to *file*, where *file* can be either a path to a
        file (a string) or a file-like object.
        """
        # open a zip filesystem for writing package
        zipfs = ZipFileSystem(file, 'w')
        # write [Content_Types].xml
        cti = _ContentTypesItem().compose(self.parts)
        zipfs.write_element(cti.element, '/[Content_Types].xml')
        # write pkg rels item
        zipfs.write_element(self.__relsitem_element, self.PKG_RELSITEM_URI)
        for part in self.parts:
            # write part item
            zipfs.write_blob(part.blob, part.partname)
            # write rels item if part has one
            if part.relationships:
                zipfs.write_element(part._relsitem_element, part._relsitemURI)
        zipfs.close()

    @property
    def __relsitem_element(self):
        # XML element for the package relationships item (/_rels/.rels)
        nsmap = {None: pptx.spec.nsmap['pr']}
        element = etree.Element(qtag('pr:Relationships'), nsmap=nsmap)
        for rel in self.__relationships:
            element.append(rel._element)
        return element

    @classmethod
    def __walkparts(cls, rels, parts=None):
        """
        Recursive generator method, walk relationships to iterate over all
        parts in this package. Leave out *parts* parameter in call to visit
        all parts.
        """
        # initial call can leave out parts parameter as a signal to initialize
        if parts is None:
            parts = []
        for rel in rels:
            part = rel.target
            if part in parts:  # only visit each part once (graph is cyclic)
                continue
            parts.append(part)
            yield part
            # depth-first recursion into the part's own relationships
            for part in cls.__walkparts(part.relationships, parts):
                yield part
class Part(object):
    """
    Part instances are not intended to be constructed externally.
    :class:`pptx.packaging.Part` instances are constructed and initialized
    internally to the :meth:`Package.open` or :meth:`Package.marshal` methods.

    The following |Part| instance attributes can be accessed once the part has
    been loaded as part of a package:

    .. attribute:: typespec

       An instance of |PartTypeSpec| appropriate to the type of this part.
       The |PartTypeSpec| instance provides attributes such as
       *content_type*, *baseURI*, etc. that are useful in several contexts.

    .. attribute:: blob

       The binary contents of this part contained in a byte string. For XML
       parts, this is simply the XML text. For binary parts such as an image,
       this is the string of bytes corresponding exactly to the bytes on disk
       for the binary object.
    """
    def __init__(self):
        super(Part, self).__init__()
        # all state is populated later by _load() or _marshal()
        self.__partname = None
        self.__relationships = []
        self.typespec = None
        self.blob = None

    @property
    def content_type(self):
        """Content type of this part"""
        assert self.typespec, 'Part.content_type called before typespec set'
        return self.typespec.content_type

    @property
    def partname(self):
        """
        Package item URI for this part, commonly known as its part name,
        e.g. ``/ppt/slides/slide1.xml``
        """
        return self.__partname

    @property
    def relationships(self):
        """
        Tuple of |Relationship| instances, each representing a relationship
        from this part to another part.
        """
        return tuple(self.__relationships)

    def _load(self, fs, partname, ct_dict, parts_dict):
        """
        Load part identified as *partname* from filesystem *fs* and propagate
        the load to related parts.

        *ct_dict* maps partnames to content types; *parts_dict* maps
        partnames to already-loaded Part instances and is shared across the
        whole load so cycles in the part graph terminate.
        """
        # calculate working values
        baseURI = os.path.split(partname)[0]
        content_type = ct_dict[partname]
        # set persisted attributes
        self.__partname = partname
        self.blob = fs.getblob(partname)
        self.typespec = PartTypeSpec(content_type)
        # load relationships and propagate load to target parts
        self.__relationships = []  # discard any rels from prior load
        rel_elms = self.__get_rel_elms(fs)
        for rel_elm in rel_elms:
            rId = rel_elm.get('Id')
            reltype = rel_elm.get('Type')
            target_relpath = rel_elm.get('Target')
            # rel targets are relative to this part's base URI; resolve any
            # '..' segments to obtain the absolute target partname
            target_partname = posixpath.abspath(posixpath.join(baseURI,
                                                               target_relpath))
            if target_partname in parts_dict:
                target_part = parts_dict[target_partname]
            else:
                target_part = Part()
                # register before recursing so cyclic references resolve
                parts_dict[target_partname] = target_part
                target_part._load(fs, target_partname, ct_dict, parts_dict)
            # create relationship to target_part
            rel = Relationship(rId, self, reltype, target_part)
            self.__relationships.append(rel)
        return self

    def _marshal(self, model_part, part_dict):
        """
        Load the contents of model-side part such that it can be saved to
        disk. Propagate marshalling to related parts.

        *part_dict* maps partnames to already-marshalled parts so each part
        is marshalled only once (the part graph is cyclic).
        """
        # unpack working values
        content_type = model_part._content_type
        # assign persisted attributes from model part
        self.__partname = model_part.partname
        self.blob = model_part._blob
        self.typespec = PartTypeSpec(content_type)
        # load relationships and propagate marshal to target parts
        for rel in model_part._relationships:
            # unpack working values for target part and relationship
            rId = rel._rId
            reltype = rel._reltype
            model_target_part = rel._target
            partname = model_target_part.partname
            # create package-part for target
            if partname in part_dict:
                part = part_dict[partname]
            else:
                part = Part()
                # register before recursing so cyclic references resolve
                part_dict[partname] = part
                part._marshal(model_target_part, part_dict)
            # create marshalled version of relationship
            marshalled_rel = Relationship(rId, self, reltype, part)
            self.__relationships.append(marshalled_rel)

    @property
    def _relsitem_element(self):
        # XML element for this part's relationships item (*.rels)
        nsmap = {None: pptx.spec.nsmap['pr']}
        element = etree.Element(qtag('pr:Relationships'), nsmap=nsmap)
        for rel in self.__relationships:
            element.append(rel._element)
        return element

    @property
    def _relsitemURI(self):
        """
        Return theoretical package URI for this part's relationships item,
        without regard to whether this part actually has a relationships item.
        """
        head, tail = os.path.split(self.__partname)
        return '%s/_rels/%s.rels' % (head, tail)

    def __get_rel_elms(self, fs):
        """
        Helper method for _load(). Return list of this relationship elements
        for this part from *fs*. Returns empty list if there are no
        relationships for this part, either because parts of this type never
        have relationships or its relationships are optional and none exist in
        this filesystem (package).
        """
        relsitemURI = self.__relsitemURI(self.typespec, self.__partname, fs)
        if relsitemURI is None:
            return []
        if relsitemURI not in fs:
            tmpl = "required relationships item '%s' not found in package"
            raise CorruptedPackageError(tmpl % relsitemURI)
        root_elm = fs.getelement(relsitemURI)
        return root_elm.findall(qtag('pr:Relationship'))

    @staticmethod
    def __relsitemURI(typespec, partname, fs):
        """
        REFACTOR: Combine this logic into __get_rel_elms, it's the only caller
        and logic is partially redundant.

        Return package URI for this part's relationships item. Returns None if
        a part of this type never has relationships. Also returns None if a
        part of this type has only optional relationships and the package
        contains no rels item for this part.
        """
        if typespec.has_rels == PTS_HASRELS_NEVER:
            return None
        head, tail = os.path.split(partname)
        relsitemURI = '%s/_rels/%s.rels' % (head, tail)
        if typespec.has_rels == PTS_HASRELS_OPTIONAL:
            # optional rels: only report a URI when the item actually exists
            return relsitemURI if relsitemURI in fs else None
        return relsitemURI
class Relationship(object):
    """
    Associates *source* with *target* under the source-local identifier
    *rId*. *source* is a |Package| or |Part| instance; *target* is always a
    |Part|. Note that *rId* is unique only within the scope of *source* —
    relationships have no globally unique identifier.

    Exposed attributes:

    .. attribute:: rId

       The source-local identifier for this relationship.

    .. attribute:: reltype

       The relationship type URI (defined by the ECMA spec), e.g.
       'http://schemas.openxmlformats.org/.../relationships/slide'

    .. attribute:: target

       The target :class:`pptx.packaging.Part` instance.
    """
    def __init__(self, rId, source, reltype, target):
        super(Relationship, self).__init__()
        self.__source = source
        self.rId = rId
        self.reltype = reltype
        self.target = target

    @property
    def _element(self):
        """lxml element holding the XML form of this relationship."""
        rel_elm = etree.Element('Relationship')
        for attr_name, attr_value in (('Id', self.rId),
                                      ('Type', self.reltype),
                                      ('Target', self.__target_relpath)):
            rel_elm.set(attr_name, attr_value)
        return rel_elm

    @property
    def __baseURI(self):
        """Directory portion of the source's item URI."""
        if not isinstance(self.__source, Part):
            # source is the package itself, not a part
            return PKG_BASE_URI
        return os.path.split(self.__source.partname)[0]

    @property
    def __target_relpath(self):
        """Target partname expressed relative to the source's base URI."""
        base = self.__baseURI
        # posixpath.relpath in Python 2.6 produces a wrong result when the
        # *start* argument is the root ('/'), so handle that case by hand
        if base == '/':
            return self.target.partname[1:]
        return posixpath.relpath(self.target.partname, base)
class PartTypeSpec(object):
    """
    Metadata for parts of type *content_type*. Instances are cached, so no
    more than one instance exists per content type.

    Attributes, all loaded from :attr:`pptx.spec.pml_parttypes`:

    * *content_type* — MIME-type-like string identifying how content is
      encoded for parts of this type, as recorded in ``[Content_Types].xml``.
    * *basename* — root of the partname "filename" segment, e.g.
      ``slideLayout`` for ``/ppt/slideLayouts/slideLayout1.xml``.
    * *ext* — partname extension including the leading period, e.g. ``.xml``.
    * *cardinality* — ``PTS_CARDINALITY_SINGLETON`` or
      ``PTS_CARDINALITY_TUPLE``: whether at most one or multiple parts of
      this type may appear in a package.
    * *required* — whether at least one part of this type must appear.
    * *baseURI* — the "directory" portion of the partname, e.g.
      ``/ppt/slideLayouts``.
    * *has_rels* — ``PTS_HASRELS_ALWAYS``, ``PTS_HASRELS_NEVER``, or
      ``PTS_HASRELS_OPTIONAL``.
    * *reltype* — the ``Type`` attribute value used on ``Relationship``
      elements targeting a part of this type.
    """
    # cache of instances keyed by content type
    __instances = {}

    def __new__(cls, content_type):
        """Return cached instance for *content_type*, creating on first use."""
        try:
            return cls.__instances[content_type]
        except KeyError:
            inst = super(PartTypeSpec, cls).__new__(cls)
            cls.__instances[content_type] = inst
            # note that __init__() still gets called on the new instance
            return inst

    def __init__(self, content_type):
        """Initialize spec attributes from constant values in pptx.spec."""
        # cached instances were already initialized; skip reloading
        if hasattr(self, '_loaded'):
            return
        self._loaded = True
        if content_type not in pptx.spec.pml_parttypes:
            tmpl = "no content type '%s' in pptx.spec.pml_parttypes"
            raise KeyError(tmpl % content_type)
        ptsdict = pptx.spec.pml_parttypes[content_type]
        self.content_type = content_type
        # copy remaining spec attributes straight off the constants dict
        for attr in ('basename', 'cardinality', 'ext', 'required',
                     'baseURI', 'has_rels', 'reltype'):
            setattr(self, attr, ptsdict[attr])

    @property
    def format(self):
        """One of ``'xml'`` or ``'binary'``."""
        return 'binary' if self.ext != '.xml' else 'xml'
# ============================================================================
# Support Classes
# ============================================================================
class _ContentTypesItem(object):
"""
Lookup content type by part name using dictionary syntax, e.g.
``content_type = cti['/ppt/presentation.xml']``.
"""
def __init__(self):
super(_ContentTypesItem, self).__init__()
self.__defaults = None
self.__overrides = None
def __getitem__(self, partname):
"""
Return the content type for the part with *partname*.
"""
# raise exception if called before load()
if self.__defaults is None or self.__overrides is None:
tmpl = "lookup _ContentTypesItem['%s'] attempted before load"
raise ValueError(tmpl % partname)
# first look for an explicit content type
if partname in self.__overrides:
return self.__overrides[partname]
# if not, look for a default based on the extension
ext = os.path.splitext(partname)[1] # get extension of partname
# with leading dot trimmed off
ext = ext[1:] if ext.startswith('.') else ext
if ext in self.__defaults:
return self.__defaults[ext]
# if neither of those work, raise an exception
tmpl = "no content type for part '%s' in [Content_Types].xml"
raise LookupError(tmpl % partname)
def __len__(self):
"""
Return sum count of Default and Override elements.
"""
count = len(self.__defaults) if self.__defaults is not None else 0
count += len(self.__overrides) if self.__overrides is not None else 0
return count
def compose(self, parts):
"""
Assemble a [Content_Types].xml item based on the contents of *parts*.
"""
# extensions in this dict include leading '.'
def_cts = pptx.spec.default_content_types
# initialize working dictionaries for defaults and overrides
self.__defaults = dict((ext[1:], def_cts[ext])
for ext in ('.rels', '.xml'))
self.__overrides = {}
# compose appropriate element for each part
for part in parts:
ext = os.path.splitext(part.partname)[1]
# if extension is '.xml', assume an override. There might be a
# fancier way to do this, otherwise I don't know what 'xml'
# Default entry is for.
if ext == '.xml':
self.__overrides[part.partname] = part.content_type
elif ext in def_cts:
self.__defaults[ext[1:]] = def_cts[ext]
else:
tmpl = "extension '%s' not found in default_content_types"
raise LookupError(tmpl % (ext))
return self
@property
def element(self):
nsmap = {None: pptx.spec.nsmap['ct']}
element = etree.Element(qtag('ct:Types'), nsmap=nsmap)
if self.__defaults:
for ext in sorted(self.__defaults.keys()):
subelm = etree.SubElement(element, qtag('ct:Default'))
subelm.set('Extension', ext)
subelm.set('ContentType', self.__defaults[ext])
if self.__overrides:
for partname in sorted(self.__overrides.keys()):
subelm = etree.SubElement(element, qtag('ct:Override'))
subelm.set('PartName', partname)
subelm.set('ContentType', self.__overrides[partname])
return element
def load(self, fs):
"""
Retrieve [Content_Types].xml from specified file system and load it.
Returns a reference to this _ContentTypesItem instance to allow
generative call, e.g. ``cti = _ContentTypesItem().load(fs)``.
"""
element = fs.getelement('/[Content_Types].xml')
defaults = element.findall(qtag('ct:Default'))
overrides = element.findall(qtag('ct:Override'))
self.__defaults = dict((d.get('Extension'), d.get('ContentType'))
for d in defaults)
self.__overrides = dict((o.get('PartName'), o.get('ContentType'))
for o in overrides)
return self
# ============================================================================
# FileSystem Classes
# ============================================================================
class FileSystem(object):
    """
    Factory for filesystem interface instances.

    Returns the concrete filesystem class appropriate to *file*: a
    |ZipFileSystem| for a zip package file (path or file-like object), or a
    |DirectoryFileSystem| for a directory containing an expanded package.
    A FileSystem object provides access to on-disk package items via their
    URI (e.g. ``/_rels/.rels`` or ``/ppt/presentation.xml``), hiding the
    details of translating URIs into file paths or zip member names.
    """
    def __new__(cls, file):
        # anything that is not a path string is treated as a file-like object
        if not isinstance(file, basestring):
            return ZipFileSystem(file)
        if is_zipfile(file):
            return ZipFileSystem(file)
        if os.path.isdir(file):
            return DirectoryFileSystem(file)
        raise PackageNotFoundError("Package not found at '%s'" % file)
class BaseFileSystem(object):
    """
    Base class for FileSystem classes, providing common methods.

    Subclasses must provide ``getstream(itemURI)`` returning a file-like
    object and an ``itemURIs`` property listing all item URIs.
    """
    def __init__(self):
        super(BaseFileSystem, self).__init__()

    def __contains__(self, itemURI):
        """
        Allows use of 'in' operator to test whether an item with the specified
        URI exists in this filesystem.
        """
        return itemURI in self.itemURIs

    def getblob(self, itemURI):
        """Return byte string of item identified by *itemURI*."""
        if itemURI not in self:
            raise LookupError("No package item with URI '%s'" % itemURI)
        stream = self.getstream(itemURI)
        # fix: close the stream even if read() raises
        try:
            blob = stream.read()
        finally:
            stream.close()
        return blob

    def getelement(self, itemURI):
        """
        Return ElementTree element of XML item identified by *itemURI*.

        Raises :exc:`NotXMLError` if the item cannot be parsed as XML.
        """
        if itemURI not in self:
            raise LookupError("No package item with URI '%s'" % itemURI)
        stream = self.getstream(itemURI)
        # fix: previously the stream leaked when parsing raised
        # XMLSyntaxError; the finally clause closes it on every path
        try:
            parser = etree.XMLParser(remove_blank_text=True)
            element = etree.parse(stream, parser).getroot()
        except etree.XMLSyntaxError:
            raise NotXMLError("package item %s is not XML" % itemURI)
        finally:
            stream.close()
        return element
class DirectoryFileSystem(BaseFileSystem):
    """
    Provides access to package members that have been expanded into an
    on-disk directory structure.

    Inherits __contains__(), getelement(), and getblob() from BaseFileSystem.
    """
    def __init__(self, path):
        """
        *path* is the path to a directory containing an expanded package.
        """
        super(DirectoryFileSystem, self).__init__()
        if not os.path.isdir(path):
            raise ValueError("path '%s' not a directory" % path)
        self.__path = os.path.abspath(path)

    def close(self):
        """
        No-op; present only for interface parity with |ZipFileSystem| — a
        directory file system needs no closing.
        """
        pass

    def getstream(self, itemURI):
        """
        Return file-like object containing package item identified by
        *itemURI*. Remember to call close() on the stream when you're done
        with it to free up the memory it uses.
        """
        if itemURI not in self:
            raise LookupError("No package item with URI '%s'" % itemURI)
        item_path = os.path.join(self.__path, itemURI[1:])
        item_file = open(item_path, 'rb')
        try:
            # buffer the whole item so the OS file handle is released now
            return StringIO(item_file.read())
        finally:
            item_file.close()

    @property
    def itemURIs(self):
        """
        Return sorted list of all filenames under the filesystem root
        directory, formatted as item URIs: the file's relative path with a
        leading slash, e.g. '/ppt/slides/slide1.xml'.
        """
        uris = []
        root_len = len(self.__path)
        for dirpath, dirnames, filenames in os.walk(self.__path):
            for name in filenames:
                # strip the root prefix, keeping the leading separator
                rel = os.path.join(dirpath, name)[root_len:]
                uris.append(rel.replace(os.sep, '/'))
        uris.sort()
        return uris
class ZipFileSystem(BaseFileSystem):
    """
    Filesystem interface to a zip-format OPC package contained in *file*,
    where *file* is either a path string or a file-like object. If mode is
    'w', a new zip archive is written to *file*, truncating any existing
    file at that path.

    Inherits :meth:`__contains__`, :meth:`getelement`, and :meth:`getblob`
    from BaseFileSystem.
    """
    def __init__(self, file, mode='r'):
        super(ZipFileSystem, self).__init__()
        self.zipf = (ZipFile(file, 'w', compression=ZIP_DEFLATED)
                     if 'w' in mode else ZipFile(file, 'r'))

    def close(self):
        """
        Close the underlying zip archive; required to finish the write
        process when the instance was opened for writing.
        """
        self.zipf.close()

    def getstream(self, itemURI):
        """
        Return file-like object containing package item identified by
        *itemURI*. Remember to call close() on the stream when you're done
        with it to free up the memory it uses.
        """
        if itemURI not in self:
            raise LookupError("No package item with URI '%s'" % itemURI)
        # member names have no leading slash
        return StringIO(self.zipf.read(itemURI[1:]))

    @property
    def itemURIs(self):
        """
        Return sorted list of archive members formatted as item URIs: each
        archive-relative member name with a forward slash prepended, e.g.
        '/ppt/slides/slide1.xml'. Directory entries are excluded.
        """
        uris = ['/%s' % name for name in self.zipf.namelist()
                if not name.endswith('/')]
        uris.sort()
        return uris

    def write_blob(self, blob, itemURI):
        """
        Write *blob* to zip file as binary stream named *itemURI*.
        """
        self.zipf.writestr(self.__membername(itemURI), blob)

    def write_element(self, element, itemURI):
        """
        Write *element* to zip file as an XML document named *itemURI*.
        """
        membername = self.__membername(itemURI)
        xml = etree.tostring(element, encoding='UTF-8', pretty_print=True,
                             standalone=True)
        self.zipf.writestr(membername, prettify_nsdecls(xml))

    def __membername(self, itemURI):
        """
        Reject duplicate *itemURI* and return its archive member name
        (the URI with the leading slash trimmed off).
        """
        if itemURI in self:
            tmpl = "Item with URI '%s' already in package"
            raise DuplicateKeyError(tmpl % itemURI)
        return itemURI[1:]
# ============================================================================
# Utility functions
# ============================================================================
def prettify_nsdecls(xml):
    """
    Wrap and indent second and later attributes on the root element so
    namespace declarations don't run off the page in the text editor and can
    be more easily inspected.

    Returns *xml* unchanged when it is single-line, lacks an XML declaration
    on the first line, lacks a root element on the second line, or the root
    element carries fewer than two attributes.
    """
    lines = xml.splitlines()
    # single-line documents are left untouched
    if len(lines) < 2:
        return xml
    # bail unless line 1 is the xml declaration and line 2 a root element
    if not lines[0].startswith('<?xml'):
        return xml
    if not lines[1].startswith('<'):
        return xml
    rootline = lines[1]
    # split the root line into the element-tag part and attribute parts,
    # e.g. ['<p:sld', 'xmlns:p="html://..."', 'name="Office Theme>"']
    attr_re = re.compile(r'([-a-zA-Z0-9_:.]+="[^"]*" */?>?)')
    substrs = [s.strip() for s in attr_re.split(rootline) if s]
    # a lone attribute needs no wrapping
    if len(substrs) < 3:
        return xml
    # align continuation lines one column past the element tag
    pad = ' ' * (len(substrs[0]) + 1)
    pieces = [' '.join(substrs[:2])]
    pieces.extend(pad + s for s in substrs[2:])
    lines[1] = '\n'.join(pieces)
    return '\n'.join(lines)
|
from django.shortcuts import render, redirect, get_object_or_404
from django.http import HttpResponseRedirect
from django.urls import reverse
from .forms import *
from Profile.forms import ProfileUpdateForm
from Profile.models import Profile
from .models import Posts,Like,Logins
from django.contrib import messages
from django.contrib.auth import authenticate, login
from django.views.generic import ListView, DetailView, CreateView, UpdateView
from django.contrib.auth.decorators import login_required
from django.contrib.auth.mixins import LoginRequiredMixin, UserPassesTestMixin
from django.urls import reverse_lazy
# Create your views here.
@login_required
def domains(request):
    """
    Render the domains page listing every Domains record.

    ``@login_required`` added: the view resolves the visitor's Profile via
    ``request.user``, which fails for anonymous users; other views in this
    module are already guarded the same way.
    """
    context = {
        'dom': Domains.objects.all(),
        'profile': Profile.objects.get(user=request.user),
    }
    return render(request, 'domains.html', context)
@login_required
def notifications(request):
    """
    Render the notifications page for the current user.

    ``@login_required`` added: the view resolves the visitor's Profile via
    ``request.user``, which fails for anonymous users.
    """
    context = {
        'profile': Profile.objects.get(user=request.user),
    }
    return render(request, 'notifications.html', context)
def register(request):
    """
    Create a new user account from a LoginForm, then log the new user in
    and redirect to the site root. On GET (or an invalid POST) the
    registration form is (re)rendered.
    """
    if request.method == 'POST':
        form = LoginForm(request.POST)
        if form.is_valid():
            # form.save() already persists the user; the extra user.save()
            # the original code made was a redundant second DB write
            user = form.save()
            raw_password = form.cleaned_data.get('password1')
            # re-authenticate so login() gets a backend-annotated user
            user = authenticate(username=user.username, password=raw_password)
            login(request, user)
            messages.success(request, "Your account has been created successfully")
            return redirect('/')
    else:
        form = LoginForm()
    # GET: fresh form; invalid POST: bound form with errors
    return render(request, 'register.html', {'form': form})
@login_required
def blog_post_like(request):
    # Toggle the current user's like on a post, then redirect home.
    # Like state is tracked in two places: the Posts.likes M2M relation and
    # a separate Like row whose ``value`` field flips 'Like'/'Unlike'.
    user = request.user
    if request.method == 'POST':
        post_id = request.POST.get('post_id')
        post_obj = Posts.objects.get(id=post_id)
        # toggle membership in the likes M2M relation
        if user in post_obj.likes.all():
            post_obj.likes.remove(user)
        else:
            post_obj.likes.add(user)
        like, created = Like.objects.get_or_create(user=user, post_id=post_id)
        if not created:
            # existing Like row: flip its value to mirror the toggle above
            if like.value=='Like':
                like.value='Unlike'
            else:
                like.value='Like'
        else:
            like.value='Like'
        # NOTE(review): post_obj.save() looks redundant — likes.add()/
        # remove() persist the M2M change themselves — confirm before
        # removing.
        post_obj.save()
        like.save()
    return redirect('home_page')
@login_required
def post_comment_create_and_list_view(request):
    """
    Handle comment submission from the home page, then redirect back to it.

    The original built a context dict (posts, post form, comment form) that
    was discarded by the unconditional redirect; that dead code and its
    unused queryset/form locals are removed.
    """
    if 'submit_c_form' in request.POST:
        c_form = CommentModelForm(request.POST)
        if c_form.is_valid():
            # attach the commenting profile and target post before saving
            comment = c_form.save(commit=False)
            comment.user = Profile.objects.get(user=request.user)
            comment.post = Posts.objects.get(id=request.POST.get('post_id'))
            comment.save()
    return redirect('home_page')
"""class PostListView(ListView):
model = Posts
template_name = 'home.html'
context_object_name = 'home_posts'
extra_context = {
"form": PostForm(),
"c_form": CommentModelForm(),
#'profile': Profile.objects.get(user=self.request.user)
}
ordering = ['-createdTime']"""
@login_required
def post_list_view(request):
    """
    Render the home page with all posts plus fresh post and comment forms.

    ``@login_required`` added: the view resolves the visitor's Profile via
    ``request.user``, which fails for anonymous users; the class-based views
    below are already guarded with LoginRequiredMixin.
    """
    context = {
        'home_posts': Posts.objects.all(),
        'form': PostForm(),
        'c_form': CommentModelForm(),
        'profile': Profile.objects.get(user=request.user),
    }
    return render(request, 'home.html', context)
class PostDetailView(LoginRequiredMixin, DetailView):
    # Display a single Posts instance; login required.
    model = Posts
    template_name = 'detail_post.html'
class PostCreateView(LoginRequiredMixin, CreateView):
    # Create a new Posts record from PostForm; login required.
    form_class = PostForm
    model = Posts
    template_name = 'home.html'
    success_url = reverse_lazy('home_page')

    def form_valid(self, form):
        # Stamp the current user's Profile as the author before saving.
        profile = Profile.objects.get(user=self.request.user)
        form.instance.author = profile
        return super().form_valid(form)
class PostUpdateView(LoginRequiredMixin, UserPassesTestMixin, UpdateView):
    # Let the author of a post edit it via PostForm; login required and
    # restricted to the author by test_func() below.
    form_class = PostForm
    model = Posts
    template_name = 'update_posts.html'
    success_url = reverse_lazy('home_page')

    def form_valid(self, form):
        # NOTE(review): this author check duplicates test_func() —
        # UserPassesTestMixin already blocks non-authors before the form is
        # processed, so the else-branch appears unreachable; confirm before
        # simplifying.
        profile = Profile.objects.get(user=self.request.user)
        if form.instance.author == profile:
            return super().form_valid(form)
        else:
            form.add_error(None, "You need to be the author of the post in order to update it")
            return super().form_invalid(form)

    def test_func(self):
        # UserPassesTestMixin hook: only the post's author may access.
        post = self.get_object()
        profile = Profile.objects.get(user=self.request.user)
        if profile == post.author:
            return True
        return False
|
"""
Copyright (c) 2016 Cyrill Jauner
Permission is hereby granted, free of charge, to any person obtaining a copy of this software
and associated documentation files (the "Software"), to deal in the Software without restriction,
including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import sys, RPSNetwork, Graphs, pickle, time
from thread import *
#
# User input to start the game.
#
USR_START = 's'
#
# The number of graphs in a game.
#
GRAPH_NUMBERS = 3
#
# Flag for the input_handler function. Indicates that the input should be string.
#
REQ_STRING = 0
#
# Flag for the input_handler function. Indicates that the input should be int.
#
REQ_INTEGER = 1
#
# Flag for the input_handler function. Indicates that the input should be a valid port number.
#
REQ_PORT = 2
#
# The game result for winning.
#
RES_WIN = 1
#
# The game result for losing.
#
RES_LOOSE = 0
#
# The game result for a draw.
#
RES_DRAW = -1
class Player:
    """
    Represents one participant: a display name, the RPSClient used for all
    network traffic, and the list of graphs currently in play.
    """

    def __init__(self):
        """
        Create a player with no name, no client and an empty graph list.
        """
        self.name = ''
        self.client = None
        self.graphs = []

    def look_for_server(self, port):
        """
        Ask self.client to discover an RPSServer.
        :param port: The port number of the RPSServer.
        :return: The server's address tuple, or None when none responds.
        """
        return self.client.discover(port)

    def connect(self, srv_addr):
        """
        Connect self.client to the given (host, port) address.
        :param srv_addr: Address tuple of the server.
        """
        self.client.connect(srv_addr)

    def send(self, msg):
        """
        Transmit the given string message through self.client.
        :param msg: String message to send.
        """
        self.client.send(msg)

    def receive(self):
        """
        Read one message from self.client.
        :return: The received message, or None when the client socket is unset.
        """
        return self.client.receive()

    def add_graph(self, g):
        """
        Store the given graph object at the end of self.graphs.
        :param g: Reference to graph object.
        """
        self.graphs.append(g)

    def send_graph(self, i):
        """
        Pickle the edge list of the graph at index *i* and send it.
        :param i: The index of a graph in self.graphs.
        """
        self.send(pickle.dumps(self.graphs[i].edges))

    def load_graph(self, dmp):
        """
        Unpickle an edge list and append the resulting graph.
        :param dmp: A pickle dump that contains the edge list of a graph.
        """
        self.graphs.append(Graphs.Graph(pickle.loads(dmp)))

    def get_graph(self, i):
        """
        Look up a stored graph by index.
        :param i: The index of the graph.
        :return: The graph at index *i*, or None when out of range.
        """
        return self.graphs[i] if 0 <= i < len(self.graphs) else None

    def get_graph_index(self, g):
        """
        Find the position of a graph, matching by edge list.
        :param g: The graph to find the index of.
        :return: The index of the graph, or -1 when it is not stored.
        """
        for idx, candidate in enumerate(self.graphs):
            if candidate.edges == g.edges:
                return idx
        return -1
def separator(num_lines):
    """
    Prints a string of *-signs to the console as a visual divider.
    :param num_lines: The number of lines to print out.
    """
    # Parenthesised print is behaviourally identical on Python 2 for a
    # single argument and keeps this function forward-compatible.
    for _ in range(num_lines):
        print('******************************************')
def input_handler(req=REQ_STRING, prompt=''):
    """
    Helper function to read one validated value from the console.
    :param req: One of REQ_STRING, REQ_INTEGER or REQ_PORT.
    :param prompt: String to show on the console.
    :return: The user's input when valid, otherwise None.
    """
    if req == REQ_STRING:
        return raw_input(prompt)
    if req == REQ_INTEGER:
        try:
            return int(raw_input(prompt))
        except ValueError:
            return None
    if req == REQ_PORT:
        # A port is an integer within the unsigned 16-bit range.
        candidate = input_handler(REQ_INTEGER, prompt)
        if candidate is None or candidate < 0 or candidate > 65535:
            return None
        return candidate
    # Unknown request flag: nothing read, nothing returned.
    return None
def init():
    """
    Initializes the game. Asks for the user name and connection details. The
    server address can be None if no server can be found and an own server
    can not be started (the program exits in that case).
    :return: [0] A player object and [1] the server address.
    """
    # Anything other than USR_START aborts the program.
    inp = input_handler()
    if inp != USR_START:
        sys.exit(0)
    print('Type your name')
    inp = input_handler(REQ_STRING, 'Your name: ')
    # Creates a new player object to store the name and a client object.
    player = Player()
    player.name = inp
    player.client = RPSNetwork.RPSClient()
    # Asks for the server port until a valid one is supplied.
    print('Type the servers port')
    inp = input_handler(REQ_PORT, 'Port number:')
    while inp is None:
        print("Your input is not valid...")
        # BUGFIX: retry with REQ_PORT (was REQ_INTEGER), otherwise an
        # out-of-range integer such as 99999 was accepted on retry.
        inp = input_handler(REQ_PORT, 'Port number:')
    # Discovers the network for a RPS server.
    srv_addr = player.look_for_server(inp)
    # Starts own server if no server can be found.
    if srv_addr is None:
        print('No RPS Server found, try to start a new server')
        print('On which port should your server listen?')
        # Waits for a valid port number.
        inp = input_handler(REQ_PORT, 'Port number:')
        while inp is None:
            print("Your input is not valid...")
            # BUGFIX: same REQ_PORT retry as above.
            inp = input_handler(REQ_PORT, 'Port number:')
        # Creates a new RPSServer and runs it on its own thread.
        server = RPSNetwork.RPSServer()
        start_new_thread(server.start, (inp, inp))
        # Give the server a moment to come up before checking its state.
        time.sleep(2)
        if not server.running:
            print('Unable to start server')
            sys.exit(0)
        else:
            srv_addr = ('localhost', inp)
    separator(1)
    return player, srv_addr
def connect(player, srv_addr):
    """
    Connects the given player to the given server address and exchanges
    player names: sends our name, then blocks until the server relays the
    opponent's name.
    :param player: The player object (its client must be initialised).
    :param srv_addr: Server address, port tuple.
    """
    # Parenthesised print keeps single-argument output identical on
    # Python 2 while remaining valid Python 3 syntax.
    player.connect(srv_addr)
    player.send(player.name)
    print('Successfully connected with ' + str(srv_addr[0]))
    print('Wait for other players')
    # Waits for the opponent.
    opponents_name = player.receive()
    print('Your opponent is ' + opponents_name)
def share_graphs(player):
    """
    Distribute the game graphs between both players.  The server decides
    the role: either this player generates GRAPH_NUMBERS random graphs and
    uploads them, or it downloads the graphs generated by the opponent.
    The player's client must be connected to invoke this function.
    :param player: The player object.
    :return: Whether the sharing was successful or not.
    """
    separator(1)
    success = False
    # Waits for the server response. There are two possible cases:
    # either this player has to generate graphs,
    # or this player gets graphs from the other player.
    srv_req = player.receive()
    if srv_req is not None:
        if srv_req == RPSNetwork.GRAPHS_NEED:
            # This player has to generate graphs; each one is sent to
            # the server as it is created.
            print('Generate graphs...')
            for i in range(0, GRAPH_NUMBERS):
                g = Graphs.random_graph(100, 2)
                player.add_graph(g)
                player.send_graph(i)
            print('All graphs sent')
            success = True
        elif srv_req == RPSNetwork.GRAPHS_SEND_START:
            # This player receives pickled graphs until the end marker.
            srv_req = player.receive()
            while srv_req != RPSNetwork.GRAPHS_SEND_END:
                player.load_graph(srv_req)
                srv_req = player.receive()
            print('All graphs received')
            success = True
    else:
        print('The server is not accessible')
    separator(1)
    return success
def ask_for_graph(player):
    """
    Lets the user choose rock (0), paper (1) or scissor (2).  Re-prompts
    until the input is an integer within range, so the returned value is
    always a valid index into the player's graph list.
    :param player: The player object (unused, kept for interface parity).
    :return: The chosen index, guaranteed to be 0, 1 or 2.
    """
    choices = ['Rock', 'Paper', 'Scissor']
    print('Your turn. Choose an integer in the range 0 to 2')
    print('The values stands for: 0-Rock, 1-Paper, 2-Scissor')
    i = input_handler(REQ_INTEGER, 'Choice:')
    # BUGFIX: also reject integers outside 0..2; previously a value such
    # as 5 raised IndexError and -1 silently picked the wrong entry.
    while i is None or i < 0 or i > 2:
        print('The input was not correct. Try again!')
        i = input_handler(REQ_INTEGER, 'Choice:')
    # BUGFIX: message typo ("You're choice") corrected.
    print('Your choice ' + choices[i])
    separator(1)
    return i
def ask_for_isomorphic_graph(player):
    """
    Lets the user choose a graph and returns a random isomorphic copy.
    :param player: The player object.
    :return: The graph object, The isomorphism.
    """
    # BUGFIX: ask_for_graph returns an *index*, not a graph object, so the
    # chosen graph has to be fetched from the player before copying;
    # previously this raised AttributeError on the int.
    return player.get_graph(ask_for_graph(player)).isomorphic_copy()
def oppon_turn(player):
    """
    Receives a request of the opponent and loads it with pickle.
    :param player: The player object.
    :return: The pickle load result of the request.
    """
    print('Wait for opponents turn...')
    return pickle.loads(player.receive())
def calc_result(my_i, op_i):
    """
    Calculates the game result. Player 1 must be the own player object and
    player 2 the opponent.  Choices are indices: 0-Rock, 1-Paper, 2-Scissor.
    There are three possible results:
    - Player 1 has won
    - Player 2 has won
    - It's a draw
    The RES_ constants represent the three results.
    :param my_i: The graph index of player 1
    :param op_i: The graph index of player 2
    :return: The game result (RES_WIN, RES_LOOSE or RES_DRAW).
    """
    names = ['Rock', 'Paper', 'Scissor']
    if my_i == op_i:
        res = RES_DRAW
    else:
        # In the cycle rock < paper < scissor < rock, the winner is
        # exactly one step ahead of the loser (mod 3):
        # (0,2), (1,0) and (2,1) are the winning pairs.
        res = RES_WIN if (my_i - op_i) % 3 == 1 else RES_LOOSE
    # BUGFIX: previously the opponent's choice was only filled in when this
    # player won; on a loss or draw the message printed an empty name.
    print('The opponent choose ' + names[op_i])
    return res
def finish_turn(player, my_i, op_graph):
    """
    Finish the current turn: validate the opponent's graph, score the turn
    and announce the outcome.  Exits the program when the received graph
    matches none of the shared graphs.
    :param player: The player object.
    :param my_i: This player's chosen graph index.
    :param op_graph: The graph object of the opponent.
    :return: The game result (RES_WIN, RES_LOOSE or RES_DRAW).
    """
    op_i = player.get_graph_index(op_graph)
    if op_i == -1:
        print('The received graph is not correct. The game is exited.')
        sys.exit(1)
    # sys.exit never returns, so no else-branch is needed here.
    game_result = calc_result(my_i, op_i)
    if game_result == RES_WIN:
        print('You won!')
    elif game_result == RES_LOOSE:
        print('You loose...')
    elif game_result == RES_DRAW:
        print("It's a draw")
    return game_result
def play(player):
    """
    Handles one game turn. This function should be invoked after init(),
    connect() and share_graphs().  The server assigns the role for this
    turn: TURN_NEED means this side commits its (isomorphic) graph first,
    TURN_SEND means the opponent moves first and this side reconstructs
    the opponent's original graph from the received isomorphism.
    :param player: The player object.
    :return: True when the turn decided the game (win/loss), False on a
             draw so another turn is played.
    """
    srv_req = player.receive()
    is_over = False
    game_result = None
    if srv_req == RPSNetwork.TURN_NEED:
        # This side moves first.
        # Asks for rock, paper or scissor.
        choice = ask_for_graph(player)
        my_g = player.get_graph(choice)
        # A isomorphic copy of the chosen graph will be sent to the opponent.
        my_iso_g, iso = my_g.isomorphic_copy()
        dmp = pickle.dumps(my_iso_g)
        player.send(dmp)
        # Receives the opponents chosen graph.
        op_g = oppon_turn(player)
        # Send the isomorphism (only after the opponent has committed).
        player.send(pickle.dumps(iso))
        # Check if the game is over and determine the winner.
        game_result = finish_turn(player, choice, op_g)
    elif srv_req == RPSNetwork.TURN_SEND:
        # This side moves second.
        # Receives the opponents chosen graph.
        op_g = oppon_turn(player)
        # Asks for rock, paper or scissor.
        choice = ask_for_graph(player)
        my_g = player.get_graph(choice)
        dmp = pickle.dumps(my_g)
        player.send(dmp)
        # Receives the opponents isomorphism
        op_iso = oppon_turn(player)
        # Calculates the opponents graph back by inverting the permutation.
        inv_func = Graphs.inv_permut_function(op_iso)
        op_edges = Graphs.apply_isomorphism(op_g.edges, inv_func)
        op_g = Graphs.Graph(op_edges)
        # Check if the game is over and determine the winner.
        game_result = finish_turn(player, choice, op_g)
    if game_result != RES_DRAW:
        is_over = True
    player.send(str(game_result))
    return is_over
def play_again(player):
    """
    Handles a regame. Asks the given player if he wants to play again;
    in the positive case it then waits for the opponent's answer.
    :param player: The player object.
    :return: True when both players want to play again, otherwise False.
    """
    print('Type a to play again')
    again = input_handler(REQ_STRING, 'input: ')
    if again != 'a':
        player.send(RPSNetwork.PLAY_AGAIN_FALSE)
        return False
    player.send(RPSNetwork.PLAY_AGAIN_TRUE)
    print('Wait for the opponents response')
    op_answer = player.receive()
    if op_answer == RPSNetwork.PLAY_AGAIN_TRUE:
        return True
    # BUGFIX: this path previously fell through and returned None; make the
    # refusal explicit so callers always receive a bool.
    return False
|
"""Queries the database for specific genes."""
import sys
from bloom_filter import encode
import database
def main(f=None):
    """Reads in queries from a file and searches for them. If no file present,
    reads in queries from the standard input and searches for them.
    Note: queries from standard in have a maximum of 1023 characters.
    Args:
        f: A text file containing queries. Each query should be on its own line.
    """
    if f:
        try:
            # The context manager closes the file; the explicit close()
            # the original performed inside the with-block was redundant.
            with open(f, 'r') as queries_file:
                queries = queries_file.read().splitlines()
        except OSError:
            # Narrowed from a bare except so programming errors surface
            # instead of being reported as a missing file.
            print("Requires a text file with the queries.")
            sys.exit(2)
        for query_sequence in queries:
            if query_sequence:
                print("Query: ", query_sequence.upper(), "\n")
                gene, iou = query(query_sequence)
                print("Best IOU: ", iou, "\n")
                print("Sequence: ", gene.sequence)
                print("---------------------------------------------\n")
    else:
        print("Enter query: ")
        for line in sys.stdin:
            gene, iou = query(line)
            print("Best IOU: ", iou)
            print("Sequence: ", gene.sequence, "\n")
            print("Enter query: ")
def query(query_sequence):
    """Encodes a query and searches for it in the database.
    Args:
        query_sequence: A genetic sequence (string) to be searched for.
    Returns:
        The 'Gene' that is the 'best match' to the query.
        The IOU for the 'best match' and the query.
    """
    print("encoding query...")
    encoded = encode(query_sequence)
    print("...query complete")
    print("performing search...")
    best_gene, best_iou = database.search(encoded)
    print("...search complete \n")
    return best_gene, best_iou
if __name__ == '__main__':
    # The first CLI argument, when given, is the path to a query file;
    # otherwise main() falls back to reading stdin.
    query_file = sys.argv[1] if len(sys.argv) > 1 else None
    main(query_file)
    sys.exit(0)
|
from pathlib import Path
from gpxpy import gpx as gpxpy
import matplotlib.pyplot as plt
import osmnx as ox
class Plotter(object):
    """Renders a walk map (and optionally a route on top of it) to PNG/SVG
    files under ``render_dir/<walk_id>/``."""

    FIGSIZE = (15, 15)
    COLOR_BACKGROUND = '#10627a'
    COLOR_FOOTPRINT = '#083440'
    COLOR_ROUTE = '#f30c2c'

    def __init__(self, walk_id, render_dir):
        self._walk_id = walk_id
        self._render_dir = Path(render_dir).joinpath(walk_id)
        # BUGFIX: initialise the axes handle so plot_route can detect a
        # missing map with its intended error message instead of raising
        # AttributeError on the unset attribute.
        self._ax = None
        _ensure_dir_exists(self._render_dir)

    def plot_map(self, start_point, distance, type_, footprint=None):
        """Draw the base street map (figure stays open for plot_route).

        footprint, when given, is plotted over the map (presumably a
        GeoDataFrame-like object exposing .plot(ax=...) — confirm caller).
        """
        _, self._ax = ox.plot_figure_ground(point=start_point,
                                            dist=distance,
                                            network_type=type_,
                                            bgcolor=self.COLOR_BACKGROUND,
                                            default_width=4,
                                            figsize=self.FIGSIZE,
                                            show=False,
                                            close=False)
        if footprint is not None:
            footprint.plot(ax=self._ax, color=self.COLOR_FOOTPRINT, alpha=0.75)

    def plot_route(self, graph, route):
        """Overlay *route* (a node-id sequence from *graph*) onto the map."""
        if self._ax is None:
            raise Exception('Need to plot the map first before the route.')
        ox.plot_graph_route(graph, route, ax=self._ax, route_colors=self.COLOR_ROUTE,
                            alpha=1, orig_dest_size=250, route_linewidth=4,
                            show=False, close=False)

    def close(self):
        """Save the current figure as PNG and SVG, close it and return the
        directory the files were written to."""
        plt.savefig(self._render_dir.joinpath(f'{self._walk_id}.png'))
        plt.savefig(self._render_dir.joinpath(f'{self._walk_id}.svg'))
        plt.close()
        return self._render_dir
class GPXRenderer(object):
    """Writes a walk route as a GPX file under ``render_dir/<walk_slug>/``."""

    def __init__(self, walk_name, walk_slug, render_dir):
        self._walk_name = walk_name
        self._walk_slug = walk_slug
        self._render_dir = Path(render_dir).joinpath(walk_slug)
        _ensure_dir_exists(self._render_dir)

    def render_route(self, graph, route):
        """Convert *route* (node ids of *graph*) into a single-track,
        single-segment GPX document and write it to disk."""
        node_lookup = graph.nodes()
        document = gpxpy.GPX()
        document.name = self._walk_name
        track = gpxpy.GPXTrack()
        track.name = self._walk_name
        segment = gpxpy.GPXTrackSegment()
        track.segments.append(segment)
        for node_id in route:
            node = node_lookup[node_id]
            segment.points.append(
                gpxpy.GPXTrackPoint(latitude=node['y'], longitude=node['x']))
        document.tracks.append(track)
        target = self._render_dir.joinpath(f'{self._walk_slug}.gpx')
        with open(target, 'w') as fp:
            fp.write(document.to_xml())
def _ensure_dir_exists(path):
    # Create the directory (and any missing parents); no-op when it exists.
    # `path` is a pathlib.Path — both callers construct it via Path(...).
    path.mkdir(parents=True, exist_ok=True)
|
# -*- coding: utf-8 -*-
# !/usr/bin/python
#
# Copyright (c) Contributors to the Open 3D Engine Project.
# For complete copyright and license terms please see the LICENSE at the root of this distribution.
#
# SPDX-License-Identifier: Apache-2.0 OR MIT
#
#
# -------------------------------------------------------------------------
"""
Usage
=====
Put usage instructions here.
Output
======
Put output information here.
Relevant Links:
https://pythonhosted.org/an_example_pypi_project/sphinx.html
https://github.com/ideasman42/pyfbx_i42
https://pypi.org/project/py-fbx/
https://www.quora.com/How-do-I-execute-Maya-script-without-lauching-Maya
https://stackoverflow.com/questions/27437733/use-external-python-script-to-open-maya-and-run-another-script-inside-maya
Notes:
-- Materials information can be extracted from ASCII fbx pretty easily
-- binary is possible but more difficult
-- FBX files could be exported as ASCII files
-- I could use regex there to extract material information
-- I couldn't get pyfbx_i42 to work,
-- ^ purportedly it can extract information from binary files.
-- You may just have to use the specified python versions
-- Jonny wants me to use pathlib wherever possible for OO pathing, as well as python-box (aka Box) for dict access
# Things to do:
--> Create the cube demo for Jonny with 4 attached materials
--> Create mapping widget for stacked layout
--> Allow export of JSON file for demo
--> Allow custom field entries for description, etc.
--> Need to figure out how to clear pointers for stored data properly
"""
from PySide2 import QtWidgets, QtCore, QtGui
from PySide2.QtCore import Signal, Slot, QThread, QAbstractItemModel, QModelIndex, QObject
from maya import OpenMayaUI as omui
from maya.standalone import initialize
from shiboken2 import wrapInstance
import pymel.core as pm
# import sphinx
# import azpy
import json
import os
import re
# Anchor this tool's window to Maya's main window.
# NOTE: `long` is a Python 2 builtin; this script targets Maya's Python 2
# interpreter (see the Python-2 print statements further down).
mayaMainWindowPtr = omui.MQtUtil.mainWindow()
mayaMainWindow = wrapInstance(long(mayaMainWindowPtr), QtWidgets.QWidget)
class MayaToLumberyard(QtWidgets.QWidget):
    def __init__(self, parent=None):
        """Build the full tool UI and seed state from the open scene.

        Layout: a header bar (file selection + page combobox), a stacked
        content area (files table / material tree / material definitions
        pages) and the process/reset buttons at the bottom.
        """
        super(MayaToLumberyard, self).__init__(parent)
        self.app = QtWidgets.QApplication.instance()
        self.setParent(mayaMainWindow)
        self.setWindowFlags(QtCore.Qt.Window)
        self.setGeometry(50, 50, 600, 500)
        self.setObjectName('MaterialsToLumberyard')
        self.setWindowTitle('Maya To Lumberyard')
        self.isTopLevel()
        self.setWindowFlags(self.windowFlags() & ~QtCore.Qt.WindowMinMaxButtonsHint)
        self.desktop_location = os.path.join(os.path.expanduser('~'), 'Desktop')
        self.bold_font_large = QtGui.QFont('Plastique', 7, QtGui.QFont.Bold)
        self.medium_font = QtGui.QFont('Plastique', 7, QtGui.QFont.Normal)
        # Mutable tool state.
        self.target_file_list = []            # files queued for processing
        self.materials_dict = {}              # extracted data, keyed 'Material_<n>'
        self.material_definitions = {}        # Lumberyard material descriptions
        self.processed_materials = []
        self.current_scene = pm.sceneName()
        self.model = None                     # MaterialsModel, built on demand
        self.total_transfer_materials = 1     # running key counter
        self.main_container = QtWidgets.QVBoxLayout(self)
        self.main_container.setAlignment(QtCore.Qt.AlignTop)
        self.setLayout(self.main_container)
        # Header Bar ------>
        self.header_bar_layout = QtWidgets.QHBoxLayout()
        self.select_files_button = QtWidgets.QPushButton('Select Files')
        self.select_files_button.clicked.connect(self.choose_files_clicked)
        self.select_files_button.setFixedSize(80, 35)
        self.header_bar_layout.addWidget(self.select_files_button)
        self.header_bar_layout.addSpacing(15)
        self.use_current_file_checkbox = QtWidgets.QCheckBox('Use Current File')
        self.use_current_file_checkbox.setFont(self.bold_font_large)
        self.use_current_file_checkbox.clicked.connect(self.use_current_file_clicked)
        self.header_bar_layout.addWidget(self.use_current_file_checkbox)
        self.header_bar_layout.addSpacing(100)
        self.switch_layout_combobox = QtWidgets.QComboBox()
        self.switch_layout_combobox.setEnabled(False)
        self.switch_layout_combobox.setFixedSize(250, 30)
        self.combobox_items = ['Target Files', 'Extracted Values', 'Material Tree']
        self.switch_layout_combobox.setStyleSheet('QComboBox {padding-left:6px;}')
        self.switch_layout_combobox.addItems(self.combobox_items)
        self.header_bar_layout.addWidget(self.switch_layout_combobox)
        self.header_bar_layout.addSpacing(4)
        self.main_container.addSpacing(5)
        self.main_container.addLayout(self.header_bar_layout)
        # Separation Line ------>
        self.separatorLayout1 = QtWidgets.QHBoxLayout()
        self.line1 = QtWidgets.QLabel()
        self.line1.setFrameStyle(QtWidgets.QFrame.HLine | QtWidgets.QFrame.Sunken)
        self.line1.setLineWidth(1)
        self.line1.setFixedHeight(10)
        self.separatorLayout1.addWidget(self.line1)
        self.main_container.addLayout(self.separatorLayout1)
        # ++++++++++++++++++++++++++++++++++++++++++++++++#
        # File Source Table / Attributes (Stacked Layout) #
        # ++++++++++++++++++++++++++++++++++++++++++++++++#
        self.content_stacked_layout = QtWidgets.QStackedLayout()
        self.main_container.addLayout(self.content_stacked_layout)
        self.switch_layout_combobox.currentIndexChanged.connect(self.layout_combobox_changed)
        # --- Files Table
        self.target_files_table = QtWidgets.QTableWidget()
        self.target_files_table.setFocusPolicy(QtCore.Qt.NoFocus)
        self.target_files_table.setColumnCount(2)
        self.target_files_table.setAlternatingRowColors(True)
        self.target_files_table.setHorizontalHeaderLabels(['File List', ''])
        self.target_files_table.horizontalHeader().setStyleSheet('QHeaderView::section {padding-top:9px; padding-left:10px;}')
        self.target_files_table.verticalHeader().hide()
        files_header = self.target_files_table.horizontalHeader()
        files_header.setFixedHeight(30)
        files_header.setDefaultAlignment(QtCore.Qt.AlignLeft)
        files_header.setContentsMargins(10, 10, 0, 0)
        files_header.setSectionResizeMode(0, QtWidgets.QHeaderView.Stretch)
        files_header.setSectionResizeMode(1, QtWidgets.QHeaderView.ResizeToContents)
        self.target_files_table.setSelectionMode(QtWidgets.QAbstractItemView.NoSelection)
        self.content_stacked_layout.addWidget(self.target_files_table)
        # --- Scene Information Table
        self.material_tree_view = QtWidgets.QTreeView()
        self.headers = ['Key', 'Value']
        self.material_tree_view.setStyleSheet('QTreeView::item {height:25px;} QHeaderView::section {height:30px; padding-left:10px}')
        self.material_tree_view.setFocusPolicy(QtCore.Qt.NoFocus)
        self.material_tree_view.setAlternatingRowColors(True)
        self.material_tree_view.setUniformRowHeights(True)
        self.content_stacked_layout.addWidget(self.material_tree_view)
        # --- LY Material Definitions
        self.material_definitions_widget = QtWidgets.QWidget()
        self.material_definitions_layout = QtWidgets.QHBoxLayout(self.material_definitions_widget)
        self.material_definitions_layout.setSpacing(0)
        self.material_definitions_layout.setContentsMargins(0, 0, 0, 0)
        self.material_definitions_frame = QtWidgets.QFrame(self.material_definitions_widget)
        self.material_definitions_frame.setGeometry(0, 0, 5000, 5000)
        self.material_definitions_frame.setStyleSheet('background-color:rgb(150,150,150);')
        self.title_bar_widget = QtWidgets.QWidget()
        self.title_bar_layout = QtWidgets.QHBoxLayout(self.title_bar_widget)
        self.title_bar_layout.setContentsMargins(17, 17, 17, 0)
        self.title_bar_layout.setAlignment(QtCore.Qt.AlignTop)
        self.title_bar_frame = QtWidgets.QFrame(self.title_bar_widget)
        self.title_bar_frame.setGeometry(0, 0, 5000, 60)
        self.title_bar_frame.setStyleSheet('background-color:rgb(102,69,153);')
        self.material_definitions_layout.addWidget(self.title_bar_widget)
        self.material_name = QtWidgets.QCheckBox('StingrayPBS2')
        self.material_name.setStyleSheet('spacing:10px; color:white')
        self.material_name.setFont(self.bold_font_large)
        self.material_name.setChecked(True)
        self.title_bar_layout.addWidget(self.material_name)
        # Forward/Back Buttons -------------------------->>
        self.previous_next_button_layout = QtWidgets.QHBoxLayout()
        self.item_count = QtWidgets.QLabel('1 of 10')
        self.previous_next_button_layout.addWidget(self.item_count)
        self.previous_next_button_layout.addSpacing(10)
        self.previous_next_button_layout.setContentsMargins(0, 0, 0, 0)
        self.previous_next_button_layout.setAlignment(QtCore.Qt.AlignRight)
        self.previous_button = QtWidgets.QToolButton()
        self.previous_button.setArrowType(QtCore.Qt.LeftArrow)
        self.previous_button.setStyleSheet('background-color:rgb(110,110,110);')
        self.previous_button.clicked.connect(self.previous_button_clicked)
        self.previous_next_button_layout.addWidget(self.previous_button)
        self.next_button = QtWidgets.QToolButton()
        self.next_button.setArrowType(QtCore.Qt.RightArrow)
        self.next_button.setStyleSheet('background-color:rgb(110,110,110);')
        self.next_button.clicked.connect(self.next_button_clicked)
        self.previous_next_button_layout.addWidget(self.next_button)
        self.title_bar_layout.addLayout(self.previous_next_button_layout)
        self.content_stacked_layout.addWidget(self.material_definitions_widget)
        # File processing buttons ------>
        self.process_files_layout = QtWidgets.QHBoxLayout()
        self.main_container.addLayout(self.process_files_layout)
        self.process_files_button = QtWidgets.QPushButton('Process Listed Files')
        self.process_files_button.setFixedHeight(50)
        self.process_files_button.clicked.connect(self.process_files_clicked)
        self.process_files_layout.addWidget(self.process_files_button)
        self.reset_button = QtWidgets.QPushButton('Reset')
        self.reset_button.setFixedSize(50, 50)
        self.reset_button.clicked.connect(self.reset_clicked)
        self.reset_button.setEnabled(False)
        self.process_files_layout.addWidget(self.reset_button)
        self.initialize_window()
def initialize_window(self):
if self.current_scene:
self.target_file_list = [self.current_scene]
self.use_current_file_checkbox.setChecked(True)
self.populate_files_table()
def populate_files_table(self):
self.target_files_table.setRowCount(0)
for index, entry in enumerate(self.target_file_list):
entry = entry[1] if type(entry) == list else entry
self.target_files_table.insertRow(index)
item = QtWidgets.QTableWidgetItem(' {}'.format(entry))
self.target_files_table.setRowHeight(index, 45)
remove_button = QtWidgets.QPushButton(' Remove ')
remove_button.setStyleSheet('border-width:0px; background-color:rgb(100,100,100);')
remove_button.clicked.connect(self.remove_file_clicked)
self.target_files_table.setItem(index, 0, item)
self.target_files_table.setCellWidget(index, 1, remove_button)
def process_file_list(self):
file_processing_errors = []
for maya_file_location in self.target_file_list:
try:
if maya_file_location != self.current_scene:
pm.openFile(maya_file_location, force=True)
self.current_scene = maya_file_location
self.get_scene_materials_description()
except Exception as e:
file_processing_errors.append([maya_file_location, e])
# Create Model with extracted values from file list
self.set_material_model()
# Setup Lumberyard Material File Values
self.map_materials()
print ('MaterialDefinitions:'.format(self.material_definitions))
print json.dumps(self.material_definitions, sort_keys=True, indent=4)
# Update UI Layout
self.set_material_view()
self.switch_layout_combobox.setCurrentIndex(2)
self.set_ui_buttons()
    def map_materials(self):
        """Translate every top-level material item in self.model into a
        Lumberyard material description via set_pbr_material_description.

        For each material row, the 'FileConnections' and (other) child
        groups are flattened into plain dicts keyed by attribute name.
        """
        root = self.model.rootItem
        for row in range(self.model.rowCount()):
            name = self.model.get_attribute_value('MaterialName', root.child(row))
            material_type = self.model.get_attribute_value('MaterialType', root.child(row))
            file_connections = {}
            shader_attributes = {}
            for childIndex in range(root.child(row).childCount()):
                child_item = root.child(row).child(childIndex)
                child_value = child_item.itemData
                if child_item.childCount():
                    # Route grandchildren into the matching bucket by the
                    # group's label (itemData[0]).
                    target_dict = file_connections if child_value[0] == 'FileConnections' else shader_attributes
                    for subChildIndex in range(child_item.childCount()):
                        sub_child_data = child_item.child(subChildIndex).itemData
                        target_dict[sub_child_data[0]] = sub_child_data[1]
            self.set_pbr_material_description(name, material_type, file_connections)
    def reset_all_values(self):
        """Placeholder: restore the tool to its initial state.

        Not implemented yet -- the sketch below is retained from the
        original author (clearing stored model data safely is unresolved).
        """
        pass
        # Need to figure out how to clear pointers for stored data properly
        # self.target_files_table.setRowCount(0)
        # self.model.beginResetModel()
        # self.model.qDeleteAll(mResults)
        # self.model.endResetModel()
        # self.initialize_window()
############################
# Getters/Setters ##########
############################
@staticmethod
def get_materials(target_mesh):
shading_group = pm.listConnections(pm.PyNode(target_mesh), type='shadingEngine')
materials = pm.ls(pm.listConnections(shading_group), materials=1)
return list(set(materials))
@staticmethod
def get_shader(material_name):
connections = pm.listConnections(material_name, type='shadingEngine')[0]
shader_name = '{}.surfaceShader'.format(connections)
shader = pm.listConnections(shader_name)[0]
return shader
@staticmethod
def get_shader_information(shader):
shader_file_connections = {}
for node in pm.listConnections(shader, type='file', c=True):
shader_file_connections[str(node[0])] = str(pm.getAttr(node[1].fileTextureName))
shader_attributes = {}
for shader_attribute in pm.listAttr(shader, s=True, iu=True):
try:
shader_attributes[str(shader_attribute)] = pm.getAttr('{}.{}'.format(shader, shader_attribute))
except pm.MayaAttributeError as e:
print ('MayaAttributeError: {}'.format(e))
return shader_file_connections, shader_attributes
@staticmethod
def get_shader_properties(name, material_type, file_connections):
""" This system will probably need rethinking if DCCs and compatible materials grow """
attr_list = {}
if material_type == 'StingrayPBS':
naming_exceptions = {'color': 'baseColor', 'ao': 'ambientOcclusion'}
maps = 'color, metallic, roughness, normal, emissive, ao, opacity'.split(', ')
for m in maps:
texture_attribute = 'TEX_{}_map'.format(m)
for tex in file_connections.keys():
if tex.find(texture_attribute) != -1:
key = m if m not in naming_exceptions else naming_exceptions.get(m)
attr_list[key] = {'useTexture': 'true',
'textureMap': file_connections.get('{}.{}'.format(name, texture_attribute))}
return attr_list
@staticmethod
def get_increment(name):
last_number = re.compile(r'(?:[^\d]*(\d+)[^\d]*)+')
number_found = last_number.search(name)
if number_found:
next_number = str(int(number_found.group(1)) + 1)
start, end = number_found.span(1)
name = name[:max(end - len(next_number), start)] + next_number + name[end:]
return name
@staticmethod
def get_material_template(shader_type):
definitions = os.path.join(os.path.dirname(os.path.abspath(__file__)), '{}.material'.format(shader_type))
if os.path.exists(definitions):
with open(definitions) as f:
return json.load(f)
def get_scene_materials_description(self):
scene_geo = pm.ls(v=True, geometry=True)
for target_mesh in scene_geo:
material_list = self.get_materials(target_mesh)
for material_name in material_list:
material_type = pm.nodeType(material_name, api=True)
material_listed = [x for x in self.materials_dict if self.materials_dict[x]['MaterialName'] == material_name]
if not material_listed:
self.set_material_dict(str(material_name), str(material_type), target_mesh)
else:
mesh_list = self.materials_dict[material_name].get('AppliedMesh')
if not isinstance(mesh_list, list):
self.materials_dict[material_name]['AppliedMesh'] = [mesh_list, target_mesh]
else:
mesh_list.append(target_mesh)
    def set_material_dict(self, material_name, material_type, material_mesh):
        """Record one material's extracted data in self.materials_dict.

        Entries are keyed 'Material_<n>' using the running
        total_transfer_materials counter, not by the Maya material name
        (the name is stored inside the record as 'MaterialName').
        """
        shader = self.get_shader(material_name)
        shader_file_connections, shader_attributes = self.get_shader_information(shader)
        material_dict = {'MaterialName': material_name, 'MaterialType': material_type, 'AppliedMesh': material_mesh,
                         'FileConnections': shader_file_connections, 'SceneName': str(self.current_scene),
                         'MaterialAttributes': shader_attributes}
        # Rebind the local to the generated storage key.
        material_name = 'Material_{}'.format(self.total_transfer_materials)
        self.materials_dict[material_name] = material_dict
        self.total_transfer_materials += 1
    def set_material_model(self):
        """Build the tree model from the collected materials.

        MaterialsModel is defined elsewhere in this module/project.
        """
        self.model = MaterialsModel(self.headers, self.materials_dict)
    def set_material_view(self):
        """Attach the materials model to the tree view, expand all rows and
        size the key column to its contents."""
        self.material_tree_view.setModel(self.model)
        self.material_tree_view.expandAll()
        self.material_tree_view.resizeColumnToContents(0)
def set_pbr_material_description(self, name, material_type, file_connections):
# Build dictionary for material description based on extracted values
default_settings = self.get_material_template('standardpbr.template')
material = {'description': name,
'materialType': default_settings.get('materialType'),
'parentMaterial': default_settings.get('parentMaterial'),
'materialTypeVersion': default_settings.get('materialTypeVersion'),
'properties': self.get_shader_properties(name, material_type, file_connections)}
self.material_definitions[name if name not in self.material_definitions.keys() else self.get_increment(name)] = material
    def set_ui_buttons(self):
        """Enable/disable controls to match the page currently shown by the
        stacked layout (0: target files, 1: extracted values, 2: material tree).
        """
        display_index = self.content_stacked_layout.currentIndex()
        self.switch_layout_combobox.setEnabled(True)
        # Target Files
        if display_index == 0:
            self.use_current_file_checkbox.setEnabled(True)
            self.select_files_button.setEnabled(True)
            self.reset_button.setEnabled(True)
            self.process_files_button.setText('Process Listed Files')
        # Extracted Values
        elif display_index == 1:
            self.reset_button.setEnabled(True)
            self.process_files_button.setEnabled(False)
            self.use_current_file_checkbox.setEnabled(False)
            self.select_files_button.setEnabled(False)
        # Material Tree
        else:
            self.use_current_file_checkbox.setEnabled(False)
            self.select_files_button.setEnabled(False)
            self.process_files_button.setText('Export Selected Materials')
            # Export is only possible once at least one material was extracted.
            if self.material_definitions:
                self.process_files_button.setEnabled(True)
############################
# Button Actions ###########
############################
def use_current_file_clicked(self):
self.current_scene = pm.sceneName()
if self.use_current_file_checkbox.isChecked():
self.target_file_list.insert(0, self.current_scene)
self.target_file_list = list(set(self.target_file_list))
else:
if self.current_scene in self.target_file_list:
del self.target_file_list[self.target_file_list.index(self.current_scene)]
self.populate_files_table()
    def remove_file_clicked(self):
        """Remove the table row whose inline button emitted this signal."""
        file_index = self.target_files_table.indexAt(self.sender().pos())
        target_file = self.target_file_list[file_index.row()]
        # Keep the "use current file" checkbox in sync when the open scene
        # itself is the row being removed.
        if target_file == pm.sceneName():
            self.use_current_file_checkbox.setChecked(False)
        del self.target_file_list[file_index.row()]
        self.populate_files_table()
    def process_files_clicked(self):
        """Kick off processing of all files currently in the target list."""
        self.process_file_list()
    def choose_files_clicked(self):
        """Open a multi-select file dialog and replace the target file list
        with the chosen Maya/FBX files."""
        dialog = QtWidgets.QFileDialog(self, 'Shift-Select Target Files', self.desktop_location)
        dialog.setFileMode(QtWidgets.QFileDialog.ExistingFile)
        dialog.setNameFilter('Maya Files (*.ma *.mb *.fbx)')
        # A non-native dialog is required so its child views can be found below.
        dialog.setOption(QtWidgets.QFileDialog.DontUseNativeDialog, True)
        file_view = dialog.findChild(QtWidgets.QListView, 'listView')
        # Workaround for selecting multiple files with File Dialog
        if file_view:
            file_view.setSelectionMode(QtWidgets.QAbstractItemView.MultiSelection)
        f_tree_view = dialog.findChild(QtWidgets.QTreeView)
        if f_tree_view:
            f_tree_view.setSelectionMode(QtWidgets.QAbstractItemView.MultiSelection)
        if dialog.exec_() == QtWidgets.QDialog.Accepted:
            self.target_file_list = dialog.selectedFiles()
        if self.target_file_list:
            self.populate_files_table()
            self.process_files_button.setEnabled(True)
    def layout_combobox_changed(self):
        """Switch the stacked layout to the page chosen in the combobox."""
        self.content_stacked_layout.setCurrentIndex(self.switch_layout_combobox.currentIndex())
        self.set_ui_buttons()
    def reset_clicked(self):
        """Restore the tool to its initial, empty state."""
        self.reset_all_values()
    def previous_button_clicked(self):
        # Placeholder handler; navigation is not implemented yet.
        print ('Previous button clicked')
    def next_button_clicked(self):
        # Placeholder handler; navigation is not implemented yet.
        print ('Next button clicked')
class MaterialsModel(QAbstractItemModel):
    """Read-only two-column tree model built from a nested materials dict.

    Column 0 holds the key, column 1 the stringified value; nested dicts
    become child rows.
    """
    def __init__(self, headers, data, parent=None):
        super(MaterialsModel, self).__init__(parent)
        self.rootItem = TreeNode(headers)
        # Stack of current ancestor nodes / their indentation levels, used by
        # create_data to map recursion depth onto tree depth.
        self.parents = [self.rootItem]
        self.indentations = [0]
        self.create_data(data)
    def create_data(self, data, indent=-1):
        """Recursively convert *data* (a nested dict) into TreeNode rows.

        The indentation stack mirrors recursion depth: deeper dicts attach to
        the most recently added node; shallower ones pop back up the stack.
        NOTE(review): dict.iteritems() makes this Python-2 only.
        """
        if type(data) == dict:
            indent += 1
            position = 4 * indent
            for key, value in data.iteritems():
                if position > self.indentations[-1]:
                    # Descend: the previous row becomes the new parent.
                    if self.parents[-1].childCount() > 0:
                        self.parents.append(self.parents[-1].child(self.parents[-1].childCount() - 1))
                        self.indentations.append(position)
                else:
                    # Ascend until the stack matches the current depth.
                    while position < self.indentations[-1] and len(self.parents) > 0:
                        self.parents.pop()
                        self.indentations.pop()
                parent = self.parents[-1]
                parent.insertChildren(parent.childCount(), 1, parent.columnCount())
                parent.child(parent.childCount() - 1).setData(0, key)
                # Dict values get an empty value cell; their contents become children.
                value_string = str(value) if type(value) != dict else str('')
                parent.child(parent.childCount() - 1).setData(1, value_string)
                try:
                    self.create_data(value, indent)
                except RuntimeError:
                    pass
    @staticmethod
    def get_attribute_value(search_string, search_column):
        """Return the column-1 value of the child of *search_column* whose
        column-0 key equals *search_string*, or None when absent."""
        for childIndex in range(search_column.childCount()):
            child_item = search_column.child(childIndex)
            child_value = child_item.itemData
            if child_value[0] == search_string:
                return child_value[1]
        return None
    def index(self, row, column, index=QModelIndex()):
        """ Returns the index of the item in the model specified by the given row, column and parent index """
        if not self.hasIndex(row, column, index):
            return QModelIndex()
        if not index.isValid():
            item = self.rootItem
        else:
            item = index.internalPointer()
        child = item.child(row)
        if child:
            return self.createIndex(row, column, child)
        return QModelIndex()
    def parent(self, index):
        """
        Returns the parent of the model item with the given index If the item has no parent,
        an invalid QModelIndex is returned
        """
        if not index.isValid():
            return QModelIndex()
        item = index.internalPointer()
        if not item:
            return QModelIndex()
        parent = item.parentItem
        if parent == self.rootItem:
            return QModelIndex()
        else:
            return self.createIndex(parent.childNumber(), 0, parent)
    def rowCount(self, index=QModelIndex()):
        """
        Returns the number of rows under the given parent. When the parent is valid it means that
        rowCount is returning the number of children of parent
        """
        if index.isValid():
            parent = index.internalPointer()
        else:
            parent = self.rootItem
        return parent.childCount()
    def columnCount(self, index=QModelIndex()):
        """ Returns the number of columns for the children of the given parent """
        return self.rootItem.columnCount()
    def data(self, index, role=QtCore.Qt.DisplayRole):
        """ Returns the data stored under the given role for the item referred to by the index """
        if index.isValid() and role == QtCore.Qt.DisplayRole:
            return index.internalPointer().data(index.column())
        elif not index.isValid():
            return self.rootItem.data(index.column())
    def headerData(self, section, orientation, role=QtCore.Qt.DisplayRole):
        """ Returns the data for the given role and section in the header with the specified orientation """
        if orientation == QtCore.Qt.Horizontal and role == QtCore.Qt.DisplayRole:
            return self.rootItem.data(section)
class TreeNode(object):
    """A node in a simple multi-column tree, used by MaterialsModel.

    Exposes ``parentItem``, ``itemData`` and ``children`` directly because
    the model reads them without accessors.
    """

    def __init__(self, data, parent=None):
        self.parentItem = parent   # owning node; None for the root
        self.itemData = data       # per-column cell values
        self.children = []         # ordered child nodes

    def child(self, row):
        """Return the child node at *row*."""
        return self.children[row]

    def childCount(self):
        """Return the number of direct children."""
        return len(self.children)

    def childNumber(self):
        """Return this node's row index under its parent (None for the root)."""
        if self.parentItem is None:
            return None
        return self.parentItem.children.index(self)

    def columnCount(self):
        """Return how many columns this node carries."""
        return len(self.itemData)

    def data(self, column):
        """Return the cell value stored in *column*."""
        return self.itemData[column]

    def insertChildren(self, position, count, columns):
        """Insert *count* children at *position*, each pre-filled with
        *columns* placeholder values; False for an out-of-range position."""
        if not 0 <= position <= len(self.children):
            return False
        for _ in range(count):
            placeholder = list(range(columns))
            self.children.insert(position, TreeNode(placeholder, self))

    def parent(self):
        """Return the parent node (None for the root)."""
        return self.parentItem

    def setData(self, column, value):
        """Store *value* in *column*; False when the column is out of range."""
        if not 0 <= column < len(self.itemData):
            return False
        self.itemData[column] = value
def delete_instances():
    """Delete any existing MayaToLumberyard windows parented to the Maya main
    window so only a single instance of the tool exists at a time."""
    for obj in mayaMainWindow.children():
        # NOTE(review): type is compared by string, presumably to survive
        # module reloads where isinstance() would fail — confirm.
        if str(type(obj)) == "<class 'DCC_Materials.maya_materials_export.MayaToLumberyard'>":
            if obj.__class__.__name__ == "MayaToLumberyard":
                obj.setParent(None)
                obj.deleteLater()
def show_ui():
    """Show the tool window, replacing any previously open instance."""
    delete_instances()
    ui = MayaToLumberyard(mayaMainWindow)
    ui.show()
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
__author__ = "Muhammad Bilal Shaikh"
__copyright__ = "Copyright 2021, AR-ResNetX"
__license__ = "GPL"
__version__ = "1.0.1"
__email__ = "mbs.techy@gmail.com"
__status__ = "Production"
import pickle
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
import pretrainedmodels
from tqdm import tqdm
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn import metrics
import torch.optim as optim
# import torchvision module to handle image manipulation
import torchvision
from torchvision import transforms
import torchvision.transforms as transforms
from torch.autograd import Variable
import configparser
# calculate train time, writing train data to files etc.
import time
import numpy as np
from sklearn.svm import SVC
def extract_features(model, dl):
    """Run *model* over every batch in *dl* and collect its outputs.

    Args:
        model: torch module used as a fixed feature extractor (set to eval).
        dl: iterable yielding (images, labels) batches.

    Returns:
        tuple: ``(features, lbls)`` where *features* is a numpy array of the
        concatenated outputs and *lbls* is the list of per-batch label
        tensors (flattened later by ``flatten_list``).

    Raises:
        ValueError: if *dl* yields no batches (the original crashed with an
        AttributeError on ``None`` in that case).
    """
    lbls = []
    model.eval()
    # Fall back to CPU so extraction also runs on machines without CUDA
    # (the original hard-coded 'cuda:0' and model.cuda()).
    device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
    model.to(device)
    outputs = []
    with torch.no_grad():
        # The tqdm(..., disable=True) wrapper was a no-op; iterate directly.
        for batch in dl:
            images = batch[0]
            labels = batch[1]
            images = images.to(device)
            outputs.append(model(images))
            lbls.append(labels)
    if not outputs:
        raise ValueError('extract_features received an empty data loader')
    # Concatenate once at the end instead of re-allocating a growing tensor
    # on every batch.
    features = torch.cat(outputs, 0)
    return (features.cpu().numpy(), lbls)
def flatten_list(t):
    """Flatten one level of nesting in *t* and return it as a numpy array."""
    flattened = []
    for sublist in t:
        flattened.extend(sublist)
    return np.array(flattened)
def train(TRAIN_DATA_PATH, TEST_DATA_PATH, HPARAM):
    """Build data loaders, load a frozen InceptionResNetV2 backbone, run one
    (no-op) fine-tuning pass, then extract features for train and test sets.

    Args:
        TRAIN_DATA_PATH: root folder of the training ImageFolder dataset.
        TEST_DATA_PATH: root folder of the test ImageFolder dataset.
        HPARAM: dict of hyper-parameter strings ('epoch', 'batch_size', 'l_r').

    Returns:
        tuple: (train_feat, train_lbls, test_feat, test_lbls).
    """
    print(torch.__version__)
    print(torchvision.__version__)
    # set device
    # NOTE(review): ``device`` is only bound inside this branch; on a
    # CPU-only machine the nested train() below raises NameError — confirm
    # intended CUDA-only usage.
    if torch.cuda.is_available():
        device = torch.device(('cuda:0'
                              if torch.cuda.is_available() else 'cpu'))
        print (torch.cuda.get_device_properties(device),
               torch.cuda.set_device(device),
               torch.cuda.current_device())
    transform = transforms.Compose([transforms.Resize(299),
            transforms.RandomHorizontalFlip(),
            transforms.RandomVerticalFlip(), transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406],
            std=[0.229, 0.224, 0.225])])  # preffered size for network
    train_data = torchvision.datasets.ImageFolder(root=TRAIN_DATA_PATH,
            transform=transform)
    test_data = torchvision.datasets.ImageFolder(root=TEST_DATA_PATH,
            transform=transform)
    train_data_loader = torch.utils.data.DataLoader(train_data,
            batch_size=int(HPARAM['batch_size']), shuffle=True,
            num_workers=4)
    test_data_loader = torch.utils.data.DataLoader(test_data,
            batch_size=int(HPARAM['batch_size']), shuffle=True,
            num_workers=4)
    # prepare model
    model_name = 'inceptionresnetv2'  # could be fbresnet152 or inceptionresnetv2
    model = pretrainedmodels.__dict__[model_name](num_classes=1000,
            pretrained='imagenet')
    # Replacing the classifier head with Identity exposes raw features.
    model.last_linear = nn.Identity()  # freeze the model
    # num_ftrs = model.last_linear.in_features
    # model.last_linear = nn.Linear(num_ftrs, 50)
    # All parameters frozen: the backbone is used as a fixed extractor.
    for p in model.parameters():
        p.requires_grad = False
    # num_ftrs = model.last_linear.in_features
    # Here the size of each output sample is set to 2.
    # Alternatively, it can be generalized to nn.Linear(num_ftrs, len(class_names)).
    # model.fc = nn.Linear(num_ftrs, 50)
    PATH = 'models/IRv2.pt'
    torch.save(model, PATH)
    # model = torch.load(PATH)
    # NOTE(review): lr is hard-coded; HPARAM['l_r'] is never used — confirm.
    optimizer = optim.Adam(model.parameters(), lr=0.0005)
    def train(model, loader, epochs=1):
        # NOTE(review): with all params frozen and the loss re-wrapped in a
        # fresh Variable(requires_grad=True) below, backward() cannot update
        # the model — this loop is effectively a no-op warm pass. Confirm.
        model.to(device)
        model.train()
        print('Training...')
        for epoch in range(epochs):
            start = time.time()
            model.train()
            running_loss = 0
            # NOTE(review): the second enumerate() argument is the *start
            # index*, not an epoch count — passing HPARAM['epoch'] here only
            # offsets ``i``; confirm intent.
            for (i, batch) in enumerate(loader, int(HPARAM['epoch'])):
                images = batch[0]
                labels = batch[1]
                images = images.to(device)
                labels = labels.to(device)
                preds = model(images)
                loss = F.cross_entropy(preds, labels)  # Adam, SGD, RSPROP
                optimizer.zero_grad()
                # NOTE(review): wrapping the loss in a new Variable detaches
                # it from the computation graph — gradients do not flow to
                # the model. Confirm this is intentional.
                loss = Variable(loss, requires_grad=True)
                loss.backward()
                optimizer.step()
                running_loss += loss.data
                if i % 10 == 9:
                    end = time.time()
                    # print ('[epoch %d,imgs %5d] time: %0.3f s'%(epoch+1,(i+1)*4,(end-start)))
                    # print("[epoch %d,imgs %5d] loss: %.7f time: %0.3f s" % (epoch + 1, (i + 1) * 4, running_loss / 100, (end - start)) )
                    # tb.add_scalar('Loss', loss, epoch+1)
                    start = time.time()
                    running_loss = 0
    train(model, train_data_loader)
    print('Extracting Features...')
    (train_feat, train_lbls) = extract_features(model,
            train_data_loader)
    (test_feat, test_lbls) = extract_features(model, test_data_loader)
    return (train_feat, train_lbls, test_feat, test_lbls)
# randomforest, logisticregression, SVM , KNN, LD,
def get_vis_features(trainpath=None, testpath=None):
    """Load pre-computed visual features from pickle files.

    Args:
        trainpath: pickle file with training features; defaults to the
            original hard-coded mmaction2 path.
        testpath: pickle file with test features; same default behaviour.

    Returns:
        tuple: (train_features, test_features) as numpy arrays.
    """
    if trainpath is None:
        trainpath = ('/home/muhammadbsheikh/workspace/projects/mmaction/'
                     'mmaction2/train_ucf50_feature.pkl')
    if testpath is None:
        testpath = ('/home/muhammadbsheikh/workspace/projects/mmaction/'
                    'mmaction2/test_ucf50_feature.pkl')
    # Context managers close the handles; the original left both files open.
    with open(trainpath, 'rb') as trainfile, open(testpath, 'rb') as testfile:
        return (np.array(pickle.load(trainfile)),
                np.array(pickle.load(testfile)))
def svm(
    X,
    Y,
    x_lbls,
    y_lbls,
    ):
    """Fit SVM, KNN and random-forest classifiers on the training split and
    print each model's accuracy on the test split.

    Args:
        X: training feature matrix.
        Y: test feature matrix.
        x_lbls: training labels.
        y_lbls: test labels.
    """
    print ('Train-Without FFT')
    # Renamed local from ``svm`` to ``svm_clf``: the original shadowed this
    # function's own name.
    svm_clf = SVC(kernel='linear').fit(X, x_lbls)
    preds = svm_clf.predict(Y)
    print ('SVM Accuracy:', metrics.accuracy_score(y_lbls, preds))
    knn_clf = KNeighborsClassifier(n_neighbors=3).fit(X, x_lbls)
    knn_preds = knn_clf.predict(Y)
    print ('KNN Accuracy:', metrics.accuracy_score(y_lbls, knn_preds))
    # Random Forest
    rf_clf = RandomForestClassifier(n_estimators=100).fit(X, x_lbls)
    rf_preds = rf_clf.predict(Y)
    print ('RF Accuracy:', metrics.accuracy_score(y_lbls, rf_preds))
def config():
    """Read ``./config.ini`` and prepare runtime settings.

    Side effects: sets CUDA_VISIBLE_DEVICES, torch print/grad options, and
    chdirs into the configured work directory.

    Returns:
        tuple: (trainpath, testpath, HPARAM) where HPARAM is a dict of the
        raw hyper-parameter strings from the [hparam] section.
    """
    # Pass the interpolation to the constructor; assigning the private
    # ``_interpolation`` attribute after construction (as the original did)
    # relies on an undocumented internal.
    settings = configparser.ConfigParser(
        interpolation=configparser.ExtendedInterpolation())
    settings.read(os.path.join(os.getcwd(), 'config.ini'))
    trainpath = settings.get('data', 'trainpath')
    testpath = settings.get('data', 'testpath')
    os.environ['CUDA_VISIBLE_DEVICES'] = settings.get('sys', 'gpu')
    torch.set_printoptions(linewidth=120)
    torch.set_grad_enabled(True)  # On by default, leave it here for clarity
    os.chdir(settings.get('sys', 'work_dir'))
    HPARAM = {'epoch': settings.get('hparam', 'epoch'),
              'batch_size': settings.get('hparam', 'batch_size'),
              'l_r': settings.get('hparam', 'l_r')}
    return (trainpath, testpath, HPARAM)
def main():
    """Extract audio features with the CNN backbone, fuse them with the
    pre-computed visual features, and score the fusion with SVM/KNN/RF."""
    # Get audio features
    (trainpath, testpath, HPARAM) = config()
    (train_feat, train_lbls, test_feat, test_lbls) = train(trainpath,
            testpath, HPARAM)
    train_lbls = flatten_list(train_lbls)
    test_lbls = flatten_list(test_lbls)  # flatting the lbls
    # Get visual features
    (train_vid_feat, test_vid_feat) = get_vis_features()
    # show shapes
    print (train_feat.shape, test_feat.shape)
    print (train_vid_feat.shape, test_vid_feat.shape)
    # concatenation
    # Feature-level fusion: audio and visual features are stacked column-wise.
    cat_feat_train = np.concatenate((train_feat, train_vid_feat), axis=1)
    cat_feat_test = np.concatenate((test_feat, test_vid_feat), axis=1)
    print (cat_feat_train.shape)
    print (cat_feat_test.shape)
    # get scores
    svm(cat_feat_train, cat_feat_test, train_lbls, test_lbls)
# Script entry point.
if __name__ == '__main__':
    main()
|
from .dist_utils import (DistOptimizerHook, allreduce_grads, get_dist_info,
init_dist)
from .evaluation import (DistEvalTopKAccuracyHook, mean_class_accuracy,
parallel_test, softmax, top_k_acc, top_k_accuracy,
top_k_hit)
from .fp16 import Fp16OptimizerHook, auto_fp16, force_fp32, wrap_fp16_model
from .opts import parser
from .parallel import (DataContainer, MMDataParallel,
MMDistributedDataParallel, collate, scatter,
scatter_kwargs)
from .test import multi_gpu_test, single_gpu_test
from .train import set_random_seed, train_network
# Explicit public API of this package; keep in sync with the imports above.
__all__ = [
    'DistOptimizerHook', 'allreduce_grads', 'init_dist', 'get_dist_info',
    'DistEvalTopKAccuracyHook', 'mean_class_accuracy',
    'softmax', 'top_k_acc', 'top_k_accuracy', 'top_k_hit', 'parallel_test',
    'Fp16OptimizerHook', 'auto_fp16', 'force_fp32', 'wrap_fp16_model',
    'parser',
    'collate', 'DataContainer', 'MMDataParallel', 'MMDistributedDataParallel',
    'scatter', 'scatter_kwargs',
    'set_random_seed', 'train_network',
    'single_gpu_test', 'multi_gpu_test'
]
|
from django.contrib import admin
from auth.models import InviteCode
@admin.register(InviteCode)
class InviteCodeAdmin(admin.ModelAdmin):
    """Register InviteCode in the Django admin with default ModelAdmin behaviour."""
    pass
#!/usr/bin/python
# Create the sensor and control SQLite databases and their tables.
# "if not exists" makes the script idempotent (the original crashed with
# OperationalError on a second run); try/finally guarantees the connections
# are closed even when a statement fails.
import sqlite3 as lite

datafile = "sensor_stream.db"
contolFile = "control_stream.db"  # NOTE: original (typo'd) name kept for compatibility

con = lite.connect(datafile)
try:
    con.execute("create table if not exists sensor_log(time_stamp date, sensor_id integer, temperature real, humidity real)")
    con.commit()
finally:
    con.close()

con = lite.connect(contolFile)
try:
    con.execute("create table if not exists control_log(time_stamp date, control_name text, value integer)")
    con.execute("create table if not exists control_target(time_stamp date, temperature real, humidity real)")
    con.commit()
finally:
    con.close()
|
import sys, pathlib
# NOTE(review): sys.path entries should be str; appending a Path object is
# tolerated by CPython's importer but not guaranteed — consider str(...).
sys.path.append(pathlib.Path(__file__).parent.parent / 'src')
# NOTE(review): the ``{(unknown)}`` tokens are code-generation placeholders;
# this file is a template, not runnable Python, until they are substituted.
from {(unknown)} import {(unknown)}
import unittest
class {(unknown)}Test(unittest.TestCase):
    # Smoke test: the generated class can be instantiated.
    def test_init(self):
        ins = {(unknown)}()
        self.assertEqual({(unknown)}, type(ins))
    # Sanity check of the assertRaises plumbing.
    def test_raise(self):
        with self.assertRaises(Exception) as e:
            raise Exception('A')
        self.assertEqual('A', e.exception.args[0])
if __name__ == '__main__':
    unittest.main()
|
import unittest
from datastructure.oop.progression.test_progression import FibonacciProgression
class MyTestCase(unittest.TestCase):
    def test_something(self):
        # Fibonacci-style progression seeded with (2, 2):
        # 2, 2, 4, 6, 10, 16, 26, 42 — the 8th term (index 7) is 42.
        p = FibonacciProgression(2, 2)
        self.assertEqual(42, [next(p) for i in range(8)][7])
if __name__ == '__main__':
unittest.main()
|
import logging
import boto3
import os
from botocore.exceptions import ClientError
import json
QUEUE_URI = os.getenv("QUEUE_URI")
logger = logging.getLogger()
logging.basicConfig(level=logging.INFO,
format='%(asctime)s: %(levelname)s: %(message)s')
sqs_client = boto3.client("sqs", region_name = os.getenv("QUEUE_REGION"))
def receive_queue_message():
    """Long-poll the queue once (up to 5 s) for at most one message.

    Returns the raw ``receive_message`` response dict. A ``ClientError``
    from the SQS client is logged with traceback and re-raised.
    """
    try:
        return sqs_client.receive_message(
            QueueUrl=QUEUE_URI,
            WaitTimeSeconds=5,
            MaxNumberOfMessages=1,
        )
    except ClientError:
        logger.exception('Could not receive the message from the - {}.'.format(
            QUEUE_URI))
        raise
def delete_queue_message(receipt_handle):
    """Delete a message from the queue by its receipt handle.

    Args:
        receipt_handle: the ReceiptHandle of a previously received message.

    Returns:
        The raw ``delete_message`` response; logs and re-raises ClientError.
    """
    try:
        response = sqs_client.delete_message(QueueUrl=QUEUE_URI,
                                             ReceiptHandle=receipt_handle)
    except ClientError:
        # Fixed typo in the log text ("meessage" -> "message").
        logger.exception('Could not delete the message from the - {}.'.format(
            QUEUE_URI))
        raise
    else:
        return response
# Poll-and-delete consumer loop: receive up to one message at a time, log its
# body, then delete it so it is not redelivered.
if __name__ == '__main__':
    while True:
        messages = receive_queue_message()
        print(messages)
        # 'Messages' is absent when the long poll timed out with nothing.
        if "Messages" in messages:
            for msg in messages['Messages']:
                msg_body = msg['Body']
                receipt_handle = msg['ReceiptHandle']
                logger.info(f'The message body: {msg_body}')
                logger.info('Deleting message from the queue...')
                resp_delete = delete_queue_message(receipt_handle)
                logger.info(
                    'Received and deleted message(s) from {} with message {}.'.format(QUEUE_URI,resp_delete))
|
#!/usr/bin/env python
#-*- coding: utf-8 -*-
'''
size:本堆块的长度。长度计算方式:size字段长度+用户申请的长度+对齐。libc以size_T长度*2为粒度对齐。
例如32bit以4*2=8byte对齐,64bit以8*2=0×10对齐。因为最少以8字节对齐,所以size一定是8的倍数,故size字段的最后三位恒为0,libc用这三个bit做标志flag。
比较关键的是最后一个bit(pre_inuse),用于指示相邻的前一个堆块是alloc还是free。如果正在使用,则bit=1。
libc判断当前堆块是否处于free状态的方法就是判断下一个堆块的pre_inuse是否为1。这里也是double free和null byte offset等漏洞利用的关键。
'''
from sys import *
class heapalign:
verbose = False
log = ''
def __init__(self):
print 'Heap align tips:'.strip()
print '''
fast bin : <= 128 (0x80)
small bin : > 128 (0x80) <= 512 (0x200)
large bin : > 512 (0x200)
'''
def align64(self,size,hexadecimal=False,verbose=False):
aligned = 0
log = ''
while aligned ==0:
tmp = size % 0x10
if tmp !=0:
size = size + tmp
if verbose == True:
log += 'aligning size :{}\n'.format(size)
self.align64(size,True)
else :
self.align64(size,False)
else:
aligned = 1
if hexadecimal :
return hex(size),log.strip()
else:
return size,log.strip()
def align32(self,size,hexadecimal=False,verbose=False):
aligned = 0
log =''
while aligned ==0:
tmp = size % 0x8
if tmp !=0:
size = size + tmp
if verbose == True:
log += 'aligning size :{}\n'.format(size)
self.align32(size,True)
else :
self.align32(size,False)
else:
aligned = 1
if hexadecimal :
return hex(size),log.strip()
else:
return size,log.strip()
def show(self,arch,size,hexadecimal=False,verbose=False):
if arch == 'i386':
if verbose == True:
size,info = self.align32(size,hexadecimal=False,verbose=True)
print info+'\n'
if hexadecimal == True:
print '[*] chunk size: 0x%x'%(size)
print '[*]memory size: 0x%x'%(size-0x8)
else:
print '[*] chunk size: %d'%(size)
print '[*]memory size: %d'%(size-0x8)
elif verbose == False:
size,info = self.align32(size,hexadecimal=False,verbose=False)
if hexadecimal == True:
print '[*] chunk size: 0x%x'%(size)
print '[*]memory size: 0x%x'%(size-0x8)
else:
print '[*] chunk size: %d'%(size)
print '[*]memory size: %d'%(size-0x8)
elif arch == 'amd64':
if verbose == True:
size,info = self.align64(size,hexadecimal=False,verbose=True)
print info+'\n'
if hexadecimal == True:
print '[*] chunk size: 0x%x'%(size+0x10)
print '[*]memory size: 0x%x'%(size)
else:
print '[*] chunk size: %d'%(size+0x10)
print '[*]memory size: %d'%(size)
elif verbose == False:
size,info = self.align64(size,hexadecimal=False,verbose=False)
if hexadecimal == True:
print '[*] chunk size: 0x%x'%(size+0x10)
print '[*]memory size: 0x%x'%(size)
else:
print '[*] chunk size: %d'%(size+0x10)
print '[*]memory size: %d'%(size)
# Script entry point: heapalign.py <arch> <size> <hexadecimal> <verbose>
align = heapalign()
try :
    align.show(argv[1],int(argv[2]),int(argv[3]),int(argv[4]))
# NOTE(review): bare except also swallows real errors raised inside show();
# catching (IndexError, ValueError) would be safer.
except :
    print '''usage :python heapalign.py arch size hexadecimal verbose
arch : i386 or amd64
size : the size you need to align
hexadecimal : 1 or 0
verbose : 1 or 0
'''.strip()
    exit(0)
|
import mock
import pytest
from nose.tools import assert_equal
from addons.forward.tests.utils import ForwardAddonTestCase
from tests.base import OsfTestCase
from website import settings
pytestmark = pytest.mark.django_db
class TestForward(ForwardAddonTestCase, OsfTestCase):
    """Integration tests for the forward addon's config endpoint: logging
    behaviour on URL change and spam checking on public projects."""
    def setUp(self):
        super(TestForward, self).setUp()
        self.app.authenticate(*self.user.auth)
    def test_change_url_log_added(self):
        """Changing the forward URL must add exactly one project log entry."""
        log_count = self.project.logs.count()
        self.app.put_json(
            self.project.api_url_for('forward_config_put'),
            dict(
                url='http://how.to.bas/ic',
            ),
        )
        self.project.reload()
        assert_equal(
            self.project.logs.count(),
            log_count + 1
        )
    def test_change_timeout_log_not_added(self):
        """Re-submitting the unchanged URL must not add a log entry."""
        log_count = self.project.logs.count()
        self.app.put_json(
            self.project.api_url_for('forward_config_put'),
            dict(
                url=self.node_settings.url,
            ),
        )
        self.project.reload()
        assert_equal(
            self.project.logs.count(),
            log_count
        )
    @mock.patch.object(settings, 'SPAM_CHECK_ENABLED', True)
    @mock.patch('osf.models.node.Node.do_check_spam')
    def test_change_url_check_spam(self, mock_check_spam):
        """On a public project a URL change must trigger the spam check with
        the author's identity and the new URL as content."""
        self.project.is_public = True
        self.project.save()
        self.app.put_json(self.project.api_url_for('forward_config_put'), {'url': 'http://possiblyspam.com'})
        assert mock_check_spam.called
        data, _ = mock_check_spam.call_args
        author, author_email, content, request_headers = data
        assert author == self.user.fullname
        assert author_email == self.user.username
        assert content == 'http://possiblyspam.com'
|
# -*- coding: utf-8 -*-
"""\
CML Execution Utilities
-----------------------
"""
import os
import shutil
import logging
import glob
from ..utils import osutils
_lgr = logging.getLogger(__name__)
def is_caelus_casedir(root=None):
    """Check whether *root* looks like an OpenFOAM/Caelus case directory.

    A directory qualifies when ``constant``, ``system`` and
    ``system/controlDict`` all exist. No attempt is made to verify that the
    case actually runs or that a mesh is present.

    Args:
        root (path): Directory to test (default: current working directory)

    Returns:
        bool: True when all required entries exist.
    """
    base = os.getcwd() if root is None else root
    required = (
        "constant",
        "system",
        os.path.join("system", "controlDict"),
    )
    for entry in required:
        if not os.path.exists(os.path.join(base, entry)):
            return False
    return True
def find_case_dirs(basedir):
    """Recursively yield all case directories found under *basedir*.

    Args:
        basedir (path): Top-level directory to traverse

    Yields:
        Absolute path to each case directory
    """
    absdir = osutils.abspath(basedir)
    # When the root itself is a case directory there is nothing to descend into.
    if is_caelus_casedir(absdir):
        yield absdir
        return
    for root, dirs, _ in os.walk(absdir):
        matches = [d for d in dirs
                   if is_caelus_casedir(os.path.join(root, d))]
        for d in matches:
            # Prune matched directories so os.walk does not descend into them.
            dirs.remove(d)
            yield os.path.join(root, d)
def find_caelus_recipe_dirs(
        basedir,
        action_file="caelus_tasks.yaml"):
    """Yield case directories under *basedir* that contain an action file.

    A directory qualifies when it passes :func:`is_caelus_casedir` (via
    :func:`find_case_dirs`) and additionally holds *action_file*.

    Args:
        basedir (path): Top-level directory to traverse
        action_file (filename): Default is ``caelus_tasks.yaml``

    Yields:
        Path to each case directory containing the action file
    """
    for case_dir in find_case_dirs(basedir):
        task_path = os.path.join(case_dir, action_file)
        if os.path.exists(task_path):
            yield case_dir
def find_recipe_dirs(basedir, action_file="caelus_tasks.yaml"):
    """Yield directories under *basedir* that contain the action file.

    Unlike :func:`find_caelus_recipe_dirs` this does not require a valid
    case directory; the task file is assumed to act on case directories
    that live below it.

    Args:
        basedir (path): Top-level directory to traverse
        action_file (filename): Default is ``caelus_tasks.yaml``

    Yields:
        Path to each directory holding an action file
    """
    absdir = osutils.abspath(basedir)
    for root, dirs, _ in os.walk(absdir):
        if not os.path.exists(os.path.join(root, action_file)):
            continue
        # Stop descending below a directory that has the action file.
        del dirs[:]
        yield root
def clean_polymesh(casedir,
                   region=None,
                   preserve_patterns=None):
    """Remove the polyMesh contents from a case directory.

    Args:
        casedir (path): Path to the case directory
        region (str): Mesh region to delete (default: the main region)
        preserve_patterns (list): Shell wildcard patterns of files to keep
    """
    patterns = ["blockMeshDict"]
    if preserve_patterns:
        patterns += preserve_patterns
    absdir = osutils.abspath(casedir)
    parts = (["constant", "polyMesh"] if region is None
             else ["constant", region, "polyMesh"])
    meshdir = os.path.join(absdir, *parts)
    if not os.path.exists(meshdir):
        _lgr.warning("No polyMesh directory %s; skipping clean_mesh",
                     meshdir)
        return
    _lgr.debug("Cleaning polyMesh in %s", absdir)
    osutils.clean_directory(meshdir, patterns)
def clean_casedir(casedir,
                  preserve_extra=None,
                  preserve_zero=True,
                  preserve_times=False,
                  preserve_processors=False,
                  purge_mesh=False):
    """Remove run-generated files from a Caelus case directory.

    ``system``, ``constant``, the ``0`` directory (by default) and any
    YAML/python files are always kept; extra shell wildcard patterns can be
    preserved through *preserve_extra*.

    Args:
        casedir (path): Absolute path to a case directory.
        preserve_extra (list): List of shell wildcard patterns to preserve
        purge_mesh (bool): If true, also removes mesh from constant/polyMesh
        preserve_zero (bool): If False, removes the 0 directory
        preserve_times (bool): If False, removes the time directories
        preserve_processors (bool): If False, removes processor directories

    Raises:
        IOError: refuses to remove files from a directory that is not a
            valid Caelus case directory.
    """
    keep = ["system", "constant", "*.yaml", "*.yml", "*.py",
            "*.job", "README*", "readme*", "cmlControls"]
    if preserve_zero:
        keep.append("0")
    if preserve_extra:
        keep.extend(preserve_extra)
    if preserve_times:
        keep.extend(["[1-9]*", "0.[0-9]*", "-[0-9]*"])
    if preserve_processors:
        keep.append("processor*")
    absdir = osutils.abspath(casedir)
    # Guard against destroying an arbitrary directory.
    if not is_caelus_casedir(absdir):
        raise IOError(
            "Not a valid case directory; refusing to perform destructive "
            "clean operation on %s"%absdir)
    _lgr.debug("Cleaning case directory: %s", absdir)
    osutils.clean_directory(absdir, keep)
    if purge_mesh:
        clean_polymesh(absdir)
def clone_case(casedir,
               template_dir,
               copy_polymesh=True,
               copy_zero=True,
               copy_scripts=True,
               extra_patterns=None):
    """Clone a Caelus case directory.

    Args:
        casedir (path): Absolute path to new case directory.
        template_dir (path): Case directory to be cloned
        copy_polymesh (bool): Copy contents of constant/polyMesh to new case
        copy_zero (bool): Copy time=0 directory to new case
        copy_scripts (bool): Copy python and YAML files
        extra_patterns (list): List of shell wildcard patterns for copying

    Returns:
        path: Absolute path to the newly cloned directory

    Raises:
        IOError: If either the ``casedir`` exists or if the ``template_dir``
            does not exist or is not a valid Caelus case directory.
    """
    absdir = osutils.abspath(casedir)
    tmpl_dir = osutils.abspath(template_dir)
    # Fixed: the original passed the path as a second IOError argument
    # ("...: %s", absdir), so the message was never interpolated.
    if os.path.exists(absdir):
        raise IOError("Cannot overwrite existing file/directory: %s" % absdir)
    if not (os.path.exists(tmpl_dir) and
            is_caelus_casedir(tmpl_dir)):
        raise IOError("Invalid Caelus case directory provided as template: %s"
                      % template_dir)
    # Run artifacts (time dirs, logs, post-processing output) are never cloned.
    default_ignore = ["[1-9]*", "0.[0-9]*", "-[0-9]*",
                      "processor*", "lines",
                      "surfaces", "probes*", "forces*", "sets",
                      "VTK", "*.foam", "surfaceSampling", "postProcessing",
                      "*.log", "log.*", "*logs", "*.job", "*.pdf", "*.png"]
    if not copy_zero:
        default_ignore += ["0"]
    if not copy_scripts:
        default_ignore += ["*.py", "*.yaml"]
    if not copy_polymesh:
        default_ignore += ["polyMesh"]
    if extra_patterns:
        default_ignore += extra_patterns
    ignore_func = shutil.ignore_patterns(*default_ignore)
    osutils.copy_tree(tmpl_dir, absdir, ignore_func=ignore_func)
    _lgr.info("Cloned directory: %s; template directory: %s",
              absdir, tmpl_dir)
    return absdir
def get_mpi_size(casedir):
    """Determine the number of MPI ranks to run"""
    #TODO: Implement decomposeParDict options. How do we handle
    #redistributePar?
    # Count processor* directories in the case directory.
    with osutils.set_work_dir(casedir):
        return sum(1 for _ in glob.iglob("processor*"))
|
"""
Referenced from
https://github.com/chainer/chainer/pull/3351
originally made by @himkt
"""
from chainer import reporter
from chainer.training import util
def greater(current_val, best_val):
    """Return True when *current_val* improves on *best_val* (maximisation)."""
    return best_val < current_val
def less(current_val, best_val):
    """Return True when *current_val* improves on *best_val* (minimisation)."""
    return best_val > current_val
class EarlyStoppingTrigger(object):
    """Trigger invoked when specific value continue to be worse.
    Args:
        monitor (str) : the metric you want to monitor
        trigger: Trigger that decides the comparison interval between current
            best value and new value. This must be a tuple in the form of
            ``<int>, 'epoch'`` or ``<int>, 'iteration'`` which is passed to
            :class:`~chainer.training.triggers.IntervalTrigger`.
        patients (int) : the value to patient
        mode (str) : max, min, or auto. using them to determine the _compare
        verbose (bool) : flag for debug mode
        max_epoch (int) : upper bound of the number of training loops
    """
    def __init__(self, trigger=(1, 'epoch'), monitor='main/loss', patients=3,
                 mode='auto', verbose=False, max_epoch=100, debug=False):
        # ``patients`` counts consecutive non-improving checks before the
        # trigger fires (conventionally "patience"; name kept as-is).
        self.count = 0
        self.patients = patients
        self.monitor = monitor
        self.verbose = verbose
        self.debug = debug
        self.max_epoch = max_epoch
        self.already_warning = False
        self._interval_trigger = util.get_trigger(trigger)
        # Callbacks invoked once when the stop condition fires.
        self.listeners = []
        self._init_summary()
        # 'auto' mode: metrics whose name mentions accuracy are maximised,
        # everything else is minimised.
        if mode == 'max':
            self._compare = greater
        elif mode == 'min':
            self._compare = less
        else:
            if 'accuracy' in monitor:
                self._compare = greater
            else:
                self._compare = less
        # Seed ``best`` with a sentinel far beyond any realistic metric value.
        if self._compare == greater:
            if verbose:
                print('early stopping: operator is greater')
            self.best = -1 * (1 << 50)
        else:
            if verbose:
                print('early stopping: operator is less')
            self.best = 1 << 50
    def __call__(self, trainer):
        """Decides whether the training loop should be stopped.
        Args:
            trainer (~chainer.training.Trainer): Trainer object that this
                trigger is associated with. The ``observation`` of this trainer
                is used to determine if the trigger should fire.
        Returns:
            bool: ``True`` if the training loop should be stopped.
        """
        observation = trainer.observation
        summary = self._summary
        # Accumulate the monitored metric on every call; it is averaged over
        # the interval when the interval trigger fires below.
        if self.monitor in observation:
            summary.add({self.monitor: observation[self.monitor]})
        # Hard stop at the epoch budget regardless of the metric.
        if trainer.updater.epoch >= self.max_epoch:
            return True
        if not self._interval_trigger(trainer):
            return False
        if self.monitor not in observation.keys():
            # Warn only once when the metric never appears in observations.
            if not self.already_warning:
                print('Warning: {} is not in observation'.format(self.monitor))
                self.already_warning = True
            return False
        stat = self._summary.compute_mean()
        current_val = stat[self.monitor]
        # Fresh summary for the next interval.
        self._init_summary()
        if self.debug:
            print('current count: {}'.format(self.count))
            print('best: {}, current_val: {}'.format(self.best, current_val))
        if self._compare(current_val, self.best):
            self.best = current_val
            self.count = 0
        else:
            self.count += 1
        if self._stop_condition():
            if self.verbose:
                if self.max_epoch != trainer.updater.epoch:
                    print('Epoch {}: early stopping'.format(
                        trainer.updater.epoch))
            # Reset count and return True, go to next early stopping loop...
            self.count = 0
            for fn in self.listeners:
                fn(trainer)
            return True
        return False
    def set_on_condition_listener(self, fn):
        """Register a callable invoked with the trainer when stopping fires."""
        assert callable(fn)
        self.listeners.append(fn)
    def clear_on_condition_listener(self):
        """Remove all registered stop listeners."""
        self.listeners = []
    def _stop_condition(self):
        # Fire once the metric has failed to improve ``patients`` times in a row.
        if self.debug:
            print('{} >= {}'.format(self.count, self.patients))
        return self.count >= self.patients
    def _init_summary(self):
        self._summary = reporter.DictSummary()
    def update_patients(self, patients):
        """Change the patience threshold at runtime."""
        self.patients = patients
|
from django.apps import AppConfig
class NyokaserverConfig(AppConfig):
    """Django application configuration for the ``nyokaserver`` app."""
    name = 'nyokaserver'
|
# Find the number of letters "a" or "A" in a string using control flow
counter = 0
string = 'Python is a widely used high-level programming language for general-purpose programming, ' \
         'created by Guido van Rossum and first released in 1991. An interpreted language, Python has a design ' \
         'philosophy that emphasizes code readability (notably using whitespace indentation to delimit code blocks ' \
         'rather than curly brackets or keywords), and a syntax that allows programmers to express concepts in fewer ' \
         'lines of code than might be used in languages such as C++ or Java.[23][24] ' \
         'The language provides constructs intended to enable writing clear programs on both a small and large scale.'
# Iterate the string character by character
# Compare if character is "a" or "A"
# (Bug fix: the exercise's loop was never written, so `counter` stayed 0.)
for character in string:
    if character == 'a' or character == 'A':
        counter += 1
print(counter)
|
from typing import Optional
from dataclasses import dataclass
from .InvoiceDataResult import InvoiceDataResult
from .BasicResponse import BasicResponse
@dataclass
class QueryInvoiceDataResponse(BasicResponse):
    """Response type of the POST /queryInvoiceData REST operation.

    :param invoice_data_result: Invoice data query result
    """
    # Result payload of the invoice data query; Optional, so presumably
    # None when the query matched nothing — confirm against the API spec.
    invoice_data_result: Optional[InvoiceDataResult]
|
#
# Copyright (c) 2022 Airbyte, Inc., all rights reserved.
#
from abc import ABC, abstractmethod
from typing import List
import requests
from airbyte_cdk.sources.declarative.types import Record
class HttpExtractor(ABC):
    """Interface for turning an HTTP response into a list of records."""
    @abstractmethod
    def extract_records(self, response: requests.Response) -> List[Record]:
        """Extract and return the records contained in *response*."""
        pass
|
# Read a length in metres and echo it converted to the common metric units.
metros = float(input('Digite um valor em metros: '))
cm = metros * 100
mm = metros * 1000
dm = metros * 10
dam = metros / 10
hm = metros / 100
km = metros / 1000
print('A conversão de metro para CM foi {} \n e MM foi {} \n em DM foi {} \n em DAM foi {} \n em HM {} \n e por fim em KM {}'.format(cm, mm, dm, dam, hm, km))
|
from django.contrib import admin
from .models import Contact, Invoice, Item, InvoiceItem, Payment
# Register your models here.
class ContactAdmin(admin.ModelAdmin):
    """Admin list configuration for Contact records."""
    list_display = ("contact_type", 'name','email', 'city', 'pincode')
    search_fields = ("contact_type", 'name','email', 'city', 'pincode')
admin.site.register(Contact, ContactAdmin)
class ItemAdmin(admin.ModelAdmin):
    """Admin list configuration for catalogue items."""
    list_display = ("item_code", 'title','gst_rate', 'description')
    search_fields = ("item_code", 'title','gst_rate' )
admin.site.register(Item, ItemAdmin)
class InvoiceItemInline(admin.TabularInline):
    """Inline editor for the line items of an invoice."""
    model = InvoiceItem


class PaymentInline(admin.TabularInline):
    """Inline editor for payments recorded against an invoice.

    Renamed from ``Payment``: the original class shadowed the imported
    ``Payment`` model, so any later reference to ``Payment`` silently
    meant the inline class instead of the model.
    """
    model = Payment


class InvoiceAdmin(admin.ModelAdmin):
    """Admin for invoices, with line items and payments edited inline."""
    # Dropped the dead `extra = 1`: `extra` is an InlineModelAdmin option
    # and is ignored on a ModelAdmin.
    inlines = [InvoiceItemInline, PaymentInline]


admin.site.register(Invoice, InvoiceAdmin)
|
from machinable import Experiment
class TestView(Experiment):
    """Minimal Experiment subclass used to exercise the view mechanism."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Opaque state slot set/read by the accessor pair below.
        self._state = None
    def hello(self):
        """Return a fixed greeting; handy for asserting the view is wired up."""
        return "there"
    def set_state(self, state):
        """Store *state* for later retrieval via :meth:`get_state`."""
        self._state = state
    def get_state(self):
        """Return whatever was last passed to :meth:`set_state` (None initially)."""
        return self._state
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
try:
from ._models_py3 import AvroSerialization
from ._models_py3 import AzureDataLakeStoreOutputDataSource
from ._models_py3 import AzureMachineLearningWebServiceFunctionBinding
from ._models_py3 import AzureMachineLearningWebServiceFunctionRetrieveDefaultDefinitionParameters
from ._models_py3 import AzureMachineLearningWebServiceInputColumn
from ._models_py3 import AzureMachineLearningWebServiceInputs
from ._models_py3 import AzureMachineLearningWebServiceOutputColumn
from ._models_py3 import AzureSqlDatabaseDataSourceProperties
from ._models_py3 import AzureSqlDatabaseOutputDataSource
from ._models_py3 import AzureTableOutputDataSource
from ._models_py3 import BlobDataSourceProperties
from ._models_py3 import BlobOutputDataSource
from ._models_py3 import BlobReferenceInputDataSource
from ._models_py3 import BlobStreamInputDataSource
from ._models_py3 import CsvSerialization
from ._models_py3 import DiagnosticCondition
from ._models_py3 import Diagnostics
from ._models_py3 import DocumentDbOutputDataSource
from ._models_py3 import ErrorResponse
from ._models_py3 import EventHubDataSourceProperties
from ._models_py3 import EventHubOutputDataSource
from ._models_py3 import EventHubStreamInputDataSource
from ._models_py3 import Function
from ._models_py3 import FunctionBinding
from ._models_py3 import FunctionInput
from ._models_py3 import FunctionOutput
from ._models_py3 import FunctionProperties
from ._models_py3 import FunctionRetrieveDefaultDefinitionParameters
from ._models_py3 import Input
from ._models_py3 import InputProperties
from ._models_py3 import IoTHubStreamInputDataSource
from ._models_py3 import JavaScriptFunctionBinding
from ._models_py3 import JavaScriptFunctionRetrieveDefaultDefinitionParameters
from ._models_py3 import JsonSerialization
from ._models_py3 import OAuthBasedDataSourceProperties
from ._models_py3 import Operation
from ._models_py3 import OperationDisplay
from ._models_py3 import Output
from ._models_py3 import OutputDataSource
from ._models_py3 import PowerBIOutputDataSource
from ._models_py3 import ReferenceInputDataSource
from ._models_py3 import ReferenceInputProperties
from ._models_py3 import Resource
from ._models_py3 import ResourceTestStatus
from ._models_py3 import ScalarFunctionProperties
from ._models_py3 import Serialization
from ._models_py3 import ServiceBusDataSourceProperties
from ._models_py3 import ServiceBusQueueOutputDataSource
from ._models_py3 import ServiceBusTopicOutputDataSource
from ._models_py3 import Sku
from ._models_py3 import StartStreamingJobParameters
from ._models_py3 import StorageAccount
from ._models_py3 import StreamingJob
from ._models_py3 import StreamInputDataSource
from ._models_py3 import StreamInputProperties
from ._models_py3 import SubResource
from ._models_py3 import SubscriptionQuota
from ._models_py3 import SubscriptionQuotasListResult
from ._models_py3 import Transformation
except (SyntaxError, ImportError):
from ._models import AvroSerialization
from ._models import AzureDataLakeStoreOutputDataSource
from ._models import AzureMachineLearningWebServiceFunctionBinding
from ._models import AzureMachineLearningWebServiceFunctionRetrieveDefaultDefinitionParameters
from ._models import AzureMachineLearningWebServiceInputColumn
from ._models import AzureMachineLearningWebServiceInputs
from ._models import AzureMachineLearningWebServiceOutputColumn
from ._models import AzureSqlDatabaseDataSourceProperties
from ._models import AzureSqlDatabaseOutputDataSource
from ._models import AzureTableOutputDataSource
from ._models import BlobDataSourceProperties
from ._models import BlobOutputDataSource
from ._models import BlobReferenceInputDataSource
from ._models import BlobStreamInputDataSource
from ._models import CsvSerialization
from ._models import DiagnosticCondition
from ._models import Diagnostics
from ._models import DocumentDbOutputDataSource
from ._models import ErrorResponse
from ._models import EventHubDataSourceProperties
from ._models import EventHubOutputDataSource
from ._models import EventHubStreamInputDataSource
from ._models import Function
from ._models import FunctionBinding
from ._models import FunctionInput
from ._models import FunctionOutput
from ._models import FunctionProperties
from ._models import FunctionRetrieveDefaultDefinitionParameters
from ._models import Input
from ._models import InputProperties
from ._models import IoTHubStreamInputDataSource
from ._models import JavaScriptFunctionBinding
from ._models import JavaScriptFunctionRetrieveDefaultDefinitionParameters
from ._models import JsonSerialization
from ._models import OAuthBasedDataSourceProperties
from ._models import Operation
from ._models import OperationDisplay
from ._models import Output
from ._models import OutputDataSource
from ._models import PowerBIOutputDataSource
from ._models import ReferenceInputDataSource
from ._models import ReferenceInputProperties
from ._models import Resource
from ._models import ResourceTestStatus
from ._models import ScalarFunctionProperties
from ._models import Serialization
from ._models import ServiceBusDataSourceProperties
from ._models import ServiceBusQueueOutputDataSource
from ._models import ServiceBusTopicOutputDataSource
from ._models import Sku
from ._models import StartStreamingJobParameters
from ._models import StorageAccount
from ._models import StreamingJob
from ._models import StreamInputDataSource
from ._models import StreamInputProperties
from ._models import SubResource
from ._models import SubscriptionQuota
from ._models import SubscriptionQuotasListResult
from ._models import Transformation
from ._paged_models import FunctionPaged
from ._paged_models import InputPaged
from ._paged_models import OperationPaged
from ._paged_models import OutputPaged
from ._paged_models import StreamingJobPaged
from ._stream_analytics_management_client_enums import (
SkuName,
OutputStartMode,
EventsOutOfOrderPolicy,
OutputErrorPolicy,
CompatibilityLevel,
JsonOutputSerializationFormat,
Encoding,
UdfType,
)
__all__ = [
'AvroSerialization',
'AzureDataLakeStoreOutputDataSource',
'AzureMachineLearningWebServiceFunctionBinding',
'AzureMachineLearningWebServiceFunctionRetrieveDefaultDefinitionParameters',
'AzureMachineLearningWebServiceInputColumn',
'AzureMachineLearningWebServiceInputs',
'AzureMachineLearningWebServiceOutputColumn',
'AzureSqlDatabaseDataSourceProperties',
'AzureSqlDatabaseOutputDataSource',
'AzureTableOutputDataSource',
'BlobDataSourceProperties',
'BlobOutputDataSource',
'BlobReferenceInputDataSource',
'BlobStreamInputDataSource',
'CsvSerialization',
'DiagnosticCondition',
'Diagnostics',
'DocumentDbOutputDataSource',
'ErrorResponse',
'EventHubDataSourceProperties',
'EventHubOutputDataSource',
'EventHubStreamInputDataSource',
'Function',
'FunctionBinding',
'FunctionInput',
'FunctionOutput',
'FunctionProperties',
'FunctionRetrieveDefaultDefinitionParameters',
'Input',
'InputProperties',
'IoTHubStreamInputDataSource',
'JavaScriptFunctionBinding',
'JavaScriptFunctionRetrieveDefaultDefinitionParameters',
'JsonSerialization',
'OAuthBasedDataSourceProperties',
'Operation',
'OperationDisplay',
'Output',
'OutputDataSource',
'PowerBIOutputDataSource',
'ReferenceInputDataSource',
'ReferenceInputProperties',
'Resource',
'ResourceTestStatus',
'ScalarFunctionProperties',
'Serialization',
'ServiceBusDataSourceProperties',
'ServiceBusQueueOutputDataSource',
'ServiceBusTopicOutputDataSource',
'Sku',
'StartStreamingJobParameters',
'StorageAccount',
'StreamingJob',
'StreamInputDataSource',
'StreamInputProperties',
'SubResource',
'SubscriptionQuota',
'SubscriptionQuotasListResult',
'Transformation',
'OperationPaged',
'StreamingJobPaged',
'InputPaged',
'OutputPaged',
'FunctionPaged',
'SkuName',
'OutputStartMode',
'EventsOutOfOrderPolicy',
'OutputErrorPolicy',
'CompatibilityLevel',
'JsonOutputSerializationFormat',
'Encoding',
'UdfType',
]
|
# Drive a simple send/receive loop against a running UDEC instance.
#
# Modernized from Python 2 `print` statements to the print() function; each
# message is pre-formatted into a single string so the console output is
# identical under both Python 2 and Python 3.
from itasca import UDEC_Connection

udec = UDEC_Connection()
udec.connect()
for i in range(10):
    print("sending %d" % i)
    udec.send(i)
    value = udec.receive()
    print("got %s from UDEC" % (value,))
# -1 signals the remote loop to terminate before we close the connection.
udec.send(-1)
udec.end()
|
import math
import sys
import itertools
def strange_multiplication_1225():
    """BOJ 1225: print the product of the digit sums of the two inputs."""
    a, b = map(int, sys.stdin.readline().split())
    digit_sum_a = sum(int(d) for d in str(a))
    digit_sum_b = sum(int(d) for d in str(b))
    sys.stdout.write(f"{digit_sum_a * digit_sum_b}")
def get_number_1037():
    """BOJ 1037: N = (smallest proper divisor) * (largest proper divisor)."""
    count = int(sys.stdin.readline())
    divisors = sorted(map(int, sys.stdin.readline().split()))
    sys.stdout.write(f"{divisors[0] * divisors[count - 1]}")
def get_number_2417():
    """BOJ 2417: print the smallest integer r with r * r >= n.

    Bug fix: the original used ``math.ceil(num ** 0.5)``. ``num ** 0.5``
    goes through a 53-bit float, so for large n (the problem allows values
    up to 2**63) the rounded square root can be off by one. ``math.isqrt``
    is exact integer arithmetic.
    """
    num = int(sys.stdin.readline())
    root = math.isqrt(num)
    if root * root < num:
        root += 1
    sys.stdout.write(f"{root}")
def cal_num_1629():
    """BOJ 1629: print (a ** b) % c via fast modular exponentiation.

    Bug fix: the original had a stray ``return res`` (``res`` was never
    defined) placed before the input was read, so the function raised
    NameError and never produced any output.
    """
    def modpow(base, exp, mod):
        # Divide-and-conquer exponentiation: O(log exp) multiplications.
        if exp == 1:
            return base % mod
        half = modpow(base, exp // 2, mod)
        if exp % 2 == 0:
            return half * half % mod
        return half * half * base % mod

    a, b, c = map(int, sys.stdin.readline().split())
    sys.stdout.write(f"{modpow(a, b, c)}")
def prog_add_num_278():
    """Programmers #278 wrapper: sum of the digits of n.

    NOTE(review): this only *defines* the inner ``solution``; calling
    ``prog_add_num_278()`` does nothing and returns None. Presumably the
    inner function is what gets pasted into the judge — confirm before
    "fixing" this.
    """
    def solution(n):
        answer = 0
        for each_num in str(n):
            answer += int(each_num)
        print(answer)
        return answer
def cal_num_13706():
    """BOJ 13706: integer square root of an arbitrarily large number."""
    value = int(sys.stdin.readline())
    sys.stdout.write(f"{math.isqrt(value)}")
def cal_num_1292():
    """BOJ 1292: sum of the sequence 1,2,2,3,3,3,... between positions a and b."""
    a, b = map(int, sys.stdin.readline().split())
    sequence = []
    # Value i occurs i times; the first 100 values cover all valid queries.
    for value in range(101):
        sequence.extend([value] * value)
    sys.stdout.write(f"{sum(sequence[a - 1:b])}")
def cal_num_2745():
    """BOJ 2745: convert a base-B numeral to decimal."""
    numeral, base = sys.stdin.readline().split()
    sys.stdout.write(f"{int(numeral, int(base))}")
def cal_num_1373():
    """BOJ 1373: convert a binary numeral to octal."""
    value = int(sys.stdin.readline().rstrip(), 2)
    sys.stdout.write(f"{value:o}")
def cal_num_2420():
    """BOJ 2420: absolute difference of two integers."""
    a, b = map(int, sys.stdin.readline().split())
    sys.stdout.write(f"{abs(a - b)}")
def cal_num_10610():
    """BOJ 10610: largest digit permutation divisible by 30, or -1.

    Divisible by 30 == contains a 0 (multiple of 10) and digit sum
    divisible by 3; the answer is then the digits in descending order.
    """
    digits = sys.stdin.readline().rstrip()
    if "0" not in digits or sum(int(d) for d in digits) % 3 != 0:
        sys.stdout.write("-1")
        return
    sys.stdout.write("".join(sorted(digits, reverse=True)))
def math_5988():
    """BOJ 5988: print "even" or "odd" for each of the given numbers."""
    for _ in range(int(sys.stdin.readline())):
        value = int(sys.stdin.readline())
        sys.stdout.write("even\n" if value % 2 == 0 else "odd\n")
def math_9506():
    """BOJ 9506: report whether each number equals the sum of its proper divisors."""
    while True:
        value = int(sys.stdin.readline())
        if value == -1:
            # Sentinel terminates the input stream.
            break
        divisors = [d for d in range(1, value // 2 + 1) if value % d == 0]
        if sum(divisors) == value:
            joined = ' + '.join(str(d) for d in divisors)
            sys.stdout.write(f"{value} = {joined}\n")
        else:
            sys.stdout.write(f"{value} is NOT perfect.\n")
def math_10822():
    """BOJ 10822: sum of comma-separated integers."""
    values = sys.stdin.readline().rstrip().split(',')
    sys.stdout.write(f"{sum(int(v) for v in values)}")
def math_1075():
    """BOJ 1075: smallest two-digit suffix making N divisible by F.

    Zeroes the last two digits of N, scans the 100 candidates upward, and
    prints the two-digit suffix of the first multiple of F found.
    """
    digits = list(sys.stdin.readline().rstrip())
    divisor = int(sys.stdin.readline())
    digits[len(digits) - 1] = '0'
    digits[len(digits) - 2] = '0'
    base = int("".join(digits))
    for candidate in range(base, base + 100):
        if candidate % divisor == 0:
            text = list(str(candidate))
            sys.stdout.write(f"{text[len(text) - 2]}{text[len(text) - 1]}\n")
            break
def math_2587():
    """BOJ 2587: mean and median of five numbers."""
    values = [int(sys.stdin.readline()) for _ in range(5)]
    mean = int(sum(values) / 5)
    values.sort()
    sys.stdout.write(f"{mean}\n{values[2]}")
def math_1247():
    """BOJ 1247: print the sign (+, -, 0) of each of three sums."""
    for _ in range(3):
        count = int(sys.stdin.readline())
        total = sum(int(sys.stdin.readline()) for _ in range(count))
        if total > 0:
            sys.stdout.write("+\n")
        elif total < 0:
            sys.stdout.write("-\n")
        else:
            sys.stdout.write("0\n")
if __name__ == "__main__":
# strange_multiplication_1225()
# get_number_1037()
# get_number_2417()
# cal_num_1629()
# cal_num_13706()
# cal_num_1292()
# cal_num_2745()
# cal_num_1373()
# cal_num_2420()
# cal_num_10610()
# math_5988()
# math_9506()
# math_10822()
# math_1075()
# math_2587()
math_1247()
|
import os
import difflib
from labugr import windows
__all__ = ['ayuda']
def __nombre_funciones():
    """Return the function names available in the working directory.

    Directory entries are expected to be named ``<funcion>-<sufijo>``;
    only the part before the first dash is kept.
    """
    return [entrada.split('-')[0] for entrada in os.listdir()]
def ayuda(funcion):
    """Print the Spanish documentation for a function.

    Loads the documentation from the ``doc-ESP`` directory that sits next
    to this module.

    Parameters
    ----------
    funcion : str or callable
        The function name as a string, or the function object itself.

    Examples
    --------
    >>> ayuda(arange)
    >>> ayuda('arange')
    """
    directorio_previo = os.getcwd()
    os.chdir(os.path.join(os.path.dirname(__file__), 'doc-ESP'))
    try:
        lista = __nombre_funciones()
        # If the function was passed as an object, work with its name.
        if not isinstance(funcion, str):
            funcion = funcion.__name__
            # Window functions live under the "windows." namespace on disk.
            if funcion in windows.__all__:
                funcion = "windows." + funcion
        if funcion not in lista:
            parecida = difflib.get_close_matches(funcion, lista)
            print("""
        ERROR: {} no se reconoce como función
        La función más parecida es {}""".format(funcion, parecida))
        else:
            archivo = "{}-es.txt".format(funcion)
            with open(archivo) as f:
                print(f.read())
    finally:
        # Bug fix: always restore the caller's working directory, even when
        # reading the documentation file raises — the original left the
        # process chdir'ed into doc-ESP on any exception.
        os.chdir(directorio_previo)
    return
#!/usr/bin/env python
import sys
import os
import json
import re
from functools import cmp_to_key
sys.path.append(os.path.join(os.path.dirname(__file__), "..", "..", "libs", "pandemia-client-py"))
import pandemia
def get_path(path):
    """Resolve *path* relative to the repository root (two levels up from here)."""
    base = os.path.dirname(__file__)
    return os.path.join(base, "..", "..", path)
def ident_4(json_text):
    """Return *json_text* with every line indented by four spaces."""
    return "\n".join("    " + line for line in json_text.split("\n"))
def json_print(text):
    """Parse *text* as JSON and return it pretty-printed, indented 4 spaces."""
    parsed = json.loads(text)
    pretty = json.dumps(parsed, indent=4, sort_keys=False)
    return ident_4(pretty)
def pretty_json_str(text):
    """Best-effort JSON pretty-printer.

    Returns '' for empty input and for text that does not parse as JSON.

    Bug fix: on a parse error the original only printed a warning and then
    fell through to ``return json_text`` — a NameError, since ``json_text``
    was never assigned. Now the warning is printed and '' is returned.
    """
    if not text:
        return ''
    try:
        parsed = json.loads(text)
        return json.dumps(parsed, indent=4, sort_keys=False)
    except Exception as _e:
        print("Cannot encode json: `%s`" % text)
        return ''
# Matches an endpoint heading: "### <title> [<METHOD> <path>]".
TITLE_EXT_RE = re.compile(r"###(.*?) \[(GET|POST) (.*?)\]")
def load_doc(scope, in_path):
    """Parse an API-blueprint markdown file into a list of element dicts.

    Elements are dicts with an 'elem' key: Host, MainTitle, MainDesc,
    Group, or ApiEndpoint (which additionally carries path/method/desc,
    request/response bodies, and headers accumulated line by line).
    Raises on duplicate endpoint paths.

    NOTE(review): the ``scope`` parameter is unused here — presumably kept
    for signature symmetry with gen_doc/gen_postman; confirm.
    """
    global TITLE_EXT_RE
    docs = []
    endpoints_path = []
    line_num = 0
    with open(in_path) as f:
        # Line-oriented state machine: the in_* flags remember which
        # section of the blueprint the current line belongs to.
        in_title = False
        in_group = False
        in_api_endpoint = False
        in_api_endpoint_parameters = False
        in_api_endpoint_request = False
        in_api_endpoint_response = False
        in_api_endpoint_request_header = False
        current_group = ""
        lines = f.readlines()
        for line in lines:
            line_num = line_num + 1
            if line.startswith("HOST:"):
                docs.append({'elem': 'Host', 'value': line})
            elif line.startswith("# "):
                # "# Title" — the document's main title.
                docs.append({'elem': 'MainTitle', 'value': line[2:].strip()})
                in_title = True
            elif in_title and not (line.startswith("#") or line.startswith("+") or line.startswith(" ")):
                in_title = not (line.startswith("#") or line.startswith("+") or line.startswith(" "))
                if in_title and len(line.strip()) > 0:
                    # Free text after the title accumulates into MainDesc.
                    if docs[-1]['elem'] != 'MainDesc':
                        docs.append({'elem': 'MainDesc', 'value': line})
                    else:
                        docs[-1]['value'] = (docs[-1]['value'] + '\n' + line).strip()
            elif line.startswith("## "):
                # Group headings are written as "## Group <name>";
                # [8:] skips the literal "## Group" prefix.
                group_name = line[8:].strip()
                docs.append({'elem': 'Group', 'group': group_name, 'title': group_name, 'desc': ""})
                current_group = group_name
                in_group = True
                in_title = False
                in_api_endpoint = False
                in_api_endpoint_request = False
                in_api_endpoint_response = False
                in_api_endpoint_request_header = False
            elif in_group:
                in_group = not line.startswith("## ")
                if in_group:
                    if line.startswith("### "):
                        # New endpoint heading.
                        in_api_endpoint = True
                        m = TITLE_EXT_RE.match(line)
                        title = m.group(1).strip()
                        method = m.group(2).strip()
                        path = m.group(3).strip()
                        if path in endpoints_path:
                            raise Exception("Endpoint terdeteksi duplikat: `%s` (line %d)" % (path, line_num))
                        rel_path = path.split('/')[-1]
                        method_name = title.lower().replace(' ', '_')
                        endpoints_path.append(path)
                        docs.append({'elem':"ApiEndpoint",
                            "group": current_group,
                            'path': path,
                            'rel_path': rel_path,
                            'method': method,
                            'title': title,
                            'desc': "",
                            'method_name': method_name,
                            'request_param': "",
                            'request_json': "",
                            'response_ok': "",
                            'headers': ""})
                        in_api_endpoint_parameters = False
                        in_api_endpoint_request = False
                        in_api_endpoint_response = False
                        in_api_endpoint_request_header = False
                        continue
                    elif in_api_endpoint:
                        in_api_endpoint = not line.startswith("##")
                        if in_api_endpoint:
                            # "+ ..." lines switch the sub-section being
                            # collected for the current endpoint.
                            if line.startswith('+ Request JSON'):
                                in_api_endpoint_request = True
                                in_api_endpoint_parameters = False
                                in_api_endpoint_response = False
                                in_api_endpoint_request_header = False
                                continue
                            elif line.startswith('+ Parameters'):
                                in_api_endpoint_parameters = True
                                in_api_endpoint_response = False
                                in_api_endpoint_request = False
                                in_api_endpoint_request_header = False
                                continue
                            elif line.startswith('+ Response'):
                                in_api_endpoint_response = True
                                in_api_endpoint_request = False
                                in_api_endpoint_parameters = False
                                in_api_endpoint_request_header = False
                                continue
                            elif line.startswith('+ Request plain'):
                                in_api_endpoint_response = False
                                in_api_endpoint_request = False
                                in_api_endpoint_parameters = False
                                in_api_endpoint_request_header = True
                                continue
                            elif in_api_endpoint_parameters:
                                in_api_endpoint_parameters = not line.startswith("+") and not line.startswith("#")
                                if in_api_endpoint_parameters:
                                    docs[-1]['request_param'] = (docs[-1]['request_param'] + '\n' + line).strip()
                            elif in_api_endpoint_request:
                                in_api_endpoint_request = not line.startswith("+") and not line.startswith("#")
                                if in_api_endpoint_request:
                                    docs[-1]['request_json'] = (docs[-1]['request_json'] + line).strip()
                            elif in_api_endpoint_response:
                                in_api_endpoint_response = not line.startswith("+") and not line.startswith("#")
                                if in_api_endpoint_response:
                                    docs[-1]['response_ok'] = (docs[-1]['response_ok'] + '\n' + line).strip()
                            elif in_api_endpoint_request_header:
                                in_api_endpoint_request_header = not line.startswith("+") and not line.startswith("#")
                                if in_api_endpoint_request_header:
                                    if not line.strip().startswith("+ Headers"):
                                        docs[-1]['headers'] = (docs[-1]['headers'] + '\n' + line).strip()
                            else:
                                # Anything else is the endpoint's prose description.
                                docs[-1]['desc'] = (docs[-1]['desc'] + line)
    for doc in docs:
        if 'desc' in doc:
            doc['desc'] = doc['desc'].strip()
    return docs
def get_main_title(docs):
    """Return the document's MainTitle value, or 'Untitled' when absent."""
    for doc in docs:
        if doc['elem'] == "MainTitle":
            return doc['value']
    return 'Untitled'
def get_main_desc(docs):
    """Return the document's MainDesc value, or a generic default."""
    for doc in docs:
        if doc['elem'] == "MainDesc":
            return doc['value']
    return 'Rest API documentation'
NO_QUERY_RE = re.compile(r'(.*)\{\?.*\}')

def path_no_query(path):
    """Strip a trailing URI-template query block (``{?...}``) from *path*."""
    return NO_QUERY_RE.sub(r'\1', path)
def contain(item, docs):
    """Return True if *docs* already holds an element equivalent to *item*.

    Groups match on title; endpoints match on path (query template ignored).
    """
    for doc in docs:
        if doc['elem'] != item['elem']:
            continue
        if doc['elem'] == 'Group' and doc['title'] == item['title']:
            return True
        if doc['elem'] == 'ApiEndpoint' and path_no_query(doc['path']) == path_no_query(item['path']):
            return True
    return False
def cmp(a, b):
    """Python 2 style three-way comparison: -1, 0 or 1 as a <, ==, > b."""
    if a < b:
        return -1
    if a > b:
        return 1
    return 0
def merge_doc(orig_docs, other_docs):
    """Merge *other_docs* into *orig_docs* in place.

    Elements missing from orig_docs are appended; endpoints present in
    both get their title/desc/method refreshed from other_docs.
    """
    for other in other_docs:
        if not contain(other, orig_docs):
            orig_docs.append(other)
    for orig in orig_docs:
        for other in other_docs:
            if orig['elem'] != other['elem'] or orig['elem'] != 'ApiEndpoint':
                continue
            if path_no_query(orig['path']) != path_no_query(other['path']):
                continue
            orig['title'] = other['title']
            orig['desc'] = other['desc']
            orig['method'] = other['method']
            orig['method_name'] = other['method_name']
def gen_doc(scope, in_path, out_path):
    """Regenerate the API blueprint at *out_path*.

    Parses the existing blueprint (so hand-written prose survives), merges
    in the endpoint JSON lines emitted by the Rust build (*in_path*), and
    rewrites the blueprint grouped and sorted, honoring the global
    EXCLUDED group/endpoint lists.
    """
    parsed_docs = load_doc(scope, out_path)
    # Write to a temp file first; os.rename below makes the update atomic.
    with open(out_path + ".tmp~", "w") as fout:
        fout.write("FORMAT: 1A\n\n")
        fout.write("# %s\n\n" % get_main_title(parsed_docs))
        fout.write("%s\n" % get_main_desc(parsed_docs))
        new_docs = []
        if not os.path.isfile(in_path):
            # print("Source file not exists `%s`, please compile with `cargo build` first" % in_path)
            print("Please compile Pandemia with `cargo build` first.")
            exit(1)
            # NOTE(review): unreachable after exit(1); kept as-is.
            return
        with open(in_path) as f:
            # One JSON object per line describing an endpoint.
            lines = f.readlines()
            for line in lines:
                j = json.loads(line)
                new_docs.append(j)
        merge_doc(parsed_docs, new_docs)
        def sorter(a, b):
            # Order primarily by group name; otherwise keep input order.
            if 'group' in a and 'group' in b:
                return cmp(a['group'], b['group'])
            return 0
        updated_docs = sorted(parsed_docs, key=cmp_to_key(sorter))
        groups = filter(lambda a: a["elem"] == "Group", updated_docs)
        endpoints = sorted(filter(lambda a: a["elem"] == "ApiEndpoint", updated_docs), key=cmp_to_key(lambda a,b: cmp(a['method_name'], b['method_name'])) )
        # Emit each group heading followed by its (sorted) endpoints.
        for group in groups:
            if group['title'] in EXCLUDED['groups']:
                continue
            process_line(group, fout)
            for endpoint in endpoints:
                if endpoint['group'] in EXCLUDED['groups']:
                    continue
                if endpoint['group'] == group['group']:
                    process_line(endpoint, fout)
    os.rename(out_path + '.tmp~', out_path)
# Blueprint parameter line: "+ <name>: <number> - <description>".
BP_PARAM_RE = re.compile(r"\+ (.*?):\s*([0-9]*).*?\-s*(.*)")
# Custom header line: '<Name>: "<value>"' optionally followed by ' - <desc>'.
HEADER_REQ_RE = re.compile(r"(.*?):\s\"(.*?)\"(\s-\s(.*))?")
def parse_query_params(param_str):
    """Parse blueprint ``+ name: value - description`` lines into dicts."""
    params = []
    for raw in param_str.split('\n'):
        match = BP_PARAM_RE.match(raw.strip())
        if not match:
            continue
        name, value, description = (group.strip() for group in match.groups())
        params.append(dict(key=name, value=value, description=description))
    return params
def parse_request_headers(param):
    """Return the default JSON request headers plus any custom ones.

    Custom headers come from param['headers'], one ``Name: "value" - desc``
    line each.
    """
    headers = [
        {
            "key": "Content-Type",
            "value": "application/json"
        },
        {
            "key": "Accept",
            "value": "application/json",
            "description": "Request JSON"
        }
    ]
    if "headers" in param:
        for raw in param['headers'].split("\n"):
            match = HEADER_REQ_RE.match(raw.strip())
            if not match:
                continue
            desc = match.group(3).strip() if match.group(3) else ""
            headers.append(dict(key=match.group(1).strip(),
                                value=match.group(2).strip(),
                                description=desc))
    return headers
def gen_postman(api_scope, input_path, out_path):
    """Convert an API blueprint into a Postman v2.1 collection JSON file.

    Groups become collection folders; endpoints become requests with a
    canned 200 response built from the documented response body.
    """
    parsed_docs = load_doc(api_scope, input_path)
    # Skeleton of a Postman v2.1 collection.
    d = {
        "info": {
            "_postman_id": "cb12386d-1896-449c-93e6-d6da8ff6e800",
            "name": "??",
            "schema": "https://schema.getpostman.com/json/collection/v2.1.0/collection.json"
        },
        "item": []
    }
    for m in parsed_docs:
        if m['elem'] == "MainTitle":
            d['info']['name'] = m['value'] + " (" + api_scope + ")"
        elif m['elem'] == "Group":
            # New folder; endpoints that follow are appended into it.
            d['item'].append({'name': m['title'], 'item': []})
        elif m['elem'] == "ApiEndpoint":
            if type(d['item'][-1]['item']) is not list:
                raise Exception("prev element not `Group`")
            query_params = parse_query_params(m['request_param'])
            headers = parse_request_headers(m)
            d['item'][-1]['item'].append({
                'name': m['title'],
                'request': {
                    'method': m['method'],
                    'header': headers,
                    'body': {
                        'mode': "raw",
                        'raw': pretty_json_str(m['request_json'])
                    },
                    "url": {
                        "raw": "{{base_url}}/%s" % m['path'],
                        "host": ["{{base_url}}"],
                        "path": list(filter(lambda a: len(a.strip()) > 0, m["path"].split("/"))),
                        "query": query_params
                    }
                },
                'response': [
                    {
                        'header': [
                            {
                                "key": "Content-Type",
                                "value": "application/json"
                            }
                        ],
                        'status': '200 OK',
                        'code': 200,
                        'body': pretty_json_str(m['response_ok'])
                    }
                ]
            })
    with open(out_path, "w") as fout:
        fout.write(json.dumps(d, indent=4, sort_keys=False))
def process_line(j, fout):
    """Write one parsed element *j* back out as API-blueprint markdown.

    Groups become "## Group ..." headings; endpoints get their heading,
    description, headers, parameters/request body and response sections.
    Endpoints listed in EXCLUDED['endpoints'] are skipped.
    """
    global EXCLUDED
    if j["elem"] == "Group":
        title = j["title"].strip()
        fout.write("## Group %s\n" % title)
        if j["desc"] and j["desc"] != "":
            fout.write("\n%s\n\n" % j["desc"].strip())
        else:
            fout.write("\n")
    elif j["elem"] == "ApiEndpoint":
        if j['path'] in EXCLUDED['endpoints']:
            return
        title = j['title']
        if not title or title == "":
            # Fall back to a title derived from the method name.
            title = j['method_name'].replace('_', ' ').title()
        fout.write("### %s [%s %s]\n\n" % (title, j['method'], j['path']))
        fout.write("%s\n\n" % j['desc'])
        if 'headers' in j and j['headers'].strip() != "":
            headers = j['headers']
            fout.write("+ Request plain text\n\n")
            fout.write("    + Headers\n\n")
            fout.write("            %s\n\n" % headers)
        if j['request_param'] and j['request_param'] != "":
            fout.write("+ Parameters\n\n")
            request_param = j['request_param']
            fout.write("    %s\n\n" % request_param)
        elif j['request_json'] and j['request_json'] != "":
            fout.write("+ Request JSON (application/json)\n\n")
            try:
                fout.write("%s\n\n" % json_print(j['request_json'].strip()))
            except Exception as e:
                print("e: %s" % e)
                raise Exception("Format json tidak valid untuk request API `%s`: `%s`" % (j['path'], j['request_json']))
        fout.write("+ Response 200 (application/json)\n\n")
        if j['response_ok'] and j['response_ok'] != "":
            try:
                fout.write("%s\n\n" % json_print(j['response_ok']))
            except Exception as e:
                raise Exception("Format json tidak valid untuk response API `%s`" % j['path'])
        else:
            # No documented response: emit an empty JSON object.
            fout.write("%s\n\n" % ident_4("{}"))
import yaml
EXCLUDED = {}
def main():
    """Regenerate both API blueprints and their Postman collections."""
    global EXCLUDED
    # Groups/endpoints listed in excludes.yaml are left out of the output.
    with open('api-docs/excludes.yaml') as f:
        EXCLUDED = yaml.load(f, Loader=yaml.FullLoader)
    # print(EXCLUDED)
    public_input_path = get_path("api-docs/public-endpoints.raw.txt")
    private_input_path = get_path("api-docs/private-endpoints.raw.txt")
    public_blp = get_path("api-docs/public-api.md")
    private_blp = get_path("api-docs/private-api.md")
    gen_doc("public", public_input_path, public_blp)
    gen_doc("private", private_input_path, private_blp)
    gen_postman("public", public_blp, get_path("target/public-api.postman"))
    gen_postman("private", private_blp, get_path("target/private-api.postman"))
if __name__ == "__main__":
    main()
|
from datetime import datetime
import random
import sys
# datetime object containing current date and time
now = datetime.now()
dt_string = now.strftime("%Y-%m-%dT%H:%M:%S")
# LoRa gateway EUIs whose (simulated) receive timestamps we emit.
gtws = ['eui-b827ebfffe998292',
        'eui-b827ebfffe411ace',
        'eui-b827ebfffe13b290',
        'eui-b827ebfffe71f386']
# Per-sensor sub-second offsets, one row per sensor id, one column per
# gateway. NOTE(review): units look like microseconds — confirm.
ms = [[125300, 125300, 125300, 125300], [125310, 125300, 125290, 125305]]
# Sensor id selects the ms row; only 0 and 1 are valid values of argv[1].
sensorid = int(sys.argv[1])
line = ''
for i in range(len(gtws)):
    # random error
    diff = random.randint(1, 50)
    # diff = 0
    line += gtws[i] + ";" + dt_string + "." + str(ms[sensorid][i] + diff) + "Z&"
print(line)
# print('eui-b827ebfffe998292;2020-02-06T16:03:40.001312Z&eui-b827ebfffed3b23f;&eui-b827ebfffe411ace;2020-02-06T16:03:40.001312Z&eui-b827ebfffe13b290;2020-02-06T16:03:40.001312Z&eui-b827ebfffe71f386;2020-02-06T16:03:40.001312Z')
import cv2
# Load the lung X-ray and collapse it to a single gray channel.
img = cv2.imread("../lung_gray.png")
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# Box (mean) filter, 3x3 kernel.
res = cv2.boxFilter(img, -1, (3,3))
cv2.imshow("boxfilter", res)
cv2.imwrite("./boxfilter_image.png", res)
# Gaussian filter, 3x3 kernel, sigma = 1.5.
gussian_res = cv2.GaussianBlur(img,(3, 3),1.5)
cv2.imwrite("./gussian_res_image.png", gussian_res)
# Median filter (non-linear kernel).
# The median filter eliminates salt noise at the expense of some loss of
# picture clarity.
salt_img = cv2.imread("./salt_noisy.png")
med_res = cv2.medianBlur(salt_img, 3)
cv2.imwrite("./med_res_image.png", med_res)
# Keep the preview window up for 10 s, then tear everything down.
cv2.waitKey(10000)
cv2.destroyAllWindows()
""" Provides a CSV interface for sh-style whitespace separated values. """
from contextlib import contextmanager
from subprocess import list2cmdline
from shlex import split as cmdline2list
from .csv2 import csv_open
class WsvValueError(ValueError):
    """ Raised when a WSV line cannot be parsed. """

    def __init__(self, message, line_num):
        # Keep the raw line number available and prefix it to the message.
        self.line_num = line_num
        super(WsvValueError, self).__init__("line {0}: {1}".format(line_num, message))
class WsvReader(object):
    """ Implements a CsvReader-compatible object on a Whitespace Separated Value stream. """

    def __init__(self, in_io, lineterminator=None):
        self.in_iter = iter(in_io)
        self.lineterminator = lineterminator
        self.line_count = 0

    def __iter__(self):
        return self

    def __next__(self):
        # Python 3 iteration protocol delegates to the Python 2 style next().
        return self.next()

    def next(self):
        """ Get the next item in the iteration. """
        # python2
        raw = next(self.in_iter)
        self.line_count += 1
        term = self.lineterminator
        if term and raw.endswith(term):
            raw = raw[:-len(term)]
        try:
            return cmdline2list(raw)
        except ValueError as ex:
            # Re-raise with the 1-based line number for context.
            raise WsvValueError(str(ex), self.line_count)
class WsvWriter(object):
    """ Implements a CsvWriter-compatible object on a Whitespace Separated Value stream. """

    def __init__(self, out_io, lineterminator=None):
        self.out_io = out_io
        # Default to "\n" and let the output stream translate linefeeds.
        self.lineterminator = lineterminator or "\n"

    def writerow(self, row):
        """ Quote and write *row* as one whitespace-separated line. """
        self.out_io.write(list2cmdline(row))
        self.out_io.write(self.lineterminator)
@contextmanager
def open_wsv_reader(
    in_io,
    file_name,
    encoding=None,
    errors=None,
    lineterminator=None,
    buffering=None,
):
    """ High-level function to open a CSV reader.
    If file_name is None, then in_io will be used.
    """
    DEFAULT_BUFFERING = -1
    # BOM-aware UTF-8 by default so Excel exports round-trip.
    encoding = encoding or 'utf_8_sig'
    # 'strict' | 'ignore' | 'replace' | 'backslashreplace'
    errors = errors or 'strict'
    buffering = buffering if buffering is not None else DEFAULT_BUFFERING
    read_text_io_mode = 'r'
    in_file_id = file_name
    should_close_in_file = True
    if not in_file_id:
        # No file name: reopen the caller's stream via its descriptor and
        # leave closing the descriptor to the caller (closefd=False).
        in_file_id = in_io.fileno()
        should_close_in_file = False
    # NOTE(review): csv_open comes from .csv2 — presumably an io.open
    # wrapper with CSV-friendly newline handling; confirm.
    in_io = csv_open(
        in_file_id,
        mode=read_text_io_mode,
        encoding=encoding,
        errors=errors,
        buffering=buffering,
        closefd=should_close_in_file,
    )
    in_file_io = None
    if should_close_in_file:
        in_file_io = in_io
    in_csv = WsvReader(
        in_io,
        lineterminator=lineterminator,
    )
    # @contextmanager: enter: yield this object to the with statement:
    yield in_csv
    # @contextmanager: exit: after the yield close the stream:
    if in_file_io:
        in_file_io.close()
@contextmanager
def open_wsv_writer(
    out_io,
    file_name,
    encoding=None,
    errors=None,
    lineterminator=None,
    buffering=None,
):
    """ Basic function to open a WSV writer with various options.

    If file_name is None, then out_io will be used as the underlying stream:
    its file descriptor is wrapped without taking ownership (closefd=False).

    Args:
        out_io: fallback stream whose fileno() is wrapped when file_name is None.
        file_name: path of the file to open for writing, or None to reuse out_io.
        encoding: text encoding; defaults to 'utf_8'.
        errors: codec error policy; defaults to 'strict'.
        lineterminator: terminator WsvWriter appends after each row, or None.
        buffering: buffering argument for the open call; defaults to -1.

    Yields:
        WsvWriter over the opened stream.
    """
    DEFAULT_BUFFERING = -1
    encoding = encoding or 'utf_8'
    # 'strict' | 'ignore' | 'replace' | 'backslashreplace'
    errors = errors or 'strict'
    buffering = buffering if buffering is not None else DEFAULT_BUFFERING
    write_text_io_mode = 'w'
    out_file_id = file_name
    should_close_out_file = True
    if not out_file_id:
        out_file_id = out_io.fileno()
        should_close_out_file = False
    out_io = csv_open(
        out_file_id,
        mode=write_text_io_mode,
        encoding=encoding,
        errors=errors,
        buffering=buffering,
        closefd=should_close_out_file,
    )
    out_file_io = out_io if should_close_out_file else None
    out_csv = WsvWriter(
        out_io,
        lineterminator=lineterminator,
    )
    # BUG FIX: wrap the yield in try/finally so the stream is closed even when
    # the caller's with-block raises; previously the close was skipped.
    try:
        # @contextmanager: enter: yield this object to the with statement:
        yield out_csv
    finally:
        # @contextmanager: exit: close the stream we own, even on error:
        if out_file_io:
            out_file_io.close()
|
import csv
import nltk.data
import re
from sklearn import feature_extraction
import random
import os
from collections import defaultdict
# Stance labels; the order fixes the confusion-matrix rows/columns below.
LABELS = ['agree', 'disagree', 'discuss', 'unrelated']
# Punkt sentence tokenizer (requires the NLTK 'punkt' data package installed).
tokenizer = nltk.data.load('tokenizers/punkt/english.pickle')
def print_confusion_matrix(cm):
    """Pretty-print a 4x4 stance confusion matrix and accuracy summaries.

    Rows/columns follow LABELS order; the last label ('unrelated') is
    treated separately when computing relatedness/stance accuracies.
    """
    row_fmt = "|{:^11}|{:^11}|{:^11}|{:^11}|{:^11}|"
    header = row_fmt.format('', *LABELS)
    rule = "-" * len(header)
    lines = ['CONFUSION MATRIX:', rule, header, rule]
    hit = 0.0
    total = 0
    related_hit = 0.0
    stance_hit = 0
    stance_total = 0.0
    for idx, row in enumerate(cm):
        if idx < 3:
            # First three labels are "related": any non-'unrelated' prediction
            # counts toward relatedness; the diagonal counts toward stance.
            related_hit += sum(row[:-1])
            stance_total += sum(row)
            stance_hit += row[idx]
        else:
            related_hit += row[idx]
        hit += row[idx]
        total += sum(row)
        lines.append(row_fmt.format(LABELS[idx], *row))
    lines.append(rule)
    lines.append("ACCURACY: {:.3f}".format(hit / total))
    lines.append("ACCURACY-relatedness: {:.3f}".format(related_hit / total))
    lines.append("ACCURACY-stance: {:.3f}".format(stance_hit / stance_total))
    print('\n'.join(lines))
# Shared WordNet lemmatizer instance (avoid re-constructing it per token).
_wnl = nltk.WordNetLemmatizer()
def normalize_word(w):
    """Lemmatize a token with WordNet and lowercase the result."""
    lemma = _wnl.lemmatize(w)
    return lemma.lower()
def remove_stopwords(l):
    """Return the token list with sklearn's English stopwords removed."""
    stopwords = feature_extraction.text.ENGLISH_STOP_WORDS
    return [token for token in l if token not in stopwords]
def get_tokenized_lemmas(s):
    """Tokenize a string, drop stopwords, and lemmatize each remaining token."""
    tokens = remove_stopwords(nltk.word_tokenize(s))
    return [normalize_word(token) for token in tokens]
def clean(s):
    """Clean a string: keep only alphanumeric word runs, join with spaces, lowercase."""
    words = re.findall(r'\w+', s, flags=re.UNICODE)
    joined = " ".join(words)
    return joined.lower()
def unicode_csv_reader(utf8_data, dialect=csv.excel, **kwargs):
    """Yield CSV rows with every cell coerced to str (python3 port of a
    py2 unicode-decoding reader)."""
    for record in csv.reader(utf8_data, dialect=dialect, **kwargs):
        # yield [unicode(cell, 'utf-8') for cell in record]  # py2 original
        yield [str(cell) for cell in record]  # NOTE: Changed for python3.
def load_body(filename):
    """Load and preprocess article bodies from a CSV file.

    The CSV must have 'Body ID' and 'articleBody' columns.

    Args:
        filename: path to the bodies CSV file.

    Returns:
        (id2body, id2body_sentences): body id -> tokenized lemmas of the whole
        body, and body id -> list of tokenized lemmas per sentence.
    """
    # BUG FIX: removed an unreachable second implementation that followed the
    # return statement and referenced the undefined name `codecs`.
    id2body = {}
    id2body_sentences = {}
    with open(filename, encoding='utf-8', errors='ignore') as fh:  # NOTE: Added encoding-'utf-8'
        reader = csv.DictReader(fh)
        for row in reader:
            id = row['Body ID']
            body = str(row['articleBody']).strip()  # NOTE: Changed for python3.
            body_sentences = tokenizer.tokenize(body)
            clean_body = get_tokenized_lemmas(clean(body))
            # Preprocess each sentence the same way as the whole body.
            clean_body_sentences = [
                get_tokenized_lemmas(clean(sentence))
                for sentence in body_sentences
            ]
            id2body[id] = clean_body
            id2body_sentences[id] = clean_body_sentences
    return id2body, id2body_sentences
def load_title(filename):
    """Load and preprocess headlines from a CSV file.

    The CSV must have 'Headline' and 'Body ID' columns.

    Args:
        filename: path to the headlines CSV file.

    Returns:
        List of (tokenized headline lemmas, body id) tuples.
    """
    # BUG FIX: removed an unreachable second implementation that followed the
    # return statement (dead code left over from a python2 version).
    data = []
    with open(filename, errors='ignore') as fh:  # NOTE: Changed for python3.
        reader = csv.DictReader(fh)
        for row in reader:
            title = str(row['Headline']).strip()  # NOTE: Changed for python3.
            clean_title = get_tokenized_lemmas(clean(title))
            # ignore the stance if there is any
            data.append((clean_title, row['Body ID']))
    return data
def load_stance(filename):
    """Load (tokenized headline, body id, stance) triples from a stance CSV.

    Args:
        filename: path to a CSV whose rows are (headline, body id, stance).

    Returns:
        List of (tokenized headline lemmas, body id, stripped stance) tuples.
    """
    data = []
    # BUG FIX: the file handle was previously opened inline and never closed;
    # a with-statement guarantees the close.
    with open(filename, errors='ignore') as fh:  # NOTE: Changed for python3.
        for title, id, stance in unicode_csv_reader(fh):
            clean_title = get_tokenized_lemmas(clean(title))
            data.append((clean_title, id, stance.strip()))
    return data
|
from rest_framework.decorators import action
from rest_framework.permissions import IsAdminUser
from rest_framework.response import Response
from rest_framework.viewsets import ModelViewSet
from goods.models import SKU, GoodsCategory, SPU
from meiduo_admin.serializers.categorys import CategoriesSerializer
from meiduo_admin.serializers.options import SpuSpecSerializer
from meiduo_admin.serializers.skus import SKUSerializer
class SkusViewSet(ModelViewSet):
    """CRUD viewset for SKU records; restricted to admin users."""

    # Require an authenticated admin user.
    permission_classes = [IsAdminUser]
    # 1. Base queryset, ordered by id for stable pagination.
    queryset = SKU.objects.all().order_by('id')
    # 2. Serializer used for all actions.
    serializer_class = SKUSerializer
    # Regex the router uses to capture the lookup value when generating URLs.
    # BUG FIX: raw string; '\d' in a plain string is an invalid escape
    # sequence (DeprecationWarning, SyntaxError in future Python versions).
    lookup_value_regex = r'\d+'

    def get_queryset(self):
        """Return the base queryset, optionally filtered by ?keyword= on name."""
        keyword = self.request.query_params.get('keyword')
        if not keyword:
            return self.queryset
        # Reuse the already-fetched keyword instead of re-reading query_params.
        return self.queryset.filter(name__contains=keyword)

    @action(methods=['get'], detail=False)
    def categories(self, request):
        """Return third-level (leaf) goods categories.

        A leaf category is one with no sub-categories (subs is null).
        """
        # Alternative: GoodsCategory.objects.filter(subs__id=None)
        categories = GoodsCategory.objects.filter(subs__isnull=True)
        ser = CategoriesSerializer(categories, many=True)
        return Response(ser.data)

    def specs(self, request, pk):
        """Return the spec rows associated with one SPU.

        :param request: incoming request
        :param pk: primary key of the SPU row
        :return: serialized list of the SPU's specs
        """
        # 1. Fetch the SPU object.
        spu = SPU.objects.get(id=pk)
        # 2. Follow the reverse relation to its specs.
        data = spu.specs.all()
        # 3. Serialize and return the spec info.
        ser = SpuSpecSerializer(data, many=True)
        return Response(ser.data)
|
"""
Simple functions for interacting with coresignal in a progromatic way
"""
# TODO: make these return WH objects
import pandas as pd
import requests
import numpy as np
import os, sys
import json
from pydantic import Json, HttpUrl
from typing import List, Dict, Optional, Any, Union
from WH_Utils.Objects.Company import Company
from WH_Utils.Objects.Prospect import Prospect
from WH_Utils.Objects.Enums import EventType
from datetime import datetime
def get_person_by_id(id_number: int, auth_dict: Dict[str, Any]) -> Any:
    """Fetch a member record from coresignal by its numeric id.

    Args
    -------
    id_number: int
        the coresignal id number. Should be aquired from a coresignal query.
    auth_dict: auth_dict
        the authorization header. Check here for instructions on how to make this

    Returns
    ----------
    person_data: dict
        the full response from coresignal
    """
    url = "https://api.coresignal.com/dbapi/v1/collect/member/{}".format(id_number)
    response = requests.get(url, headers=auth_dict)
    # Guard clause: anything other than 200 is an error.
    if response.status_code != 200:
        raise ValueError(
            "Bad Response Code. Response code: {}".format(response.status_code)
        )
    return json.loads(response.text)
def get_person_by_url(linkedin_url: str, auth_dict: Dict[str, Any]) -> Any:
    """Return the coresignal record for a person given their linkedin URL.

    Args
    -----
    linkedin_url: HttpUrl
        the linkedin url of the person you want info on
    auth_dict: auth_dict
        the authorization header. Check here for instructions on how to make this

    Returns
    ----------
    person_data: dict
        the full json style respose of the person from coresignal
    """
    # Drop a single trailing slash, then take the final path segment (slug).
    trimmed = linkedin_url[:-1] if linkedin_url.endswith("/") else linkedin_url
    short_hand = trimmed.split("/")[-1]
    url = "https://api.coresignal.com/dbapi/v1/collect/member/{}".format(short_hand)
    response = requests.get(url, headers=auth_dict)
    if response.status_code != 200:
        raise ValueError(
            "Bad Response Code. Response code: {}".format(response.status_code)
        )
    return json.loads(response.text)
def find_employees_by_work_history(
    company_url: str, auth_dict: Dict[str, Any]
) -> List[int]:
    """
    Finds a list of employee coresignal id numbers based on where the employees worked.

    Args
    ------
    company_url: HttpUrl
        the linkedin_url of the company you want to find past employees of.
    auth_dict: auth_dict
        the authorization header. Check here for instructions on how to make this

    Returns
    --------
    person_ids: List[int]
        list of ints where every item is an id number of someone who worked at the target comapny

    Raises
    ------
    ValueError: on a non-200 response.
    """
    url = "https://api.coresignal.com/dbapi/v1/search/member"
    payload = {"experience_company_linkedin_url": company_url}
    response = requests.post(url, headers=auth_dict, json=payload)
    # Consistent with the other helpers: fail loudly on a bad status.
    if response.status_code != 200:
        raise ValueError(
            "Bad Response Code. Response code: {}".format(response.status_code)
        )
    # BUG FIX: parse the JSON array properly. The old manual parse,
    # response.text[1:-1].split(","), raised ValueError on an empty
    # result list ("[]") because int("") is invalid.
    return [int(x) for x in response.json()]
def coresingal_to_prospect(
    id: Union[int, str],
    auth_dict: Dict[str, Any],
    event_type: Optional[EventType] = None,
    company_id: Optional[str] = None,
) -> Prospect:
    """Build a prospect from a coresignal ID or a linkedin url.

    Args:
        id: Union[int, str]
            coresignal id (int) or linkedin url (str)
        auth_dict: Dict[str, Any]
            the authorization header. Check here for instructions on how to make this
        event_type: Optional[EventType]
            if you know the event type for this prospect you can add it here
        company_id: Optional[Union[int, str]]
            If they are associated with a company event and we know the company, put it here (the WH id)

    Returns
    --------
    prospect: Prospect
        the prospect
    """
    # TODO: cut down extra data
    # TODO: add company id to extra data
    if isinstance(id, int):
        raw = get_person_by_id(id, auth_dict)
    elif isinstance(id, str):
        raw = get_person_by_url(id, auth_dict)
    else:
        raise ValueError("ID data type is weird")
    fields = {
        "id": None,
        "name": raw["name"],
        "location": raw["location"],
        "coresignal_id": raw["id"],
        "linkedin_url": raw["url"],
        "picture": raw["logo_url"],
        "event_type": event_type,
        "full_data": raw,
        "analytics": None,
        "date_created": None,
        "last_modified": None,
    }
    return Prospect(data_dict=fields, event_type=event_type)
def coresignal_to_company(id: Union[int, str], auth_dict: Dict[str, str]):
    """Fetch a company record from coresignal and wrap it in a WH Company.

    Args:
        id: coresignal company id (or url shorthand) appended to the collect path.
        auth_dict: the authorization header for the coresignal API.

    Returns:
        Company built from the coresignal response.

    Raises:
        ValueError: on a non-200 response.
    """
    path = "https://api.coresignal.com/dbapi/v1/linkedin/company/collect/{}".format(id)
    response = requests.get(path, headers=auth_dict)
    # BUG FIX: raise instead of assert — asserts are stripped under
    # `python -O`, and the siblings in this module raise ValueError.
    if response.status_code != 200:
        raise ValueError("Bad Response Code: {}".format(response.text))
    response_data = response.json()
    data_dict = {
        "id": None,
        "name": response_data["name"],
        "coresignal_id": response_data["id"],
        "linkedin_url": response_data["url"],
        "industry": response_data["industry"],
        "description": response_data["description"],
        "location": response_data["headquarters_city"],
        "logo": response_data["logo_url"],
        "type": response_data["type"],
        "website": response_data["website"],
        "full_data": response_data,
        "created": datetime.now().date(),
        "last_modified": datetime.now().date(),
        "CIK": "Unknown",
    }
    return Company(data_dict=data_dict)
|
import sys
import pandas
import networkx as nx
from pyspark import SparkContext
from pyspark.sql import SQLContext
from pyspark.sql.types import *
from graphframes import *
# Local Spark context and SQL context shared by all helpers below.
sc=SparkContext("local", "degree.py")
sqlContext = SQLContext(sc)
''' return the simple closure of the graph as a graphframe.'''
def simple(g):
    # BUG FIX: the body was an empty "YOUR CODE HERE" template (a syntax
    # error); implemented following the template's own comments.
    # Extract edges and make a data frame of "flipped" edges
    edges = g.edges
    flipped = edges.selectExpr("dst as src", "src as dst")
    # Combine old and new edges. Distinctify to eliminate multi-edges
    # Filter to eliminate self-loops.
    # A multigraph with loops will be closured to a simple graph
    # If we try to undirect an undirected graph, no harm done
    undirected = edges.union(flipped).distinct().filter("src != dst")
    return GraphFrame(g.vertices, undirected)
''' Return a data frame of the degree distribution of each edge in the provided graphframe '''
def degreedist(g):
    # BUG FIX: the body was an empty "YOUR CODE HERE" template (a syntax
    # error). Generate a DF with degree,count by grouping the per-vertex
    # degrees and counting how many vertices share each degree.
    return g.degrees.groupBy("degree").count()
def findDegree(G, ver):
    """Return the degree of vertex ``ver`` in adjacency matrix ``G.diri``.

    ``G.v`` is the vertex count. A self-loop contributes two to the
    degree: once in the row scan and once more explicitly.
    """
    degree = sum(1 for j in range(G.v) if G.diri[ver][j] == 1)
    if G.diri[ver][ver] == 1:
        degree += 1
    return degree
''' Read in an edgelist file with lines of the format id1<delim>id2
    and return a corresponding graphframe. If "large" we assume
    a header row and that delim = " ", otherwise no header and
    delim = ","'''
def readFile(filename, large, sqlContext=sqlContext):
    # BUG FIX: the body was a "YOUR CODE HERE" template that returned an
    # undefined name `g`; implemented following the template's comments.
    lines = sc.textFile(filename)
    if large:
        delim=" "
        # Strip off header row.
        lines = lines.mapPartitionsWithIndex(lambda ind,it: iter(list(it)[1:]) if ind==0 else it)
    else:
        delim=","
    # Extract pairs from input file and convert to data frame matching
    # schema for graphframe edges.
    eschema = StructType([StructField("src", IntegerType()),
                          StructField("dst", IntegerType())])
    pairs = lines.map(lambda line: line.split(delim)) \
                 .map(lambda p: (int(p[0]), int(p[1])))
    e = sqlContext.createDataFrame(pairs, eschema)
    # Extract all endpoints from input file (hence flatmap) and create
    # data frame containing all those node names in schema matching
    # graphframe vertices
    vschema = StructType([StructField("id", IntegerType())])
    nodes = lines.flatMap(lambda line: line.split(delim)) \
                 .map(lambda node: (int(node),)) \
                 .distinct()
    v = sqlContext.createDataFrame(nodes, vschema)
    # Create graphframe g from the vertices and edges.
    g = GraphFrame(v, e)
    return g
# main stuff
# If you got a file, yo, I'll parse it.
if len(sys.argv) > 1:
    filename = sys.argv[1]
    # Optional second argument 'large' switches delimiter/header handling.
    if len(sys.argv) > 2 and sys.argv[2]=='large':
        large=True
    else:
        large=False
    print("Processing input file " + filename)
    g = readFile(filename, large)
    print("Original graph has " + str(g.edges.count()) + " directed edges and " + str(g.vertices.count()) + " vertices.")
    g2 = simple(g)
    # The closure stores each undirected edge in both directions, hence /2.
    print("Simple graph has " + str(g2.edges.count()/2) + " undirected edges.")
    distrib = degreedist(g2)
    distrib.show()
    nodecount = g2.vertices.count()
    print("Graph has " + str(nodecount) + " vertices.")
    # Name the output CSV after the input file's basename.
    out = filename.split("/")[-1]
    print("Writing distribution to file " + out + ".csv")
    distrib.toPandas().to_csv(out + ".csv")
# Otherwise, generate some random graphs.
else:
    print("Generating random graphs.")
    vschema = StructType([StructField("id", IntegerType())])
    eschema = StructType([StructField("src", IntegerType()),StructField("dst", IntegerType())])
    # Two G(n,p) and two G(n,m) random graphs with fixed seeds for repeatability.
    gnp1 = nx.gnp_random_graph(100, 0.05, seed=1234)
    gnp2 = nx.gnp_random_graph(2000, 0.01, seed=5130303)
    gnm1 = nx.gnm_random_graph(100,1000, seed=27695)
    gnm2 = nx.gnm_random_graph(1000,100000, seed=9999)
    todo = {"gnp1": gnp1, "gnp2": gnp2, "gnm1": gnm1, "gnm2": gnm2}
    for gx in todo:
        print("Processing graph " + gx)
        v = sqlContext.createDataFrame(sc.parallelize(todo[gx].nodes()), vschema)
        e = sqlContext.createDataFrame(sc.parallelize(todo[gx].edges()), eschema)
        g = simple(GraphFrame(v,e))
        distrib = degreedist(g)
        print("Writing distribution to file " + gx + ".csv")
        distrib.toPandas().to_csv(gx + ".csv")
|
""" This module contains unit tests for utils-module. """
import sys
sys.path.append('../cloudnetpy')
from dataclasses import dataclass
import numpy as np
import numpy.ma as ma
from numpy.testing import assert_array_almost_equal
import pytest
from cloudnetpy import utils
def test_binvec():
    """ Unit tests for utils.binvec(). """
    # Each case maps bin centers to the expected bin-edge vector.
    cases = [
        ([1, 2, 3], [0.5, 1.5, 2.5, 3.5]),
        ([0.1, 0.3, 0.5], [0.0, 0.2, 0.4, 0.6]),
        ([0.02, 0.04, 0.06], [0.01, 0.03, 0.05, 0.07]),
    ]
    for arg1, out1 in cases:
        assert_array_almost_equal(utils.binvec(arg1), out1)
def test_isbit():
    """ Unit tests for utils.isbit(). """
    # (value, bit index) -> whether that bit is set.
    expected = {(0, 0): False, (1, 0): True, (2, 0): False, (2, 1): True}
    for (value, bit), result in expected.items():
        assert utils.isbit(value, bit) is result
@pytest.mark.parametrize("n, k, res", [
    (0, 0, 1),
    (3, 0, 3),
    (4, 0, 5),
    (4, 1, 6),
])
def test_setbit(n, k, res):
    """ Unit tests for utils.setbit(). """
    # Setting bit k of n ORs in 2**k, e.g. setbit(4, 1) == 4 | 2 == 6.
    assert utils.setbit(n, k) == res
def test_seconds2hours():
    """ Unit tests for utils.seconds2hours(). """
    # NOTE(review): the expected values imply seconds are converted to hours
    # within a day, with midnight mapping to 24 rather than 0 — confirm
    # against the utils implementation.
    n0 = np.array([1095379200])
    assert utils.seconds2hours(n0) == [24]
    n1 = np.array([12*60*60])
    assert utils.seconds2hours(n0 + n1) == [12]
def test_rebin_2d():
    """ Unit tests for utils.rebin_2d(). """
    # Data values 1..7 at coordinates x are re-binned onto centers 2, 4, 6;
    # e.g. x = 1, 2, 2.99 fall in the first bin, giving mean(1, 2, 3) == 2.
    x = np.array([1, 2, 2.99, 4, 4.99, 6, 7])
    xnew = np.array([2, 4, 6])
    data = np.array([range(1, 8), range(1, 8)]).T
    data_i = utils.rebin_2d(x, data, xnew)
    assert_array_almost_equal(data_i,np.array([[2, 4.5, 6.5],
                                               [2, 4.5, 6.5]]).T)
def test_filter_isolated_pixels():
    """ Unit tests for utils.filter_isolated_pixels(). """
    # Lone 1-pixels (no neighbors) are removed; the connected run on the
    # top row survives.
    x = np.array([[0, 0, 1, 1, 1],
                  [0, 0, 0, 0, 0],
                  [1, 0, 1, 0, 0],
                  [0, 0, 0, 0, 1]])
    x2 = np.array([[0, 0, 1, 1, 1],
                   [0, 0, 0, 0, 0],
                   [0, 0, 0, 0, 0],
                   [0, 0, 0, 0, 0]])
    assert_array_almost_equal(utils.filter_isolated_pixels(x), x2)
def test_ffill():
    """Units tests for utils.ffill()."""
    # Elements equal to the fill target (default 0) are replaced with the
    # most recent other value along the last axis.
    x = np.array([0, 5, 0, 0, 2, 0])
    res = np.array([0, 5, 5, 5, 2, 2])
    assert_array_almost_equal(utils.ffill(x), res)
    # 2D input: filling works row by row.
    x = np.array([[1, 0, 2, 0],
                  [0, 5, 0, 0]])
    res = np.array([[1, 1, 2, 2],
                    [0, 5, 5, 5]])
    assert_array_almost_equal(utils.ffill(x), res)
    # With value=1, the 1-elements (not zeros) are forward-filled.
    x = np.array([[5, 1, 1, 6],
                  [3, 0, 1, 0]])
    res = np.array([[5, 5, 5, 6],
                    [3, 0, 0, 0]])
    assert_array_almost_equal(utils.ffill(x, value=1), res)
def test_cumsumr():
    """Unit tests for utils.cumsumr()."""
    # Cumulative sum that resets to zero whenever a zero is encountered.
    x = np.array([0, 1, 2, 0, 1, 1])
    res = np.array([0, 1, 3, 0, 1, 2])
    assert_array_almost_equal(utils.cumsumr(x), res)
    # Resetting sum along axis=1 (rows).
    x = np.array([[0, 1, 1, 0],
                  [0, 5, 0, 0]])
    res = np.array([[0, 1, 2, 0],
                    [0, 5, 0, 0]])
    assert_array_almost_equal(utils.cumsumr(x, axis=1), res)
    # Resetting sum along axis=0 (columns).
    x = np.array([[0, 1, 1, 0],
                  [0, 5, 0, 0]])
    res = np.array([[0, 1, 1, 0],
                    [0, 6, 0, 0]])
    assert_array_almost_equal(utils.cumsumr(x, axis=0), res)
    # Default axis behaves like axis=0 for 2D input.
    x = np.array([[0, 1, 1, 0],
                  [0, 5, 0, 0]])
    res = np.array([[0, 1, 1, 0],
                    [0, 6, 0, 0]])
    assert_array_almost_equal(utils.cumsumr(x), res)
@pytest.mark.parametrize("input, output", [
    (np.array([1, 2, 3]), False),
    (ma.array([1, 2, 3]), False),
    (2, True),
    ((2.5,), True),          # one-element tuple counts as scalar
    ((2.5, 3.5), False),
    ([3], True),             # one-element list counts as scalar
    ([3, 4], False),
    (np.array(5), True),     # 0-d arrays count as scalar
    (ma.array(5.2), True),
    (ma.array([1, 2, 3], mask=True), False),
    (ma.array([1, 2, 3], mask=False), False),
    ([], False),
])
def test_isscalar(input, output):
    """Unit tests for utils.isscalar()."""
    assert output == utils.isscalar(input)
@dataclass
class Data:
    """Minimal stand-in for a netCDF-like container used by the tests below."""
    alt: np.ndarray  # altitude values
    units: str  # units attribute of the variable
    def __getitem__(self, item):
        # Every key lookup returns the altitude array, regardless of the key.
        return self.alt
def test_n_elements():
    """Unit tests for utils.n_elements()."""
    # How many grid elements cover the requested span, rounded to nearest.
    x = np.arange(1, 10)
    assert utils.n_elements(x, 5) == 5
    assert utils.n_elements(x, 5.4) == 5
    assert utils.n_elements(x, 5.5) == 6
    x = np.linspace(0, 10, 21)
    assert utils.n_elements(x, 3.5) == 7
    # NOTE(review): in 'time' mode the span appears to be given in minutes
    # on an hour-valued grid — confirm against the utils implementation.
    x = np.linspace(0, 1, 61)
    assert utils.n_elements(x, 30, 'time') == 30
    x = np.linspace(0, 6, (6*60+1)*2)
    assert utils.n_elements(x, 10, 'time') == 20
@pytest.mark.parametrize("input, bases, tops", [
    ([1, 1, 1, 0, 0, 0, 1, 1, 1], [0, 6], [2, 8]),
    ([0, 0, 0, 1, 0, 0], [3], [3]),
    ([0, 0, 0, 1, 1, 1], [3], [5]),
    ([0, 0, 0, 0, 0, 0], [], []),
    ([1, 1, 1], [0], [2]),
])
def test_bases_and_tops(input, bases, tops):
    """Unit tests for utils.bases_and_tops().

    Bases/tops are the first/last indices of each run of ones.
    """
    assert_array_almost_equal(utils.bases_and_tops(input)[0], bases)
    assert_array_almost_equal(utils.bases_and_tops(input)[1], tops)
def test_l2_norm():
    """Unit tests for utils.l2norm()"""
    x1 = np.array([2, 3])
    x2 = np.array([3, 4])
    assert_array_almost_equal(utils.l2norm(x1, x2), np.sqrt([13, 25]))
    # Fully masked second argument: result falls back to |x1|.
    x2m = ma.array(x2, mask=True)
    assert_array_almost_equal(utils.l2norm(x1, x2m), [2, 3])
    # Partially masked: masked elements are ignored element-wise.
    x2m = ma.array(x2, mask=[0, 1])
    assert_array_almost_equal(utils.l2norm(x1, x2m), [np.sqrt(13), 3])
def test_interp_2d():
    """Unit tests for utils.interpolate_2d()"""
    # Bilinear interpolation at mid-grid points.
    x = np.array([1, 2, 3])
    y = np.array([1, 2, 3])
    z = np.array([[1, 1, 1], [2, 2, 2], [3, 3, 3]])
    x_new = np.array([1.5, 2.5])
    y_new = np.array([1.5, 2.5])
    result = np.array([[1.5, 1.5], [2.5, 2.5]])
    assert_array_almost_equal(utils.interpolate_2d(x, y, z, x_new, y_new),
                              result)
    # Query outside the y-grid (y=10): values along x are unchanged.
    x_new = np.array([1, 2])
    y_new = np.array([1, 10])
    result = np.array([[1, 1], [2, 2]])
    assert_array_almost_equal(utils.interpolate_2d(x, y, z, x_new, y_new),
                              result)
    # A masked element in z does not change the interpolated result here.
    x = ma.array([1, 2, 3])
    y = ma.array([1, 2, 3])
    z = ma.array([[1, 1, 1], [2, 2, 2], [3, 3, 3]])
    z[1, 1] = ma.masked
    x_new = np.array([1.5, 2.5])
    y_new = np.array([1.5, 2.5])
    result = np.array([[1.5, 1.5], [2.5, 2.5]])
    assert_array_almost_equal(utils.interpolate_2d(x, y, z, x_new, y_new),
                              result)
def test_mdiff():
    """Unit tests for utils.mdiff().

    NOTE(review): the fixtures suggest mdiff returns the typical
    (median-like) spacing of the grid, tolerating masked values and
    outlier gaps — confirm against the utils implementation.
    """
    assert utils.mdiff(np.array([1, 2, 3])) == 1
    assert utils.mdiff(ma.array([1, 2, 3, 4, 5, 6], mask=[0, 1, 0, 1, 0, 0])) == 1
    assert utils.mdiff(np.array([1, 2, 10, 11, 12, 13, 14, 16])) == 1
|
"""Scraper for the Vermont Environmental
CourtID: vt
Court Short Name: VT
Court Contact: submit form here https://www.vermontjudiciary.org/website-feedback-form
"""
from . import vt
class Site(vt.Site):
    """Scraper site for the Vermont Environmental division.

    Inherits all scraping behavior from the base VT scraper and only
    overrides the division-specific parameters.
    """

    def get_backscrape_max(self):
        # Back-scrape at most one page for this division.
        return 1

    def get_division_id(self):
        # Division identifier used by the base scraper ("2" per this
        # module's Environmental-division purpose — see module docstring).
        return "2"
|
from hypernets.core.ops import HyperInput
from hypernets.core.callbacks import SummaryCallback
from hypernets.core.search_space import HyperSpace, Choice
from hypernets.searchers.random_searcher import RandomSearcher
from hyperts.hyper_ts import HyperTS
from hyperts.utils import consts, get_tool_box
from hyperts.datasets import load_random_univariate_forecast_dataset
from hyperts.framework.wrappers import SimpleTSEstimator
from hyperts.framework.wrappers.stats_wrappers import ProphetWrapper
class Test_HyperTS():
    """End-to-end smoke tests for the HyperTS search/train/evaluate loop."""

    @classmethod
    def search_space_one_trial(cls, timestamp):
        """Build a one-estimator search space around a Prophet wrapper."""
        space = HyperSpace()
        with space.as_default():
            hyper_input = HyperInput(name='input1')
            SimpleTSEstimator(
                ProphetWrapper,
                fit_kwargs={'timestamp': timestamp},
                seasonality_mode=Choice(['additive', 'multiplicative']),
            )(hyper_input)
            space.set_inputs(hyper_input)
        return space

    def test_hyperts(self):
        """Search 3 trials, train the best one, and check RMSE is positive."""
        X, y = load_random_univariate_forecast_dataset(return_X_y=True)
        toolbox = get_tool_box(X)
        X_train, X_test, y_train, y_test = toolbox.temporal_train_test_split(
            X, y, test_horizon=24)
        reward_metric = consts.Metric_RMSE
        searcher = RandomSearcher(
            lambda: self.search_space_one_trial(timestamp='ds'),
            optimize_direction=consts.OptimizeDirection_MINIMIZE)
        hyper_model = HyperTS(searcher,
                              reward_metric=reward_metric,
                              task=consts.Task_UNIVARIATE_FORECAST,
                              callbacks=[SummaryCallback()])
        hyper_model.search(X_train, y_train, X_test, y_test, max_trials=3)
        best_trial = hyper_model.get_best_trial()
        estimator = hyper_model.final_train(best_trial.space_sample, X_train, y_train)
        result = estimator.evaluate(X_test, y_test)
        assert result[reward_metric] > 0
## Dependencies
import os
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
import cv2
# IPython magic (notebook-only syntax): render matplotlib figures inline.
%matplotlib inline
import math
# Import everything needed to edit/save/watch video clips
from moviepy.editor import VideoFileClip
from IPython.display import HTML
from FindLaneLine import *
## Global parameters
# Gaussian blur
ksize = 5  # kernel size (pixels)
# Canny edges
lt = 50  # low threshold
ht = 150  # high threshold
# Hough transform and Draw Lines
rho = 1  # distance resolution (pixels)
theta = np.pi/180  # angular resolution (radians)
threshold = 10  # minimum votes to accept a line
min_line_len = 1
max_line_gap = 5
# Lane line
linecol = [255,0,0]  # RGB red
linethick = 10  # line thickness (pixels)
## loop all images in the directory and save to output directory
outpath = ("test_images_output/")
if not os.path.isdir(outpath):
    os.makedirs(outpath)
for filename in os.listdir("test_images/"):
    if filename.endswith(".jpg"):
        im = mpimg.imread(os.path.join("test_images/",filename))
        f = PicLanePipeline(im)
        #cv2.imwrite(os.path.join(outpath, "outlines_"+ filename), f)
        # above the red and blue colors are inverted
        # (mpimg reads RGB; cv2.imwrite expects BGR, hence the conversion)
        cv2.imwrite(os.path.join(outpath, "outlines_"+ filename),
                    cv2.cvtColor(f, cv2.COLOR_RGB2BGR))
        cv2.waitKey(0)
        #plt.figure()
        #plt.imshow(f)
## Apply the pipeline to videos
outpath = ("test_videos_output/")
if not os.path.isdir(outpath): os.makedirs(outpath)
clip1 = VideoFileClip("test_videos/solidWhiteRight.mp4")
clip2 = VideoFileClip("test_videos/solidYellowLeft.mp4")
outclip1 = clip1.fl_image(process_image)
fulloutpath1 = os.path.join(outpath, "out_solidWhiteRight.mp4")
# IPython magic (notebook-only syntax): time the video render.
%time outclip1.write_videofile(fulloutpath1, audio=False)
outclip2 = clip2.fl_image(process_image)
fulloutpath2 = os.path.join(outpath, "out_solidYellowLeft.mp4")
%time outclip2.write_videofile(fulloutpath2, audio=False)
"""
* *******************************************************
* Copyright VMware, Inc. 2018. All Rights Reserved.
* SPDX-License-Identifier: MIT
* *******************************************************
*
* DISCLAIMER. THIS PROGRAM IS PROVIDED TO YOU "AS IS" WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, WHETHER ORAL OR WRITTEN,
* EXPRESS OR IMPLIED. THE AUTHOR SPECIFICALLY DISCLAIMS ANY IMPLIED
* WARRANTIES OR CONDITIONS OF MERCHANTABILITY, SATISFACTORY QUALITY,
* NON-INFRINGEMENT AND FITNESS FOR A PARTICULAR PURPOSE.
"""
__author__ = 'VMware, Inc.'
# To create a policy of a different type, import the CreateSpec of the
# corresponding capability.
from com.vmware.vcenter.compute.policies.capabilities.vm_host_affinity_client \
import CreateSpec
from com.vmware.vapi.std_client import DynamicID
from com.vmware.vcenter.vm_client import Power
from com.vmware.vcenter_client import Host
from samples.vsphere.common import sample_cli
from samples.vsphere.common import sample_util
from samples.vsphere.common.ssl_helper import get_unverified_session
from samples.vsphere.vcenter.helper.vm_helper import get_vm
from vmware.vapi.vsphere.client import create_vsphere_client
def attach_tag(client, inv_obj, inv_type, tag):
    """Attach ``tag`` to the inventory object ``inv_obj`` of type ``inv_type``.

    Re-raises any failure after printing a hint; the most common cause is a
    tag category that is not associable with the given inventory type.
    """
    dynamic_id = DynamicID(type=inv_type, id=inv_obj)
    try:
        client.tagging.TagAssociation.attach(tag.id, dynamic_id)
    except Exception as e:
        print("Check that the tag is associable to {}".format(inv_type))
        raise e
class ComputePolicyWorkflow(object):
    """
    Demonstrates usage of the compute policy APIs to create a policy of
    VM-Host affinity capability and checks the compliance status of the policy
    for a particular virtual machine after the virtual machine is powered on.
    """
    def __init__(self):
        self.policy_id = None
        self.vm_id = None
        self.vm_info = None
        # Create argument parser for standard inputs:
        # server, username, password, cleanup and skipverification.
        parser = sample_cli.build_arg_parser()
        parser.add_argument('-n', '--name', required=True,
                            help='Name of the policy')
        parser.add_argument('-d', '--description', required=False,
                            help='Description for the policy',
                            default='Sample policy description')
        parser.add_argument('-vn', '--vmname', required=True,
                            help='Name of the virtual machine')
        parser.add_argument('-hn', '--hostname', required=True,
                            help='Name of the host')
        parser.add_argument('-vt', '--vmtag', required=True,
                            help='Tag name to attach to the virtual machine')
        parser.add_argument('-ht', '--hosttag', required=True,
                            help='Tag name to attach to the host')
        # Parse the arguments.
        args = sample_util.process_cli_args(parser.parse_args())
        self.vm_name = args.vmname
        self.vm_tag_name = args.vmtag
        self.host_name = args.hostname
        self.host_tag_name = args.hosttag
        self.policy_name = args.name
        self.policy_desc = args.description
        self.cleardata = args.cleardata
        # Skip server cert verification if needed.
        # This is not recommended in production code.
        session = get_unverified_session() if args.skipverification else None
        # Connect to vSphere client.
        self.client = create_vsphere_client(server=args.server,
                                            username=args.username,
                                            password=args.password,
                                            session=session)

    def run(self):
        """Create the VM-Host affinity policy and report its compliance.

        Steps: power off the VM, tag the VM and host, create the policy,
        power the VM back on, then query the policy's compliance status.

        Raises:
            ValueError: if the VM, the host, or either tag cannot be found.
        """
        # Get the virtual machine and power it off.
        self.vm_id = get_vm(self.client, self.vm_name)
        self.vm_info = self.client.vcenter.VM.get(self.vm_id)
        if not self.vm_info:
            raise ValueError("Virtual machine {} not found".format(
                self.vm_name))
        else:
            if self.vm_info.power_state == Power.State.POWERED_ON:
                self.client.vcenter.vm.Power.stop(self.vm_id)
            elif self.vm_info.power_state == Power.State.SUSPENDED:
                # A suspended VM must be resumed before it can be stopped.
                self.client.vcenter.vm.Power.start(self.vm_id)
                self.client.vcenter.vm.Power.stop(self.vm_id)
        # Get the tags.
        # BUG FIX: initialize both tags so the missing-tag check below raises
        # the intended ValueError instead of a NameError when no tag matches.
        vm_tag = None
        host_tag = None
        tags = self.client.tagging.Tag.list()
        for tag in tags:
            info = self.client.tagging.Tag.get(tag)
            if info.name == self.vm_tag_name:
                vm_tag = info
            if info.name == self.host_tag_name:
                host_tag = info
        if not vm_tag or not host_tag:
            raise ValueError("Provided tag(s) not found")
        # Tag the virtual machine and the host.
        attach_tag(self.client, self.vm_id, "VirtualMachine", vm_tag)
        filter_spec = Host.FilterSpec(names=set([self.host_name]))
        all_hosts = self.client.vcenter.Host.list(filter_spec)
        if not len(all_hosts) > 0:
            raise ValueError("Provided host not found")
        host_id = all_hosts[0].host
        attach_tag(self.client, host_id, "HostSystem", host_tag)
        # Create a vm-host affinity policy.
        create_spec = CreateSpec(vm_tag=vm_tag.id, host_tag=host_tag.id,
                                 name=self.policy_name,
                                 description=self.policy_desc)
        print("Creating a VM-Host affinity policy")
        try:
            self.policy_id = self.client.vcenter.compute.\
                Policies.create(create_spec)
        except Exception as e:
            print("Policy creation failed")
            raise e
        print("Policy created with id: {}".format(self.policy_id))
        # Power-on the virtual machine.
        print("Powering on {}".format(self.vm_name))
        self.client.vcenter.vm.Power.start(self.vm_id)
        self.vm_info = self.client.vcenter.VM.get(self.vm_id)
        assert self.vm_info.power_state == Power.State.POWERED_ON
        # Check the compliance status of the policy on this virtual machine.
        status = self.client.vcenter.vm.compute.Policies.get(self.vm_id,
                                                             self.policy_id)
        print("The compliance status of policy {} for virtual machine "
              "{} is {}".format(self.policy_id, self.vm_id, status.status))

    def cleanup(self):
        '''
        Delete the policy and power off the virtual machine.
        '''
        if self.policy_id is not None:
            print("Deleting the policy {}".format(self.policy_id))
            self.client.vcenter.compute.Policies.delete(self.policy_id)
        if self.vm_info.power_state == Power.State.POWERED_ON:
            print("Powering off {}".format(self.vm_name))
            self.client.vcenter.vm.Power.stop(self.vm_id)
def main():
    """Run the compute policy workflow, then clean up if requested."""
    workflow = ComputePolicyWorkflow()
    workflow.run()
    if workflow.cleardata:
        workflow.cleanup()


if __name__ == '__main__':
    main()
|
"""Label segments with language and speaker by matching original transcript to CTM."""
from __future__ import annotations # for difflib.SequenceMatcher type annotation
import difflib
from collections import deque
from collections import namedtuple
from itertools import islice
from typing import Any
from typing import Iterable
from typing import Iterator
from typing import List
from typing import Optional
from typing import Tuple
from typing import Union
import pandas as pd
from aalto_asr_preprocessor import preprocessor
from fi_parliament_tools.parsing.data_structures import EmbeddedStatement
from fi_parliament_tools.parsing.data_structures import Statement
from fi_parliament_tools.parsing.data_structures import Transcript
from fi_parliament_tools.segmentFiltering.IO import KaldiCTMSegmented
from fi_parliament_tools.segmentFiltering.IO import KaldiSegments
from fi_parliament_tools.segmentFiltering.IO import KaldiText
# Match triple: start index in sequence a, start index in sequence b, length
# (same shape as the triples produced by difflib.SequenceMatcher).
Match = namedtuple("Match", "a b size")
# A statement list may mix top-level and embedded statements.
StatementsList = List[Union[Statement, EmbeddedStatement]]
def label_segments(
    transcript: Transcript,
    ctm: KaldiCTMSegmented,
    segments: KaldiSegments,
    kalditext: KaldiText,
    recipe: Any,
    errors: List[str],
) -> Tuple[KaldiSegments, KaldiText]:
    """Assign language and speakers to each segment and corresponding text transcript.

    Args:
        transcript (Transcript): original transcript
        ctm (KaldiCTMSegmented): Kaldi ctm_edits.segmented file
        segments (KaldiSegments): Kaldi segments file
        kalditext (KaldiText): Kaldi text file
        recipe (Any): preprocessing recipe for text
        errors (List[str]): description of all encountered errors

    Returns:
        Tuple[KaldiSegments, KaldiText]: segments and text files with speaker,
            language and new utterance id columns filled in
    """
    # First label the word-level CTM with speaker/language from the transcript,
    # then lift those labels to segment level.
    ctm.df = match_ctm_to_transcript(ctm.df, transcript, recipe, errors)
    info = parse_segment_info(ctm.df)
    segments.df = get_labels(ctm.df, segments.df, info)
    # Mirror the new utterance id, language and speaker id onto the Kaldi
    # text rows, which are assumed to be aligned 1:1 with the segments rows.
    kalditext.df[["new_uttid", "lang", "mpid"]] = segments.df[["new_uttid", "lang", "mpid"]]
    return segments, kalditext
def match_ctm_to_transcript(
    df: pd.DataFrame, transcript: Transcript, recipe: Any, errors: List[str]
) -> pd.DataFrame:
    """Iterate through statements in the transcript and try to find them in the aligned CTM.

    Args:
        df (pd.DataFrame): alignment CTM
        transcript (Transcript): session transcript
        recipe (Any): preprocessing recipe for text
        errors (List[str]): description of all encountered errors

    Returns:
        pd.DataFrame: updated CTM
    """
    df.attrs["statements"] = df.attrs["failed"] = 0
    for sub in transcript.subsections:
        for main_statement in sub.statements:
            texts = [main_statement.text]
            statements: StatementsList = [main_statement]
            if main_statement.embedded_statement.text:
                # The embedded statement is marked by a "#ch_statement"
                # placeholder inside the main text; split around it and swap
                # the placeholder for the embedded statement's own text.
                texts = list(main_statement.text.partition("#ch_statement"))
                texts[1] = main_statement.embedded_statement.text
                statements = [main_statement, main_statement.embedded_statement, main_statement]
            # Bug fix: filter texts and statements *together*. Previously only
            # the texts list was filtered (dropping one-word fragments), which
            # misaligned the subsequent zip(texts, statements) whenever a
            # short text was removed, attributing text to the wrong speaker.
            pairs = [
                (
                    preprocessor.apply(
                        txt, recipe.REGEXPS, recipe.UNACCEPTED_CHARS, recipe.TRANSLATIONS
                    ),
                    stmnt,
                )
                for txt, stmnt in zip(texts, statements)
                if len(txt.strip().split(" ")) > 1
            ]
            df.attrs["statements"] += len(pairs)
            for txt, stmnt in pairs:
                try:
                    df = assign_speaker(df, txt, stmnt)
                except (ValueError, RuntimeError) as err:
                    df.attrs["failed"] += 1
                    msg = f"Cannot align statement {stmnt} in {df.attrs['session']}: {err}"
                    errors.append(msg)
    return df
def adjust_indices(df: pd.DataFrame, start_idx: int, end_idx: int) -> Tuple[int, int]:
    """Trim a statement span so it starts and ends on correctly aligned words.

    The Kaldi alignment may have matched the last words of a statement to
    untranscribed speech following the statement, so the span is shrunk to
    the first/last "cor" (correct) edits inside it.

    Args:
        df (pd.DataFrame): alignment CTM without silences and UNK tokens
        start_idx (int): current start point for a statement
        end_idx (int): current end point for a statement

    Raises:
        ValueError: alignment has failed if start and end indices are the same

    Returns:
        Tuple[int, int]: updated start and end points for the statement
    """
    if start_idx == end_idx:
        raise ValueError("Found segment is of length 0.")
    is_correct = df.iloc[start_idx:end_idx].edit.values == "cor"
    # argmax on a boolean array yields the position of the first True
    # (or 0 when there is no True at all).
    leading_offset: int = is_correct.argmax()
    trailing_offset: int = is_correct[::-1].argmax()
    if trailing_offset > 0:
        trailing_offset += 1
    return start_idx + leading_offset, end_idx - trailing_offset
def assign_speaker(
    df: pd.DataFrame, text: str, statement: Union[Statement, EmbeddedStatement]
) -> pd.DataFrame:
    """Locate a statement in the aligned CTM and stamp its speaker onto it.

    Args:
        df (pd.DataFrame): aligned CTM
        text (str): preprocessed statement text
        statement (Union[Statement, EmbeddedStatement]): statement object

    Returns:
        pd.DataFrame: updated CTM
    """
    start_idx, end_idx = find_statement(
        df[["transcript", "edit"]], text.split(), statement.language
    )
    # Swedish spans are taken as-is; Finnish spans are trimmed to the
    # correctly aligned words.
    if "sv" not in statement.language:
        start_idx, end_idx = adjust_indices(df, start_idx, end_idx)
    span = df.loc[start_idx:end_idx]
    df.loc[span.index, "speaker"] = f"{statement.firstname} {statement.lastname}"
    df.loc[span.index, "mpid"] = statement.mp_id
    df.loc[span.index, "lang"] = statement.language
    return df
def find_statement(
    df: pd.DataFrame,
    text: List[str],
    lang: str,
    match_limit: int = 30,
    size: int = 10000,
    step: int = 7500,
) -> Tuple[int, int]:
    """Find where the statement text starts and ends in the alignment CTM.

    Scans the (silence/UNK-masked) transcript with an overlapping sliding
    window and looks for the first decent SequenceMatcher hit against the
    opening words of the statement.

    Args:
        df (pd.DataFrame): alignment CTM
        text (List[str]): statement text to find as a list of words
        lang (str): handle Swedish and Finnish different
        match_limit (int): the number of words to match in search, defaults to 30
        size (int): the size of the sliding window, defaults to 10 000
        step (int): the step size of the sliding window, defaults to 7500

    Raises:
        ValueError: no alignment could be found

    Returns:
        Tuple[int, int]: start and end indices of statement text
    """
    # Drop silence (<eps>) and unknown (<UNK>) tokens so the matcher only
    # sees real words.
    masked = df[(df.transcript != "<eps>") & (df.transcript != "<UNK>")]
    words_matched = min(len(text), match_limit)
    for i, w in enumerate(sliding_window(masked.transcript.values, size=size, step=step)):
        start, window = (0, list(w))
        diff = difflib.SequenceMatcher(None, window, text[:words_matched])
        # Only the first `step` positions of the window are new; the rest
        # overlaps with the next window and will be re-scanned there.
        while start < step:
            min_m = min(words_matched, 5)
            # First matching block of at least min_m consecutive words, or a
            # zero-sized sentinel when nothing qualifies.
            match = next((m for m in diff.get_matching_blocks() if m.size >= min_m), Match(0, 0, 0))
            start += match.a
            if match.size <= 0:
                break
            else:
                # Position in the full masked sequence (window offset + hit).
                s = start + i * step
                # Accept Swedish directly; for Finnish require that more than
                # half of the matched span are exact ("cor") alignments.
                edit_ratios = masked.edit[s : s + match.size].value_counts(normalize=True)
                if "sv" in lang or ("cor" in edit_ratios and edit_ratios["cor"] > 0.5):
                    return masked.index[s], find_end_index(masked.transcript[s:], text)
            # Candidate rejected: skip past it and retry on the remainder of
            # the window.
            start += words_matched
            diff.set_seq1(window[start:])
    raise ValueError("Alignment not found.")
def find_end_index(masked: pd.DataFrame, text: List[str], added_range: int = 100) -> int:
    """Find the last index of the statement text in the transcript column of the alignment CTM.

    Args:
        masked (pd.DataFrame): silence and unk masked transcript
        text (List[str]): statement text as a list of words
        added_range (int): search the word up to the added range, defaults to 100

    Raises:
        ValueError: end index not found

    Returns:
        int: last index of the statement
    """
    # Limit the comparison to a window slightly longer than the statement.
    limit = min(len(text) + added_range, len(masked) - 1)
    matcher = difflib.SequenceMatcher(None, masked.iloc[:limit].values, text)
    opcodes = matcher.get_opcodes()
    if opcodes[-1][0] == "equal":
        return int(masked.index[opcodes[-1][2]])
    # Otherwise fall back to the last matching block of more than one word.
    for block in reversed(matcher.get_matching_blocks()):
        if block.size > 1:
            return int(masked.index[block.a + block.size])
    raise ValueError("Statement end index not found.")
def sliding_window(
    iterable: Iterable[Any], size: int = 10000, step: int = 7500, fillvalue: Optional[Any] = None
) -> Iterator[Any]:
    """Yield fixed-size windows over an iterable, advancing ``step`` items at a time.

    Deque-based, from: https://stackoverflow.com/a/13408251

    Args:
        iterable (Iterable[Any]): the target of the sliding window
        size (int): size/length of the window, defaults to 10000
        step (int): move window forward by step on each iteration, defaults to 7500
        fillvalue (Any, optional): padding in the last window (if needed), defaults to None

    Raises:
        ValueError: invalid size/step parameters

    Returns:
        None: return nothing when iteration stops

    Yields:
        Iterator[Any]: current window
    """
    if size < 0 or step < 1:
        raise ValueError
    source = iter(iterable)
    window = deque(islice(source, size), maxlen=size)
    if not window:
        # Empty iterable or size == 0: nothing to yield.
        return
    # Pad a short first window up to the requested size.
    window.extend(fillvalue for _ in range(size - len(window)))
    while True:
        # Hand out an iterator so callers cannot mutate the deque itself.
        yield iter(window)
        try:
            window.append(next(source))
        except StopIteration:
            # PEP 479: StopIteration must not leak out of a generator.
            return
        window.extend(next(source, fillvalue) for _ in range(step - 1))
def parse_segment_info(df: pd.DataFrame) -> pd.DataFrame:
    """Get segment id, start index and end index from the segment info column.

    Args:
        df (pd.DataFrame): alignment CTM

    Returns:
        pd.DataFrame: parsed segment info, indexed by a (CTM row, match
            number) MultiIndex produced by ``str.extractall``
    """
    info = df.segment_info.str.extractall(r"start-segment-(\d+)\[start=(\d+),end=(\d+)").astype(int)
    info.rename(columns={0: "seg_num", 1: "seg_start_idx", 2: "seg_end_idx"}, inplace=True)
    # Bug fix: address the CTM-row level of the MultiIndex by position (0)
    # instead of by name (None), which only worked while the CTM index was
    # unnamed.
    info["word_id"] = df.word_id[info.index.get_level_values(0)].values
    return info
def get_labels(df: pd.DataFrame, segments_df: pd.DataFrame, info: pd.DataFrame) -> pd.DataFrame:
    """Get speaker and language labels for segments and form new utterance ids.

    Args:
        df (pd.DataFrame): alignment CTM
        segments_df (pd.DataFrame): Kaldi segments file
        info (pd.DataFrame): contains segment id, segment start and end word indices

    Returns:
        pd.DataFrame: updated segments file
    """
    # Ignore silence and "fix" rows when deciding a segment's speaker/language.
    mask = (df.edit != "sil") & (df.edit != "fix")
    speaker_ids = info.apply(lambda x: get_segment_speaker(x, df, mask), axis=1)
    segments_df["mpid"] = speaker_ids.astype(int).values
    languages = info.apply(lambda x: get_segment_language(x, df, mask), axis=1)
    segments_df["lang"] = languages.values
    segments_df["new_uttid"] = segments_df.apply(
        lambda x: form_new_utterance_id(x, df.attrs["session"]), axis=1
    )
    segments_df.recordid = df.attrs["session"]
    return segments_df
def get_segment_speaker(row: pd.Series, main_df: pd.DataFrame, sil_mask: pd.Series) -> int:
    """Determine the speaker id of a segment or return -1 if a segment has more than one speaker.

    ``row.name`` is the (CTM row, match) tuple from the segment info frame;
    its first element anchors the segment within ``main_df``.

    Args:
        row (pd.Series): row in the list of segments
        main_df (pd.DataFrame): aligned CTM with speaker assignments
        sil_mask (pd.Series): hides sil and UNK tokens

    Returns:
        int: MP id of the segment speaker or -1 if more than one speaker
    """
    offset = row.seg_start_idx - row.word_id
    seg_len = row.seg_end_idx - row.seg_start_idx
    first = row.name[0] + offset
    speaker_ids = main_df.mpid.iloc[first : first + seg_len].loc[sil_mask]
    unique_ids = sorted(speaker_ids.unique())
    # Tolerate a single stray unassigned (0) word next to one real speaker.
    if len(unique_ids) == 2 and 0 in unique_ids and sum(speaker_ids == 0) < 2:
        return unique_ids[1]
    if len(unique_ids) > 1:
        return -1
    return unique_ids[0]
def get_segment_language(row: pd.Series, main_df: pd.DataFrame, sil_mask: pd.Series) -> str:
    """Determine the language of a segment.

    The variable row.name corresponds to the index in the main_df where the segment was defined.

    Args:
        row (pd.Series): row in the segment info DataFrame
        main_df (pd.DataFrame): aligned CTM with language assignments
        sil_mask (pd.Series): hides sil and UNK tokens

    Returns:
        str: language label of the segment
    """
    offset = row.seg_start_idx - row.word_id
    seg_len = row.seg_end_idx - row.seg_start_idx
    first = row.name[0] + offset
    lang_slice = main_df.lang.iloc[first : first + seg_len].loc[sil_mask]
    # Substring checks on the joined labels so mixed tags are also caught.
    joined = " ".join(lang_slice.unique())
    if "fi" in joined and "sv" in joined:
        return "fi+sv"
    if "sv" in joined:
        return "sv"
    return "fi"
def form_new_utterance_id(row: pd.Series, session: str) -> str:
"""Form a new utterance id based on the row values if the segment has only one speaker.
MP id can take three values:
- `mpid > 0` -> valid MP id
- `mpid == 0` -> speaker matching failed or no speaker
- `mpid == -1` -> multiple speakers
Args:
row (pd.Series): a row from the segments DataFrame
session (str): session id
Returns:
str: new utterance id or empty string if a segment has more than one speaker
"""
if row.mpid > 0:
s = int(row.start * 100)
e = int(row.end * 100)
return f"{row.mpid:05}-{session}-{s:08}-{e:08}"
return ""
|
#--------------------------------------------#
#   This script is used to inspect the network structure
#--------------------------------------------#
import torch
from torchsummary import summary
from nets.deeplabv3_plus import DeepLab
if __name__ == "__main__":
    # Pick the device so the network runs on GPU when available, else CPU.
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model = DeepLab(num_classes=21, backbone="mobilenet", downsample_factor=16, pretrained=False).to(device)
    # Print a per-layer summary for a 3x512x512 input.
    summary(model, (3,512,512))
|
import numpy as np
from utils import matrix_ref, int_face, notation, swap4, colors, corners
class FaceCube:
    """Facelet-colour model of a 3x3 Rubik's cube.

    State lives in ``color_matrix``: one row of 9 facelet colours per face
    (6 x 9), initialised from ``utils.matrix_ref``.  Judging by the swap
    tables below, index 0 of a row is the centre facelet, indices 1-4 the
    edge facelets and 5-8 the corner facelets -- confirm against
    ``utils.matrix_ref``.
    """

    def __init__(self):
        # Start from the solved reference colouring.
        self.color_matrix = matrix_ref.copy()

    def swap(self, to_swap, way):
        """4-cycle the facelets addressed by the (face, index) pairs in ``to_swap``.

        ``way`` selects the cycle direction (forwarded to ``utils.swap4``).
        The ``.copy()`` calls avoid aliasing while the tuple is assembled.
        """
        (self.color_matrix[to_swap[0]], self.color_matrix[to_swap[1]],
        self.color_matrix[to_swap[2]], self.color_matrix[to_swap[3]]) = \
            swap4(self.color_matrix[to_swap[0]].copy(), self.color_matrix[to_swap[1]].copy(),
                  self.color_matrix[to_swap[2]].copy(), self.color_matrix[to_swap[3]].copy(),
                  way)

    def turn_face(self, face, way=0):
        """Apply a quarter turn of ``face`` in direction ``way``.

        The hard-coded tables per face list the lateral edge/corner facelets
        that move together with the turning face.
        """
        turn = int_face[face]
        # Turn edge facelets in the turning face
        self.swap([(turn, 1), (turn, 2), (turn, 3), (turn, 4)], way)
        self.swap([(turn, 5), (turn, 6), (turn, 7), (turn, 8)], way)
        # UP
        if turn == 0:
            lateral_edges_to_swap = [(1, 3), (2, 4), (4, 1), (5, 2)]
            lateral_corners_to_swap_1 = [(1, 7), (2, 8), (4, 5), (5, 6)]
            lateral_corners_to_swap_2 = [(1, 8), (2, 5), (4, 6), (5, 7)]
        # FRONT
        elif turn == 1:
            lateral_edges_to_swap = [(3, 1), (2, 1), (0, 1), (5, 1)]
            lateral_corners_to_swap_1 = [(3, 5), (2, 5), (0, 5), (5, 5)]
            lateral_corners_to_swap_2 = [(3, 6), (2, 6), (0, 6), (5, 6)]
        # RIGHT
        elif turn == 2:
            lateral_edges_to_swap = [(0, 2), (1, 2), (3, 4), (4, 2)]
            lateral_corners_to_swap_1 = [(0, 6), (1, 6), (3, 8), (4, 6)]
            lateral_corners_to_swap_2 = [(0, 7), (1, 7), (3, 5), (4, 7)]
        # DOWN
        elif turn == 3:
            lateral_edges_to_swap = [(1, 1), (5, 4), (4, 3), (2, 2)]
            lateral_corners_to_swap_1 = [(5, 8), (4, 7), (2, 6), (1, 5)]
            lateral_corners_to_swap_2 = [(5, 5), (4, 8), (2, 7), (1, 6)]
        # BACK
        elif turn == 4:
            lateral_edges_to_swap = [(0, 3), (2, 3), (3, 3), (5, 3)]
            lateral_corners_to_swap_1 = [(0, 8), (2, 8), (3, 8), (5, 8)]
            lateral_corners_to_swap_2 = [(0, 7), (2, 7), (3, 7), (5, 7)]
        # LEFT
        elif turn == 5:
            lateral_edges_to_swap = [(0, 4), (4, 4), (3, 2), (1, 4)]
            lateral_corners_to_swap_1 = [(1, 8), (0, 8), (4, 8), (3, 6)]
            lateral_corners_to_swap_2 = [(1, 5), (0, 5), (4, 5), (3, 7)]
        else:
            print("Error: Invalid face to turn")
            return
        # Turn edge facelets in the lateral faces
        self.swap(lateral_edges_to_swap, way)
        # Turn corner facelets in the lateral faces part 1
        self.swap(lateral_corners_to_swap_1, way)
        # Turn corner facelets in the lateral faces part 2
        self.swap(lateral_corners_to_swap_2, way)

    def is_solved(self):
        """Return True if every facelet matches the solved reference matrix."""
        return np.array_equal(self.color_matrix, matrix_ref)

    def n_colors_in_place(self):
        """Return the number of facelets that match the solved reference."""
        return np.sum(self.color_matrix == matrix_ref)

    def print(self):
        """Pretty-print the cube as an unfolded 2D net to stdout."""
        print("rubiks_cube: \n")
        print("      {0} {1} {2}".format(self.color_matrix[4, 8], self.color_matrix[4, 3], self.color_matrix[4, 7]))
        print("      {0} {1} {2}".format(self.color_matrix[4, 4], self.color_matrix[4, 0], self.color_matrix[4, 2]))
        print("      {0} {1} {2}".format(self.color_matrix[4, 5], self.color_matrix[4, 1], self.color_matrix[4, 6]))
        print("      -----")
        print("{0} {1} {2}|{3} {4} {5}|{6} {7} {8}|{9} {10} {11}"
              .format(self.color_matrix[5, 8], self.color_matrix[5, 3], self.color_matrix[5, 7],
                      self.color_matrix[0, 8], self.color_matrix[0, 3], self.color_matrix[0, 7],
                      self.color_matrix[2, 8], self.color_matrix[2, 3], self.color_matrix[2, 7],
                      self.color_matrix[3, 8], self.color_matrix[3, 3], self.color_matrix[3, 7]))
        print("{0} {1} {2}|{3} {4} {5}|{6} {7} {8}|{9} {10} {11}"
              .format(self.color_matrix[5, 4], self.color_matrix[5, 0], self.color_matrix[5, 2],
                      self.color_matrix[0, 4], self.color_matrix[0, 0], self.color_matrix[0, 2],
                      self.color_matrix[2, 4], self.color_matrix[2, 0], self.color_matrix[2, 2],
                      self.color_matrix[3, 4], self.color_matrix[3, 0], self.color_matrix[3, 2]))
        print("{0} {1} {2}|{3} {4} {5}|{6} {7} {8}|{9} {10} {11}"
              .format(self.color_matrix[5, 5], self.color_matrix[5, 1], self.color_matrix[5, 6],
                      self.color_matrix[0, 5], self.color_matrix[0, 1], self.color_matrix[0, 6],
                      self.color_matrix[2, 5], self.color_matrix[2, 1], self.color_matrix[2, 6],
                      self.color_matrix[3, 5], self.color_matrix[3, 1], self.color_matrix[3, 6]))
        print("      -----")
        print("      {0} {1} {2}".format(self.color_matrix[1, 8], self.color_matrix[1, 3], self.color_matrix[1, 7]))
        print("      {0} {1} {2}".format(self.color_matrix[1, 4], self.color_matrix[1, 0], self.color_matrix[1, 2]))
        print("      {0} {1} {2}".format(self.color_matrix[1, 5], self.color_matrix[1, 1], self.color_matrix[1, 6]))
        print()

    def scramble(self, scramble):
        """Apply a whitespace-separated scramble string using ``utils.notation``."""
        turns = [notation[turn] for turn in scramble.split()]
        for turn in turns:
            self.turn_face(turn[0], turn[1])

    def assert_valid_matrix(self):
        """Print "Invalid Matrix" if a facelet colour is unknown or any colour count != 9."""
        count_colors = {"color_U": 0, "color_F": 0, "color_R": 0, "color_D": 0, "color_B": 0, "color_L": 0}
        for face in self.color_matrix:
            for facelet in face:
                if facelet not in colors:
                    print("Invalid Matrix")
                    return
                elif facelet == colors[0]:
                    count_colors["color_U"] += 1
                elif facelet == colors[1]:
                    count_colors["color_F"] += 1
                elif facelet == colors[2]:
                    count_colors["color_R"] += 1
                elif facelet == colors[3]:
                    count_colors["color_D"] += 1
                elif facelet == colors[4]:
                    count_colors["color_B"] += 1
                elif facelet == colors[5]:
                    count_colors["color_L"] += 1
        # A legal cube has exactly 9 facelets of each of the 6 colours.
        for counted in count_colors.values():
            if counted != 9:
                print("Invalid Matrix")
                return

    def get_color_matrix(self):
        """Return a defensive copy of the 6x9 colour matrix."""
        return self.color_matrix.copy()

    def set_color_matrix(self, matrix):
        """Replace the cube state with ``matrix`` (no validation performed)."""
        self.color_matrix = matrix

    def get_binary_array(self, one_hot=False):
        """Encode the 48 non-centre facelets as a flat binary vector.

        With ``one_hot`` True: 288 bits, 6 bits (one per colour) per facelet.
        Otherwise: 144 bits, a 3-bit code per facelet where colors[0] maps to
        all zeros.
        """
        if one_hot:
            binary_array = np.zeros(288).astype(int)
            for i in range(6):
                for j in range(1, 9):
                    if self.color_matrix[i][j] == colors[0]:
                        binary_array[(j - 1) * 6 + i * 48] = 1
                    elif self.color_matrix[i][j] == colors[1]:
                        binary_array[(j - 1) * 6 + 1 + i * 48] = 1
                    elif self.color_matrix[i][j] == colors[2]:
                        binary_array[(j - 1) * 6 + 2 + i * 48] = 1
                    elif self.color_matrix[i][j] == colors[3]:
                        binary_array[(j - 1) * 6 + 3 + i * 48] = 1
                    elif self.color_matrix[i][j] == colors[4]:
                        binary_array[(j - 1) * 6 + 4 + i * 48] = 1
                    elif self.color_matrix[i][j] == colors[5]:
                        binary_array[(j - 1) * 6 + 5 + i * 48] = 1
        else:
            binary_array = np.zeros(144).astype(int)
            for i in range(6):
                for j in range(1, 9):
                    if self.color_matrix[i][j] == colors[1]:
                        binary_array[(j - 1) * 3 + 1 + i * 24] = 1
                    elif self.color_matrix[i][j] == colors[2]:
                        binary_array[(j - 1) * 3 + 1 + i * 24] = 1
                        binary_array[(j - 1) * 3 + 2 + i * 24] = 1
                    elif self.color_matrix[i][j] == colors[3]:
                        binary_array[(j - 1) * 3 + i * 24] = 1
                    elif self.color_matrix[i][j] == colors[4]:
                        binary_array[(j - 1) * 3 + i * 24] = 1
                        binary_array[(j - 1) * 3 + 2 + i * 24] = 1
                    elif self.color_matrix[i][j] == colors[5]:
                        binary_array[(j - 1) * 3 + i * 24] = 1
                        binary_array[(j - 1) * 3 + 1 + i * 24] = 1
        return binary_array

    def set_corner(self, facelets, piece, position):
        """Write a corner ``piece`` (colours + twist) onto the three ``facelets``.

        The rotation direction of the colours depends on whether the piece's
        home slot and the target ``position`` have the same parity ("type1"
        corners are the 0/2/5/7 slots) -- inferred from ``utils.corners``.
        """
        type1_corners = [corners[0], corners[2], corners[5], corners[7]]
        type1_positions = [0, 2, 5, 7]
        if (piece[0] in type1_corners and position in type1_positions) or \
                (piece[0] not in type1_corners and position not in type1_positions):
            if piece[1] == 0:
                self.color_matrix[facelets[0]] = piece[0][0]
                self.color_matrix[facelets[1]] = piece[0][1]
                self.color_matrix[facelets[2]] = piece[0][2]
            elif piece[1] == 1:
                self.color_matrix[facelets[1]] = piece[0][0]
                self.color_matrix[facelets[2]] = piece[0][1]
                self.color_matrix[facelets[0]] = piece[0][2]
            else:
                self.color_matrix[facelets[2]] = piece[0][0]
                self.color_matrix[facelets[0]] = piece[0][1]
                self.color_matrix[facelets[1]] = piece[0][2]
        else:
            if piece[1] == 0:
                self.color_matrix[facelets[0]] = piece[0][0]
                self.color_matrix[facelets[2]] = piece[0][1]
                self.color_matrix[facelets[1]] = piece[0][2]
            elif piece[1] == 1:
                self.color_matrix[facelets[1]] = piece[0][0]
                self.color_matrix[facelets[0]] = piece[0][1]
                self.color_matrix[facelets[2]] = piece[0][2]
            else:
                self.color_matrix[facelets[2]] = piece[0][0]
                self.color_matrix[facelets[1]] = piece[0][1]
                self.color_matrix[facelets[0]] = piece[0][2]

    def set_edge(self, facelets, piece):
        """Write an edge ``piece`` (two colours + flip flag) onto the two ``facelets``."""
        if piece[1] == 0:
            self.color_matrix[facelets[0]] = piece[0][0]
            self.color_matrix[facelets[1]] = piece[0][1]
        else:
            self.color_matrix[facelets[0]] = piece[0][1]
            self.color_matrix[facelets[1]] = piece[0][0]

    def set_matrix_from_pieces(self, pieces):
        """Rebuild the facelet matrix from a piece-level cube description.

        ``pieces`` lists the 12 edges first, then the 8 corners, in the fixed
        order annotated below.
        """
        # Set from edges
        self.set_edge([(0, 1), (1, 3)], pieces[0])  # UF
        self.set_edge([(0, 2), (2, 4)], pieces[1])  # UR
        self.set_edge([(0, 3), (4, 1)], pieces[2])  # UB
        self.set_edge([(0, 4), (5, 2)], pieces[3])  # UL
        self.set_edge([(1, 4), (5, 1)], pieces[4])  # FL
        self.set_edge([(1, 2), (2, 1)], pieces[5])  # FR
        self.set_edge([(4, 2), (2, 3)], pieces[6])  # BR
        self.set_edge([(4, 4), (5, 3)], pieces[7])  # BL
        self.set_edge([(3, 1), (1, 1)], pieces[8])  # DF
        self.set_edge([(3, 4), (2, 2)], pieces[9])  # DR
        self.set_edge([(3, 3), (4, 3)], pieces[10])  # DB
        self.set_edge([(3, 2), (5, 4)], pieces[11])  # DL
        # Set from corners
        self.set_corner([(0, 5), (1, 8), (5, 6)], pieces[12], 0)  # UFL
        self.set_corner([(0, 6), (1, 7), (2, 5)], pieces[13], 1)  # UFR
        self.set_corner([(0, 7), (4, 6), (2, 8)], pieces[14], 2)  # UBR
        self.set_corner([(0, 8), (4, 5), (5, 7)], pieces[15], 3)  # UBL
        self.set_corner([(3, 6), (1, 5), (5, 5)], pieces[16], 4)  # DFL
        self.set_corner([(3, 5), (1, 6), (2, 6)], pieces[17], 5)  # DFR
        self.set_corner([(3, 8), (4, 7), (2, 7)], pieces[18], 6)  # DBR
        self.set_corner([(3, 7), (4, 8), (5, 8)], pieces[19], 7)  # DBL
|
from abc import ABC, abstractmethod
class MessageConverter(ABC):
    """Base class for converters that translate one message into the output format.

    All format-specific conversion logic belongs in concrete subclasses of
    this class.
    """

    @abstractmethod
    def modify_data(self, input_message):
        """Convert one parsed input message into the output data structure.

        :param input_message: parsed message of a specific source format (for
            JSON this is typically a dictionary, for XML an ElementTree root --
            it depends on the parser). If None, implementations take the
            message from a class attribute instead.
        :return: data in the output data structure, ready for exporting
        """
|
# pip install fbchat
from fbchat import log
from fbchat import Client
class Bot(Client):
    """Echo bot: repeats any message sent by another user back to its thread."""

    def onMessage(self, author_id, message_object, thread_id, thread_type, **kwargs):
        """fbchat hook invoked for every incoming message.

        Marks the message as delivered and read, logs it, and echoes it back
        to the originating thread.
        """
        self.markAsDelivered(thread_id, message_object.uid)
        self.markAsRead(thread_id)
        log.info("{} from {} in {}".format(message_object, thread_id, thread_type.name))
        # Only echo messages from other users, otherwise the bot would keep
        # replying to its own echoes forever.
        if author_id != self.uid:
            self.send(message_object, thread_id=thread_id, thread_type=thread_type)
# Replace the placeholders with real Facebook credentials before running.
client = Bot('<E-mail ID>', '<password>')
# Blocks and processes events until the task is ended.
client.listen()
#End task to stop the bot
|
import os
import codecs
import warnings
import chardet
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
def _get_encoding(b):
    """Return chardet's best-guess encoding name for bytes ``b`` (may be None)."""
    return chardet.detect(b)['encoding']
def _get_encoding_from_file(path):
    """Guess the text encoding of the file at ``path`` by reading all its bytes."""
    with open(path, 'rb') as f:
        return _get_encoding(f.read())
def auto_open(path):
    """Open ``path`` for reading as text, guessing its encoding with chardet.

    Falls back to the platform default encoding, and as a last resort warns
    and returns a handle to a bundled empty file so callers always receive a
    readable file object.
    """
    encoding = _get_encoding_from_file(path)
    try:
        return codecs.open(path, 'rU', encoding)
    except UnicodeDecodeError:
        pass
    try:
        return codecs.open(path, 'r')
    except UnicodeDecodeError as e:
        # Bug fix: bind the exception ("as e") -- the original warn() call
        # referenced an undefined name `e` and raised NameError instead of
        # warning.
        warnings.warn("couldn't decode file. return empty file:\n{}".format(str(e)))
        return open(os.path.join(BASE_DIR, 'blank.txt'), 'r')
def auto_read(path):
    """Return the text content of ``path``, or None if no codec can decode it.

    Tries the platform default first, then a few codecs common for Korean
    text files.
    """
    fallback_codecs = ["EUC-KR", "ISO-8859-9", "CP949"]  # Korean stuff
    try:
        with codecs.open(path, 'r') as f:
            return f.read()
    except UnicodeDecodeError:
        pass
    for fallback in fallback_codecs:
        try:
            with codecs.open(path, 'rU', fallback) as f:
                return f.read()
        except UnicodeDecodeError:
            continue
    return None
def auto_decode(b):
    """Decode bytes ``b`` to str, guessing the encoding with chardet.

    Returns '' for empty input. Falls back to the default (UTF-8) decode,
    and as a last resort warns and returns '' instead of raising.
    """
    if len(b) == 0:
        return ''
    encoding = _get_encoding(b)
    if encoding:
        try:
            return b.decode(encoding)
        except UnicodeDecodeError:
            pass
    try:
        return b.decode()
    except UnicodeDecodeError as e:
        # Bug fix: bind the exception ("as e") -- the original warn() call
        # referenced an undefined name `e` and raised NameError instead of
        # warning.
        warnings.warn("couldn't decode string. return empty string:\n{}".format(str(e)))
        return ''
|
# Author : Achintya Gupta
from .swtlocalizer import SWTLocalizer
__version__ = "1.1.1"
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file '/home/flyss/myData/myCode/pythonWorkSpace/pyDownloader/pyResourceGet/uiDefines/SpiderDebugWindow.ui'
#
# Created by: PyQt5 UI code generator 5.11.3
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_SpiderDebugWindow(object):
    """Generated UI layout for the XPath debugging window.

    NOTE: produced by pyuic5 from SpiderDebugWindow.ui -- regenerate from the
    .ui file rather than editing by hand (see the header warning).
    """

    def setupUi(self, SpiderDebugWindow):
        """Build the widget tree and fixed geometry on ``SpiderDebugWindow``."""
        SpiderDebugWindow.setObjectName("SpiderDebugWindow")
        SpiderDebugWindow.resize(893, 600)
        self.centralwidget = QtWidgets.QWidget(SpiderDebugWindow)
        self.centralwidget.setObjectName("centralwidget")
        # Toolbar group: URL line edit, XPath text box and a test button.
        self.groupBox = QtWidgets.QGroupBox(self.centralwidget)
        self.groupBox.setGeometry(QtCore.QRect(10, 10, 871, 151))
        self.groupBox.setObjectName("groupBox")
        self.txtUrl = QtWidgets.QLineEdit(self.groupBox)
        self.txtUrl.setGeometry(QtCore.QRect(10, 30, 851, 27))
        self.txtUrl.setObjectName("txtUrl")
        self.txtXPathText = QtWidgets.QTextEdit(self.groupBox)
        self.txtXPathText.setGeometry(QtCore.QRect(10, 60, 771, 81))
        self.txtXPathText.setObjectName("txtXPathText")
        self.btnTest = QtWidgets.QPushButton(self.groupBox)
        self.btnTest.setGeometry(QtCore.QRect(790, 60, 71, 81))
        self.btnTest.setObjectName("btnTest")
        # Read-only log output below the toolbar.
        self.txtLogs = QtWidgets.QTextEdit(self.centralwidget)
        self.txtLogs.setGeometry(QtCore.QRect(10, 170, 871, 411))
        self.txtLogs.setReadOnly(True)
        self.txtLogs.setObjectName("txtLogs")
        SpiderDebugWindow.setCentralWidget(self.centralwidget)

        self.retranslateUi(SpiderDebugWindow)
        QtCore.QMetaObject.connectSlotsByName(SpiderDebugWindow)

    def retranslateUi(self, SpiderDebugWindow):
        """Set all translatable strings (window title, labels, default texts)."""
        _translate = QtCore.QCoreApplication.translate
        SpiderDebugWindow.setWindowTitle(_translate("SpiderDebugWindow", "XPath调试"))
        self.groupBox.setTitle(_translate("SpiderDebugWindow", "工具栏"))
        self.txtUrl.setText(_translate("SpiderDebugWindow", "http://***"))
        self.txtXPathText.setHtml(_translate("SpiderDebugWindow", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'Noto Sans CJK SC\'; font-size:10.5pt; font-weight:400; font-style:normal;\">\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-family:\'文泉驿等宽微米黑\'; font-size:11pt;\">//div[contains(@name,""]***************</span></p></body></html>"))
        self.btnTest.setText(_translate("SpiderDebugWindow", "测试"))
|
# -*- coding: utf-8 -*-
class ActionInterrupted(Exception):
    """Raised by an :ref:`adminaction_requested` or :ref:`adminaction_start`
    handler to abort the admin action before it is executed."""
class FakeTransaction(Exception):
    """Marker exception used to simulate/abort a transaction in tests."""
|
"""
Module: verification_stats_grid.py
Purpose: Process verification statistics generated by LMR_verify_grid.py on
various reconstruction experiments and produce a summary for comparison.
Originator: Greg Hakim | Dept. of Atmospheric Sciences, Univ. of Washington
| January 2017
Revisions:
***... work in progress ...***
Requires copying the .pckl files from all chosen experiment
directories and place them in the current directory.
Lots of parameters the user needs to modify in the main part of the code
depending on the summary to produce.
"""
#import numpy as np
import pickle
import matplotlib
# need to do this when running remotely, and to suppress figures
matplotlib.use('Agg')
import matplotlib.pyplot as plt
# ==============================================================================
# make simple plots (some bits have to be hard wired, so use with care)
# Toggle for the quick summary plot at the bottom of this script (some bits
# are hard wired, so use with care).
#iplot = True
iplot = False

# list with pickle files containing results;
# these are strings corresponding to experiment names and nb of MC
# iterations. These strings should correspond to the root of the names of .pckl
# files containing summary verification stats generated with LMR_verify_grid.py:
# <<string>>_iters_grid_verification.pckl
# ---------------------------------------------------------------------
#expts = [
#    'production_mlost_ccsm4_pagesall_0.75_101',
#    'production_gis_ccsm4_pagesall_0.75_101',
#    'production_cru_ccsm4_pagesall_0.75_101',
#    'production_mlost_era20c_pagesall_0.75_101',
#    ]
expts = [
#    'pages2_loc1000_11',
#    'pages2_loc5000_11',
#    'pages2_loc10000_11',
#    'pages2_loc12000_11',
#    'pages2_loc15000_11',
#    'pages2_loc20000_11',
#    'pages2_noloc_11',
#    'pages2_loc12000_breit_11',
#    'pages2_loc12000_pages2k2_seasonal_TorP_11',
#    'pages2_loc15000_pages2k2_seasonal_TorP_nens200_inflate1.25_10',
#    'pages2_loc20000_seasonal_bilinear_nens200_11',
#    'pages2_loc25000_seasonal_bilinear_nens200_11',
#    'pages2_loc25000_seasonal_bilinear_nens200_75pct_11',
#    'pages2_loc25000_seasonal_bilinear_nens200_meta_11'
    'test_py3_2',
    'test_py3b_2'
    ]
# use a metadata keyword to filter results for quick assessment
#keyword = 'GIS'
#keyword = 'MLOST'
#keyword = 'correlation'
#keyword = 'efficiency'
#keyword = 'LMR_GIS coefficient of efficiency'
# ==============================================================================
def print_verification_stats(expts, keyword):
    """Collect and print verification stats matching *keyword* for each experiment.

    Args:
        expts: list of experiment name roots; each must have a matching
            ``<root>_iters_grid_verification.pckl`` file.
        keyword: substring to look for in each stat's metadata description.

    Returns:
        list: the stat values whose metadata description matched the keyword,
        across all experiments, in file/key order.
    """
    dsave = []
    for ex in expts:
        exfile = ex+'_iters_grid_verification.pckl'
        print('---------------------------------------------------------------------')
        print('working on file: ',exfile)
        # Bug fix: use a context manager so the pickle file handle is always
        # closed (the original leaked it).
        with open(exfile, 'rb') as infile:
            ddict = pickle.load(infile)
        stat_metadata = ddict['stat_metadata']
        # Print (and save) every stat whose metadata mentions the keyword.
        # .get() skips keys without metadata (e.g. 'stat_metadata' itself),
        # which previously raised KeyError.
        for key in list(ddict.keys()):
            if keyword in stat_metadata.get(key, ''):
                print(key, ddict[key], stat_metadata[key])
                # save data
                dsave.append(ddict[key])
    return dsave
# ================================= MAIN =======================================
#
print('----------------------------------')
print('experiments:')
print(expts)
print('----------------------------------')

# Pull three metrics of interest out of every experiment's summary file.
keyword1 = 'LMR_BE coefficient of efficiency global mean'
print('\nresults for all experiments for metric: ' + keyword1)
dsave1 = print_verification_stats(expts, keyword1)
print(dsave1)
keyword2 = 'LMR_GIS coefficient of efficiency global mean'
print('\nresults for all experiments for metric: ' + keyword2)
dsave2 = print_verification_stats(expts, keyword2)
print(dsave2)
keyword3 = 'GIS time-mean spatial anomaly correlation'
print('\nresults for all experiments for metric: ' + keyword3)
dsave3 = print_verification_stats(expts, keyword3)
print(dsave3)

# make a simple plot
# ------------------
# Select which metric to plot by switching the commented lines below.
#pvar = dsave1; keyw = keyword1
pvar = dsave2; keyw = keyword2
#pvar = dsave3; keyw = keyword3
if iplot:
    # x values/labels are hard wired to match the experiment list above.
    # NOTE(review): xlabs is defined but never applied (no plt.xticks call).
#    xvals=['1000','5000','10000','12000','15000','20000','40000']
#    xlabs=['1000','5000','10000','12000','15000','20000','no loc']
    xvals=['exp1','exp2']
    xlabs=['exp1','exp2']
    plt.plot(xvals,pvar,'ko')
    plt.plot(xvals,pvar,'k-')
    plt.xlabel('localization distance (km)')
    plt.ylabel(keyw)
    plt.title('grid verification ' + keyw)
    figname = keyw.replace(' ','_')+'_grid.png'
    print('saving to '+ figname)
    plt.savefig(figname)
|
from django.contrib import admin
from .models import Perreria
# Register your models here.
# Expose Perreria records in the Django admin site.
admin.site.register(Perreria)
import pytest
from bioconda_utils import bioconductor_skeleton
from bioconda_utils import cran_skeleton
from bioconda_utils import utils
config = {
'channels': ['conda-forge', 'bioconda', 'defaults']
}
def test_cran_write_recipe(tmpdir):
    """A CRAN skeleton produces meta.yaml plus both Unix and Windows build scripts."""
    cran_skeleton.write_recipe('locfit', recipe_dir=str(tmpdir), recursive=False)
    recipe = tmpdir.join('r-locfit')
    for artifact in ('meta.yaml', 'build.sh', 'bld.bat'):
        assert recipe.join(artifact).exists()
def test_cran_write_recipe_no_windows(tmpdir):
    """With no_windows=True, bld.bat is omitted and any skip is scoped to Windows."""
    cran_skeleton.write_recipe('locfit', recipe_dir=str(tmpdir), recursive=False, no_windows=True)
    recipe = tmpdir.join('r-locfit')
    assert recipe.join('meta.yaml').exists()
    assert recipe.join('build.sh').exists()
    assert not recipe.join('bld.bat').exists()
    # Any skip directive in the recipe must apply to Windows only.
    for line in recipe.join('meta.yaml').readlines():
        if 'skip: True' in line:
            assert '[win]' in line
def test_bioc_write_recipe_skip_in_condaforge(tmpdir):
    """Recursive recipe writing skips dependencies already found on conda-forge."""
    bioconductor_skeleton.write_recipe(
        'edgeR', recipe_dir=str(tmpdir), config=config, recursive=True,
        skip_if_in_channels=['conda-forge'])
    written = ('bioconductor-edger', 'bioconductor-limma')
    skipped = ('r-cpp', 'r-lattice', 'r-locfit')
    for pkg in written:
        assert tmpdir.join(pkg).exists()
    for pkg in skipped:
        assert not tmpdir.join(pkg).exists()
def test_bioc_write_recipe_no_skipping(tmpdir):
    """Without channel skipping, dependency recipes are written out as well."""
    bioconductor_skeleton.write_recipe(
        'edgeR', recipe_dir=str(tmpdir), config=config, recursive=True,
        skip_if_in_channels=None)
    # sometime locfit and lattice don't build correctly, but we should
    # eventually ensure 'r-locfit' and 'r-lattice' are here as well.
    expected = ('bioconductor-edger', 'bioconductor-limma', 'r-rcpp')
    for pkg in expected:
        assert tmpdir.join(pkg).exists()
def test_meta_contents(tmpdir):
    """Rendered edgeR recipe pulls in r-rcpp, a toolchain, and three source URLs."""
    channels_config = {
        'channels': ['conda-forge', 'bioconda', 'defaults']
    }
    bioconductor_skeleton.write_recipe(
        'edgeR', recipe_dir=str(tmpdir), config=channels_config, recursive=False)
    edger_meta = utils.load_first_metadata(str(tmpdir.join('bioconductor-edger'))).meta
    assert 'r-rcpp' in edger_meta['requirements']['run']
    # The rendered meta has {{ compiler('c') }} filled in, so check one of the
    # filled-in values rather than the template text.
    build_names = [entry.split()[0] for entry in edger_meta['requirements']['build']]
    assert 'toolchain' in build_names
    # bioconductor, bioarchive, and cargoport
    assert len(edger_meta['source']['url']) == 3
def test_find_best_bioc_version():
    """find_best_bioc_version resolves a package version to a BioC release."""
    assert bioconductor_skeleton.find_best_bioc_version('DESeq2', '1.14.1') == '3.4'
    # A version that never existed must raise.
    with pytest.raises(bioconductor_skeleton.PackageNotFoundError):
        bioconductor_skeleton.find_best_bioc_version('DESeq2', '5000')
    # A version that existed in the past but now lives only on bioaRchive
    # must raise as well.
    with pytest.raises(bioconductor_skeleton.PackageNotFoundError):
        bioconductor_skeleton.BioCProjectPage('BioBase', pkg_version='2.37.2')
def test_pkg_version():
    """Pinning either pkg_version or bioc_version resolves the other."""
    # Package version pinned; BioC version is derived.
    page = bioconductor_skeleton.BioCProjectPage('DESeq2', pkg_version='1.14.1')
    assert (page.version, page.bioc_version) == ('1.14.1', '3.4')
    assert page.bioconductor_tarball_url == (
        'https://bioconductor.org/packages/3.4/bioc/src/contrib/DESeq2_1.14.1.tar.gz')
    assert page.bioarchive_url is None
    assert page.cargoport_url == (
        'https://depot.galaxyproject.org/software/bioconductor-deseq2/bioconductor-deseq2_1.14.1_src_all.tar.gz')  # noqa: E501: line too long
    # BioC version pinned; package version is derived.
    page = bioconductor_skeleton.BioCProjectPage('edgeR', bioc_version='3.5')
    assert (page.version, page.bioc_version) == ('3.18.1', '3.5')
    assert page.bioconductor_tarball_url == (
        'https://bioconductor.org/packages/3.5/bioc/src/contrib/edgeR_3.18.1.tar.gz')
    assert page.bioarchive_url is None
    assert page.cargoport_url == (
        'https://depot.galaxyproject.org/software/bioconductor-edger/bioconductor-edger_3.18.1_src_all.tar.gz')  # noqa: E501: line too long
def test_bioarchive_exists_but_not_bioconductor():
    """
    BioCProjectPage init tries to find the package on the bioconductor site.
    Sometimes bioaRchive has cached the tarball but it no longer exists on the
    bioconductor site. In those cases, we're raising a PackageNotFoundError.

    It's possible to build a recipe based on a package only found in
    bioarchive, but I'm not sure we want to support that in an automated
    fashion. In those cases it would be best to build the recipe manually.
    """
    # BioBase 2.37.2 is exactly such a bioaRchive-only version.
    with pytest.raises(bioconductor_skeleton.PackageNotFoundError):
        bioconductor_skeleton.BioCProjectPage('BioBase', pkg_version='2.37.2')
def test_bioarchive_exists():
    """bioarchive_url is populated for a package cached on bioaRchive."""
    # Package found on both bioconductor and bioarchive.
    b = bioconductor_skeleton.BioCProjectPage('DESeq', pkg_version='1.26.0')
    assert b.bioarchive_url == 'https://bioarchive.galaxyproject.org/DESeq_1.26.0.tar.gz'
def test_annotation_data(tmpdir):
    """AnnotationData recipes install via post-link/pre-unlink, not build.sh."""
    bioconductor_skeleton.write_recipe('AHCytoBands', str(tmpdir), config, recursive=True)
    recipe = tmpdir.join('bioconductor-ahcytobands')
    meta = utils.load_first_metadata(str(recipe)).meta
    assert 'wget' in meta['requirements']['run']
    assert len(meta['source']['url']) == 3
    assert not recipe.join('build.sh').exists()
    assert recipe.join('post-link.sh').exists()
    assert recipe.join('pre-unlink.sh').exists()
def test_experiment_data(tmpdir):
    """ExperimentData recipes install via post-link/pre-unlink, not build.sh."""
    bioconductor_skeleton.write_recipe('affydata', str(tmpdir), config, recursive=True)
    recipe = tmpdir.join('bioconductor-affydata')
    meta = utils.load_first_metadata(str(recipe)).meta
    assert 'wget' in meta['requirements']['run']
    assert len(meta['source']['url']) == 3
    assert not recipe.join('build.sh').exists()
    assert recipe.join('post-link.sh').exists()
    assert recipe.join('pre-unlink.sh').exists()
def test_nonexistent_pkg(tmpdir):
    """Unknown packages and unknown versions raise PackageNotFoundError."""
    # No such package exists in the current bioconductor release.
    with pytest.raises(bioconductor_skeleton.PackageNotFoundError):
        bioconductor_skeleton.write_recipe(
            'nonexistent', str(tmpdir), config, recursive=True)

    # The package exists, but not this version.
    with pytest.raises(bioconductor_skeleton.PackageNotFoundError):
        bioconductor_skeleton.write_recipe(
            'DESeq', str(tmpdir), config, recursive=True, pkg_version='5000')
def test_overwrite(tmpdir):
    """Re-writing an existing recipe requires force=True."""
    bioconductor_skeleton.write_recipe(
        'edgeR', recipe_dir=str(tmpdir), config=config, recursive=False)

    # Same thing with force=False raises ValueError.
    with pytest.raises(ValueError):
        bioconductor_skeleton.write_recipe(
            'edgeR', recipe_dir=str(tmpdir), config=config, recursive=False)

    # But same thing with force=True is OK.
    bioconductor_skeleton.write_recipe(
        'edgeR', recipe_dir=str(tmpdir), config=config, recursive=False, force=True)
|
# Auto-generated at 2021-09-27T17:12:31.439417+08:00
# from: Justice Iam Service (4.1.0)
# Copyright (c) 2018 - 2021 AccelByte Inc. All Rights Reserved.
# This is licensed software from AccelByte Inc, for limitations
# and restrictions contact your company contract manager.
# pylint: disable=duplicate-code
# pylint: disable=line-too-long
# pylint: disable=missing-function-docstring
# pylint: disable=missing-module-docstring
# pylint: disable=too-many-arguments
# pylint: disable=too-many-branches
# pylint: disable=too-many-instance-attributes
# pylint: disable=too-many-lines
# pylint: disable=too-many-locals
# pylint: disable=too-many-public-methods
# pylint: disable=too-many-return-statements
# pylint: disable=too-many-statements
# pylint: disable=unused-import
from __future__ import annotations
from typing import Any, Dict, List, Optional, Tuple, Union
from ....core import Model
from ..models.account_user_active_ban_response_v4 import AccountUserActiveBanResponseV4
from ..models.account_user_permissions_response_v4 import AccountUserPermissionsResponseV4
class AccountUserResponseV4(Model):
    """Account user response V4

    Properties:
        auth_type: (authType) REQUIRED str
        bans: (bans) REQUIRED List[AccountUserActiveBanResponseV4]
        country: (country) REQUIRED str
        created_at: (createdAt) REQUIRED str
        date_of_birth: (dateOfBirth) REQUIRED str
        deletion_status: (deletionStatus) REQUIRED bool
        display_name: (displayName) REQUIRED str
        email_address: (emailAddress) REQUIRED str
        email_verified: (emailVerified) REQUIRED bool
        enabled: (enabled) REQUIRED bool
        last_date_of_birth_changed_time: (lastDateOfBirthChangedTime) REQUIRED str
        last_enabled_changed_time: (lastEnabledChangedTime) REQUIRED str
        namespace: (namespace) REQUIRED str
        new_email_address: (newEmailAddress) OPTIONAL str
        old_email_address: (oldEmailAddress) REQUIRED str
        permissions: (permissions) REQUIRED List[AccountUserPermissionsResponseV4]
        phone_number: (phoneNumber) OPTIONAL str
        phone_verified: (phoneVerified) REQUIRED bool
        platform_id: (platformId) OPTIONAL str
        platform_user_id: (platformUserId) OPTIONAL str
        roles: (roles) REQUIRED List[str]
        user_id: (userId) REQUIRED str
        username: (username) OPTIONAL str
    """

    # region fields

    auth_type: str                                              # REQUIRED
    bans: List[AccountUserActiveBanResponseV4]                  # REQUIRED
    country: str                                                # REQUIRED
    created_at: str                                             # REQUIRED
    date_of_birth: str                                          # REQUIRED
    deletion_status: bool                                       # REQUIRED
    display_name: str                                           # REQUIRED
    email_address: str                                          # REQUIRED
    email_verified: bool                                        # REQUIRED
    enabled: bool                                               # REQUIRED
    last_date_of_birth_changed_time: str                        # REQUIRED
    last_enabled_changed_time: str                              # REQUIRED
    namespace: str                                              # REQUIRED
    new_email_address: str                                      # OPTIONAL
    old_email_address: str                                      # REQUIRED
    permissions: List[AccountUserPermissionsResponseV4]         # REQUIRED
    phone_number: str                                           # OPTIONAL
    phone_verified: bool                                        # REQUIRED
    platform_id: str                                            # OPTIONAL
    platform_user_id: str                                       # OPTIONAL
    roles: List[str]                                            # REQUIRED
    user_id: str                                                # REQUIRED
    username: str                                               # OPTIONAL

    # endregion fields

    # Serialization table shared by to_dict / create_from_dict / get_field_info.
    # Each row is (attribute name, JSON key, kind, nested model class).
    # ``kind`` selects the converter: "str" / "bool" are scalar casts,
    # "model_list" is a list of nested models, "str_list" a list of strings.
    _FIELD_SPECS = (
        ("auth_type", "authType", "str", None),
        ("bans", "bans", "model_list", AccountUserActiveBanResponseV4),
        ("country", "country", "str", None),
        ("created_at", "createdAt", "str", None),
        ("date_of_birth", "dateOfBirth", "str", None),
        ("deletion_status", "deletionStatus", "bool", None),
        ("display_name", "displayName", "str", None),
        ("email_address", "emailAddress", "str", None),
        ("email_verified", "emailVerified", "bool", None),
        ("enabled", "enabled", "bool", None),
        ("last_date_of_birth_changed_time", "lastDateOfBirthChangedTime", "str", None),
        ("last_enabled_changed_time", "lastEnabledChangedTime", "str", None),
        ("namespace", "namespace", "str", None),
        ("new_email_address", "newEmailAddress", "str", None),
        ("old_email_address", "oldEmailAddress", "str", None),
        ("permissions", "permissions", "model_list", AccountUserPermissionsResponseV4),
        ("phone_number", "phoneNumber", "str", None),
        ("phone_verified", "phoneVerified", "bool", None),
        ("platform_id", "platformId", "str", None),
        ("platform_user_id", "platformUserId", "str", None),
        ("roles", "roles", "str_list", None),
        ("user_id", "userId", "str", None),
        ("username", "username", "str", None),
    )

    # region with_x methods

    def with_auth_type(self, value: str) -> AccountUserResponseV4:
        """Set ``auth_type`` and return ``self`` for chaining."""
        self.auth_type = value
        return self

    def with_bans(self, value: List[AccountUserActiveBanResponseV4]) -> AccountUserResponseV4:
        """Set ``bans`` and return ``self`` for chaining."""
        self.bans = value
        return self

    def with_country(self, value: str) -> AccountUserResponseV4:
        """Set ``country`` and return ``self`` for chaining."""
        self.country = value
        return self

    def with_created_at(self, value: str) -> AccountUserResponseV4:
        """Set ``created_at`` and return ``self`` for chaining."""
        self.created_at = value
        return self

    def with_date_of_birth(self, value: str) -> AccountUserResponseV4:
        """Set ``date_of_birth`` and return ``self`` for chaining."""
        self.date_of_birth = value
        return self

    def with_deletion_status(self, value: bool) -> AccountUserResponseV4:
        """Set ``deletion_status`` and return ``self`` for chaining."""
        self.deletion_status = value
        return self

    def with_display_name(self, value: str) -> AccountUserResponseV4:
        """Set ``display_name`` and return ``self`` for chaining."""
        self.display_name = value
        return self

    def with_email_address(self, value: str) -> AccountUserResponseV4:
        """Set ``email_address`` and return ``self`` for chaining."""
        self.email_address = value
        return self

    def with_email_verified(self, value: bool) -> AccountUserResponseV4:
        """Set ``email_verified`` and return ``self`` for chaining."""
        self.email_verified = value
        return self

    def with_enabled(self, value: bool) -> AccountUserResponseV4:
        """Set ``enabled`` and return ``self`` for chaining."""
        self.enabled = value
        return self

    def with_last_date_of_birth_changed_time(self, value: str) -> AccountUserResponseV4:
        """Set ``last_date_of_birth_changed_time`` and return ``self`` for chaining."""
        self.last_date_of_birth_changed_time = value
        return self

    def with_last_enabled_changed_time(self, value: str) -> AccountUserResponseV4:
        """Set ``last_enabled_changed_time`` and return ``self`` for chaining."""
        self.last_enabled_changed_time = value
        return self

    def with_namespace(self, value: str) -> AccountUserResponseV4:
        """Set ``namespace`` and return ``self`` for chaining."""
        self.namespace = value
        return self

    def with_new_email_address(self, value: str) -> AccountUserResponseV4:
        """Set ``new_email_address`` and return ``self`` for chaining."""
        self.new_email_address = value
        return self

    def with_old_email_address(self, value: str) -> AccountUserResponseV4:
        """Set ``old_email_address`` and return ``self`` for chaining."""
        self.old_email_address = value
        return self

    def with_permissions(self, value: List[AccountUserPermissionsResponseV4]) -> AccountUserResponseV4:
        """Set ``permissions`` and return ``self`` for chaining."""
        self.permissions = value
        return self

    def with_phone_number(self, value: str) -> AccountUserResponseV4:
        """Set ``phone_number`` and return ``self`` for chaining."""
        self.phone_number = value
        return self

    def with_phone_verified(self, value: bool) -> AccountUserResponseV4:
        """Set ``phone_verified`` and return ``self`` for chaining."""
        self.phone_verified = value
        return self

    def with_platform_id(self, value: str) -> AccountUserResponseV4:
        """Set ``platform_id`` and return ``self`` for chaining."""
        self.platform_id = value
        return self

    def with_platform_user_id(self, value: str) -> AccountUserResponseV4:
        """Set ``platform_user_id`` and return ``self`` for chaining."""
        self.platform_user_id = value
        return self

    def with_roles(self, value: List[str]) -> AccountUserResponseV4:
        """Set ``roles`` and return ``self`` for chaining."""
        self.roles = value
        return self

    def with_user_id(self, value: str) -> AccountUserResponseV4:
        """Set ``user_id`` and return ``self`` for chaining."""
        self.user_id = value
        return self

    def with_username(self, value: str) -> AccountUserResponseV4:
        """Set ``username`` and return ``self`` for chaining."""
        self.username = value
        return self

    # endregion with_x methods

    # region to methods

    def to_dict(self, include_empty: bool = False) -> dict:
        """Serialize the set fields into a camelCase JSON-style dict.

        Unset or falsy fields are omitted, unless ``include_empty`` is True,
        in which case they are emitted as the type's empty value (matching
        the generated-SDK convention).
        """
        result = {}
        for attr, key, kind, _ in self._FIELD_SPECS:
            value = getattr(self, attr, None)
            if value:
                if kind == "str":
                    result[key] = str(value)
                elif kind == "bool":
                    result[key] = bool(value)
                elif kind == "model_list":
                    result[key] = [item.to_dict(include_empty=include_empty) for item in value]
                else:  # "str_list"
                    result[key] = [str(item) for item in value]
            elif include_empty:
                if kind in ("model_list", "str_list"):
                    result[key] = []
                else:
                    result[key] = str() if kind == "str" else bool()
        return result

    # endregion to methods

    # region static methods

    @classmethod
    def create(
        cls,
        auth_type: str,
        bans: List[AccountUserActiveBanResponseV4],
        country: str,
        created_at: str,
        date_of_birth: str,
        deletion_status: bool,
        display_name: str,
        email_address: str,
        email_verified: bool,
        enabled: bool,
        last_date_of_birth_changed_time: str,
        last_enabled_changed_time: str,
        namespace: str,
        old_email_address: str,
        permissions: List[AccountUserPermissionsResponseV4],
        phone_verified: bool,
        roles: List[str],
        user_id: str,
        new_email_address: Optional[str] = None,
        phone_number: Optional[str] = None,
        platform_id: Optional[str] = None,
        platform_user_id: Optional[str] = None,
        username: Optional[str] = None,
    ) -> AccountUserResponseV4:
        """Build an instance from explicit field values.

        Optional fields left as None are not set on the instance at all,
        so ``to_dict`` will omit them.
        """
        instance = (
            cls()
            .with_auth_type(auth_type)
            .with_bans(bans)
            .with_country(country)
            .with_created_at(created_at)
            .with_date_of_birth(date_of_birth)
            .with_deletion_status(deletion_status)
            .with_display_name(display_name)
            .with_email_address(email_address)
            .with_email_verified(email_verified)
            .with_enabled(enabled)
            .with_last_date_of_birth_changed_time(last_date_of_birth_changed_time)
            .with_last_enabled_changed_time(last_enabled_changed_time)
            .with_namespace(namespace)
            .with_old_email_address(old_email_address)
            .with_permissions(permissions)
            .with_phone_verified(phone_verified)
            .with_roles(roles)
            .with_user_id(user_id)
        )
        if new_email_address is not None:
            instance.with_new_email_address(new_email_address)
        if phone_number is not None:
            instance.with_phone_number(phone_number)
        if platform_id is not None:
            instance.with_platform_id(platform_id)
        if platform_user_id is not None:
            instance.with_platform_user_id(platform_user_id)
        if username is not None:
            instance.with_username(username)
        return instance

    @classmethod
    def create_from_dict(cls, dict_: dict, include_empty: bool = False) -> AccountUserResponseV4:
        """Deserialize a camelCase dict into a new instance.

        Keys that are absent or None are skipped, unless ``include_empty``
        is True, in which case the attribute is set to the type's empty value.
        """
        instance = cls()
        if not dict_:
            return instance
        for attr, key, kind, model_cls in cls._FIELD_SPECS:
            if key in dict_ and dict_[key] is not None:
                raw = dict_[key]
                if kind == "str":
                    setattr(instance, attr, str(raw))
                elif kind == "bool":
                    setattr(instance, attr, bool(raw))
                elif kind == "model_list":
                    setattr(instance, attr, [
                        model_cls.create_from_dict(item, include_empty=include_empty)
                        for item in raw
                    ])
                else:  # "str_list"
                    setattr(instance, attr, [str(item) for item in raw])
            elif include_empty:
                if kind in ("model_list", "str_list"):
                    setattr(instance, attr, [])
                else:
                    setattr(instance, attr, str() if kind == "str" else bool())
        return instance

    @staticmethod
    def get_field_info() -> Dict[str, str]:
        """Map each JSON field name to its python attribute name."""
        return {key: attr for attr, key, _, _ in AccountUserResponseV4._FIELD_SPECS}

    # endregion static methods
# endregion static methods
|
from django.contrib import admin
# Register your models here.
from django.contrib.auth.admin import UserAdmin
from .forms import CustomUserCreationForm, CustomUserChangeForm
from .models import CustomUser, Authority
# Branding for the Django admin: page header, browser <title>, and the
# heading shown on the admin index page.
admin.site.site_header = "Landgate Admin"
admin.site.site_title = "Survey Services Portal"
admin.site.index_title = "Welcome to Landgate Staff Range Calibration Portal"
@admin.register(Authority)
class AuthorityAdmin(admin.ModelAdmin):
    """Admin list view for authorities, ordered by abbreviation."""
    list_display = ('authority_name', 'authority_abbrev',)
    ordering = ('authority_abbrev',)
class CustomUserAdmin(UserAdmin):
    """Admin configuration for the email-based CustomUser model."""
    add_form = CustomUserCreationForm
    form = CustomUserChangeForm
    model = CustomUser
    list_display = ('email', 'first_name', 'last_name', 'authority', 'is_active', 'is_staff', 'is_superuser', 'date_joined', 'last_login')
    # NOTE(review): filtering on 'email' yields one filter entry per user;
    # confirm this is intentional rather than a leftover.
    list_filter = ('email', 'is_staff', 'is_active',)
    # Fields shown when editing an existing user.
    fieldsets = (
        (None, {'fields': ('email', 'password')}),
        #('Personal info', {'fields': ('email', 'phone_number',)}),
        ('Permissions', {'fields': ('is_staff','groups',)}),
    )
    # Fields shown on the "add user" form.
    add_fieldsets = (
        (None, {
            'classes': ('wide',),
            'fields': (
                'email',
                'first_name',
                'last_name',
                'authority',
                'password1',
                'password2',
                'is_staff',
                'is_active')}
        ),
    )
    search_fields = ('email',)
    ordering = ('email',)


# Register the custom user model with its admin configuration.
admin.site.register(CustomUser, CustomUserAdmin)
|
from dataclass_factory import Factory
from shazamio.factory import FactorySchemas
from shazamio.models import ArtistInfo
from shazamio.models import SongSection, VideoSection, RelatedSection
from shazamio.models import TrackInfo
from shazamio.models import YoutubeData
from shazamio.models import ResponseTrack
# Schema registry for track-shaped payloads. debug_path=True makes
# dataclass_factory report the failing field path on parse errors.
_TRACK_SCHEMAS = {
    TrackInfo: FactorySchemas.FACTORY_TRACK_SCHEMA,
    SongSection: FactorySchemas.FACTORY_SONG_SECTION_SCHEMA,
    VideoSection: FactorySchemas.FACTORY_VIDEO_SECTION_SCHEMA,
    RelatedSection: FactorySchemas.FACTORY_RELATED_SECTION_SCHEMA,
    YoutubeData: FactorySchemas.FACTORY_YOUTUBE_TRACK_SCHEMA,
    ResponseTrack: FactorySchemas.FACTORY_RESPONSE_TRACK_SCHEMA,
}

# Factory used to parse track responses.
FACTORY_TRACK = Factory(schemas=_TRACK_SCHEMAS, debug_path=True)

# Factory used to parse artist responses.
FACTORY_ARTIST = Factory(
    schemas={ArtistInfo: FactorySchemas.FACTORY_ARTIST_SCHEMA},
    debug_path=True,
)
|
from .util import *
from .pyfzf import Pyfzf
from .spinner import Spinner
from .session import BaseSession
from .fileloader import FileLoader
|
# Copyright (c) 2012-2019 Seafile Ltd.
# encoding: utf-8
from datetime import datetime
import logging
import re
import requests
import json
from django.core.management.base import BaseCommand
from django.urls import reverse
from django.utils import translation
from django.utils.translation import ungettext
from seahub.base.models import CommandsLastCheck
from seahub.notifications.models import UserNotification
from seahub.utils import get_site_scheme_and_netloc, get_site_name
from seahub.auth.models import SocialAuthUser
from seahub.dingtalk.utils import dingtalk_get_access_token, dingtalk_get_userid_by_unionid
from seahub.dingtalk.settings import DINGTALK_MESSAGE_SEND_TO_CONVERSATION_URL, \
DINGTALK_AGENT_ID
# Get an instance of a logger
logger = logging.getLogger(__name__)
# https://ding-doc.dingtalk.com/doc#/serverapi3/wvdxel
########## Utility Functions ##########
def remove_html_a_element(s):
    """Return *s* with every ``<a ...>text</a>`` replaced by its inner text.

    Only the anchor markup is stripped; all other content is left untouched.
    (The old docstring claimed the result was wrapped in ``<div></div>``,
    which the code never did.)
    """
    # Non-greedy match so that multiple anchors in one string are each
    # replaced by their own inner text; \1 keeps just the link text.
    return re.sub(r'<a.*?>(.+?)</a>', r'\1', s)
class CommandLogMixin(object):
    """Mixin for management commands: timestamped console output that is
    mirrored to the module-level logger at the chosen severity."""

    def println(self, msg):
        # Prefix every console line with the current wall-clock time.
        timestamp = str(datetime.now())
        self.stdout.write('[%s] %s\n' % (timestamp, msg))

    def _log(self, logger_method, msg):
        # Send to the logger first, then echo to the command's stdout.
        logger_method(msg)
        self.println(msg)

    def log_error(self, msg):
        self._log(logger.error, msg)

    def log_info(self, msg):
        self._log(logger.info, msg)

    def log_debug(self, msg):
        self._log(logger.debug, msg)
#######################################
class Command(BaseCommand, CommandLogMixin):
    """Send a DingTalk digest to every connected user who has unseen
    notices accumulated since the previous run of this command.
    """

    # NOTE(fix): these two literals were previously two separate statements,
    # so the second half of the help text was silently discarded. Wrapping
    # them in parentheses makes them a single concatenated string.
    help = ('Send dingtalk msg to user if he/she has unseen notices every '
            'period of time.')
    label = "notifications_send_dingtalk_notices"

    def handle(self, *args, **options):
        """Django management-command entry point."""
        self.log_debug('Start sending dingtalk msg...')
        self.do_action()
        self.log_debug('Finish sending dingtalk msg.\n')

    def send_dingtalk_msg(self, user_id, title, content):
        """POST one markdown message to DingTalk for `user_id`.

        API errors (non-zero `errcode`) are logged, not raised.
        """
        self.log_info('Send dingtalk msg to user: %s, msg: %s' % (user_id, content))

        data = {
            "agent_id": DINGTALK_AGENT_ID,
            "userid_list": user_id,
            "msg": {
                "msgtype": "markdown",
                "markdown": {
                    "title": title,
                    "text": content
                }
            }
        }
        resp_json = requests.post(self.dingtalk_message_send_to_conversation_url,
                                  data=json.dumps(data)).json()
        if resp_json.get('errcode') != 0:
            self.log_info(resp_json)

    def do_action(self):
        """Collect unseen notices per connected user and push them to DingTalk."""
        # Check the prerequisite access token before doing anything else.
        access_token = dingtalk_get_access_token()
        if not access_token:
            self.log_error('can not get access_token')
            # NOTE(fix): previously execution fell through here and crashed
            # below when concatenating an empty/None token into the URL.
            return

        self.dingtalk_message_send_to_conversation_url = DINGTALK_MESSAGE_SEND_TO_CONVERSATION_URL + '?access_token=' + access_token
        self.detail_url = get_site_scheme_and_netloc().rstrip('/') + reverse('user_notification_list')
        site_name = get_site_name()

        now = datetime.now()
        today = datetime.now().replace(hour=0).replace(minute=0).replace(
            second=0).replace(microsecond=0)

        # 1. Get all users who have connected DingTalk via social auth.
        socials = SocialAuthUser.objects.filter(provider='dingtalk')
        users = [(x.username, x.uid) for x in socials]
        self.log_info('Found %d users' % len(users))
        if not users:
            return

        user_uid_map = {}
        for username, uid in users:
            user_uid_map[username] = dingtalk_get_userid_by_unionid(uid)

        # 2. Get the time this command last ran; on the very first run fall
        # back to midnight today. Persist `now` as the new checkpoint.
        try:
            cmd_last_check = CommandsLastCheck.objects.get(command_type=self.label)
            self.log_debug('Last check time is %s' % cmd_last_check.last_check)

            last_check_dt = cmd_last_check.last_check

            cmd_last_check.last_check = now
            cmd_last_check.save()
        except CommandsLastCheck.DoesNotExist:
            last_check_dt = today
            self.log_debug('Create new last check time: %s' % now)
            CommandsLastCheck(command_type=self.label, last_check=now).save()

        # 3. Get all unseen notices created since the last run for those users.
        qs = UserNotification.objects.filter(
            timestamp__gt=last_check_dt
        ).filter(seen=False).filter(
            to_user__in=list(user_uid_map.keys())
        )
        self.log_info('Found %d notices' % qs.count())
        if qs.count() == 0:
            return

        # Group notices by recipient username.
        user_notices = {}
        for q in qs:
            if q.to_user not in user_notices:
                user_notices[q.to_user] = [q]
            else:
                user_notices[q.to_user].append(q)

        # Save the current language, then force zh-cn while rendering.
        cur_language = translation.get_language()
        translation.activate('zh-cn')
        self.log_info('the language is set to zh-cn')

        # 4. Send one digest message per user with pending notices.
        for username, uid in users:
            user_id = user_uid_map[username]
            notices = user_notices.get(username, [])
            count = len(notices)
            if count == 0:
                continue

            title = ungettext(
                "\n"
                "You've got 1 new notice on %(site_name)s:\n",
                "\n"
                "You've got %(num)s new notices on %(site_name)s:\n",
                count
            ) % {'num': count, 'site_name': site_name, }
            content = ' \n '.join([remove_html_a_element(x.format_msg()) for x in notices])
            self.send_dingtalk_msg(user_id, title, content)

        # Restore whatever language was active before.
        translation.activate(cur_language)
        self.log_info('reset language success')
|
"""
Copyright 2021 Janrey "CodexLink" Licas
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from asyncio import (
AbstractEventLoop,
Task,
all_tasks,
create_task,
current_task,
gather,
get_event_loop,
sleep,
wait,
)
from typing import Any, Generator, Set
from api import AsyncGithubAPILite
from badge import BadgeConstructor
from client import DiscordClientHandler
from elements.constants import ENV_FILENAME, GithubRunnerActions
from utils import UtilityMethods
class DiscordActivityBadge(
UtilityMethods, AsyncGithubAPILite, DiscordClientHandler, BadgeConstructor
):
# The heart of the Discord Activity Badge. Everything runs in Object-Oriented Approach. Please check each methods.
def __await__(self) -> Generator:
"""
This special method let the superclass run by calling another user-specified special method which is `__start__`.
Which basically calls another special method `__await__` to invoke the class in the loop that AsyncIO wants.
"""
return self.__start__().__await__()
async def __start__(self) -> None:
"""
Executes all subclasses's methods that are both async and non-async for the preparation of whole process.
"""
super().resolve_args() # * First, we resolve the arguments given by the client before we attempt to do anything.
super().init_logger( # * Once the argument has been evaluated, we have to load the Logger to log everything.
level_coverage=getattr(self.args, "logger_level"),
root_level=getattr(self.args, "verbosity"),
log_to_file=getattr(self.args, "generate_log_file"),
out_to_console=getattr(self.args, "no_console_log"),
)
# And then, evaluate the local environment by checking for `.env` before resolve those Environment Variables.
if getattr(self.args, "running_on_local"):
super().check_dotenv()
else:
self.logger.info(
f"Running local mode invocation not detected. Skipping checks for '{ENV_FILENAME}'."
)
# Once the extra step is done or skipped, evaluate the envs for other modules to use.
super().resolve_envs()
# Since every pre-requisite methods were done loading, we have to instantiate other subclasses to load other assets.
self._cascade_init_cls: Task = create_task( # (5)
super().__ainit__(),
name="Classes_AsyncGithubAPI_Child_Initialization",
)
# # The two tasks (`discord_client_task` and `readme_data`) is supposed to be a pair or in `gather()` but they are seperated
# # because of static cooperative await. Meaning some `await wait()` is waiting at certain codeblocks.
self.discord_client_task: Task = create_task(
self.start(self.envs["DISCORD_BOT_TOKEN"]),
name="DiscordClient_UserFetching",
) # * Load the Discord Client so that it can take some time while we load other stuff.
self.readme_data: Task = create_task(
self.exec_api_actions(GithubRunnerActions.FETCH_README),
name="GithubAPI_README_Fetching",
) # * Fetch README (expects Base64String from result())
self.badge_task: Task = create_task(
self.construct_badge(), name="BadgeConstructor_Construct"
) # * Runs badge construction and wait for Task `discord_client_task` to finish before continuing.
# Implicitly declare this wait instead of inside of the method. There's nothing much to do (in `badge_updater`) while we wait to fetch README data.
await wait({self.readme_data})
badge_updater: Task = create_task(
self.check_and_update_badge(self.readme_data.result()[1]),
name="README_BadgeChecker_Updater",
) # ! Once we got the README, check it and wait for Task `badge_task` to finish before checking if changes is required to commit.
# Voluntarily invoke this `wait` outside of method `exec_api_action` to avoid confusion due to abstraction.
await wait({badge_updater})
if not getattr(self.args, "do_not_commit") and not self.envs["IS_DRY_RUN"]:
create_task(
self.exec_api_actions(
GithubRunnerActions.COMMIT_CHANGES,
data=[self.readme_data.result()[0], badge_updater.result()],
)
)
else:
self.logger.warning(
"Argument -dnc / --do-not-commit was invoked, will skip updating README."
)
await self.__end__() # Once every task/s is done spawning in the loop, await `__end__` to show the status of the tasks whenever there's a blank space in runtime.
    async def __end__(self) -> None:
        """Idle until every other asyncio task finishes, then shut down sessions.

        Logs the number of pending tasks (only when the task set changes),
        and once this coroutine's task is the only one left, closes the
        Discord client and the aiohttp API session.
        """
        self.logger.info("Done loading modules and necessary elements for the process.")
        # Snapshot of the task set seen on the previous iteration, used to
        # avoid logging the same "N task/s left" message repeatedly.
        prev_tasks: Set[Any] = set({})
        while True:  # Poll until we are the only task left on the event loop.
            if (
                len(all_tasks()) <= 1
            ):  # Only this coroutine's own task remains -> everything else finished.
                self.logger.info(
                    "All tasks successfully finished! Closing Client Sessions..."
                )
                await gather(
                    self.close(), self._api_session.close()
                )  # Discord API and aiohttp.ClientSession.
                self.logger.info(
                    "Connection Sessions were successfully closed. (Discord Client and Github API)"
                )
                break
            # Otherwise, report (non-redundantly) how many tasks are still pending.
            tasks: Set[Task] = all_tasks()
            n_tasks: int = len(tasks)
            try:
                if (
                    prev_tasks != tasks
                ):  # Log only when the set of pending tasks actually changed.
                    self.logger.info(
                        f"{n_tasks} task/s left to finish the runtime process."
                    )
                    prev_tasks = tasks
                    self.logger.debug(
                        f"Current Task: {current_task()} | Other Task/s in Queue | {all_tasks()}"
                    )
            except TypeError:
                # NOTE(review): comparing two sets should not raise TypeError;
                # kept as a defensive fallback that just refreshes the snapshot.
                prev_tasks = tasks
            await sleep(
                0
            )  # Zero-delay yield: hand control back to the loop without real sleeping.
# # Entrypoint Code
# Run the Discord activity-badge client to completion on the default event
# loop. Note: ``run_until_complete`` returns the awaited object's result,
# NOT an event loop -- the original ``AbstractEventLoop`` annotation on
# ``entry_instance`` was incorrect and has been removed.
loop_instance: AbstractEventLoop = get_event_loop()
entry_instance = loop_instance.run_until_complete(
    DiscordActivityBadge()
)
|
# from helpers.py.tree_node import Node
# Definition for a Node.
class Node:
    """A binary-tree node: a value plus optional left/right children."""

    def __init__(self, val, left=None, right=None):
        # Store the payload and both child links in one statement.
        self.val, self.left, self.right = val, left, right
# from helpers.py.node import Node
class Solution:
    def treeToDoublyList(self, root: 'Node') -> 'Node':
        """Convert a BST into a sorted circular doubly-linked list in place.

        ``left`` becomes the predecessor pointer and ``right`` the successor;
        the list wraps around (last.right is head, head.left is last).
        Returns the head (smallest element), or ``root`` unchanged if empty.
        """
        if not root:
            return root
        ordered = []

        def visit(node: 'Node') -> None:
            # Standard in-order walk: left subtree, node, right subtree.
            if node:
                visit(node.left)
                ordered.append(node)
                visit(node.right)

        visit(root)
        total = len(ordered)
        # Re-wire neighbours circularly using modular indexing.
        for idx, node in enumerate(ordered):
            node.left = ordered[(idx - 1) % total]
            node.right = ordered[(idx + 1) % total]
        return ordered[0]
if __name__ == '__main__':
    # Smoke test: a small BST whose in-order traversal is 1..5.
    samples = [Node(4, Node(2, Node(1), Node(3)), Node(5, None))]
    for sample in samples:
        result = Solution().treeToDoublyList(sample)
        print(result)
|
import numpy as np
import matplotlib.pyplot as plt
import statsmodels.api as sm
class Metrics:
    """
    Mixin with methods for computing useful regression metrics.

    Expects the host class to provide ``is_fitted``, ``resid_``, ``target_``,
    ``features_``, ``dfe_``, ``dft_`` and (for ``mse``) a ``predict`` method.

    sse: Sum of squared errors
    sst: Total sum of squared errors (actual vs avg(actual))
    r_squared: Regression coefficient (R^2)
    adj_r_squared: Adjusted R^2
    mse: Mean sum of squared errors
    aic: Akaike information criterion
    bic: Bayesian information criterion
    """

    def sse(self):
        """Return the sum of squared errors (model vs. actual), or None if unfitted."""
        if not self.is_fitted:
            print("Model not fitted yet!")
            return None
        squared_errors = (self.resid_) ** 2
        self.sq_error_ = np.sum(squared_errors)
        return self.sq_error_

    def sst(self):
        """Return the total sum of squared errors (actual vs avg(actual))."""
        if not self.is_fitted:
            print("Model not fitted yet!")
            return None
        avg_y = np.mean(self.target_)
        squared_errors = (self.target_ - avg_y) ** 2
        self.sst_ = np.sum(squared_errors)
        return self.sst_

    def r_squared(self):
        """Return the coefficient of determination R^2 = 1 - SSE/SST."""
        if not self.is_fitted:
            print("Model not fitted yet!")
            return None
        self.r_sq_ = 1 - self.sse() / self.sst()
        return self.r_sq_

    def adj_r_squared(self):
        """Return adjusted R^2, penalized by the degrees of freedom."""
        if not self.is_fitted:
            print("Model not fitted yet!")
            return None
        self.adj_r_sq_ = 1 - (self.sse() / self.dfe_) / (self.sst() / self.dft_)
        return self.adj_r_sq_

    def mse(self):
        """Return the mean squared error of the fitted model on its own data."""
        if not self.is_fitted:
            print("Model not fitted yet!")
            return None
        self.mse_ = np.mean((self.predict(self.features_) - self.target_) ** 2)
        return self.mse_

    def aic(self):
        """Return the AIC (Akaike information criterion) via a statsmodels OLS refit."""
        if not self.is_fitted:
            print("Model not fitted yet!")
            return None
        lm = sm.OLS(self.target_, sm.add_constant(self.features_)).fit()
        return lm.aic

    def bic(self):
        """Return the BIC (Bayesian information criterion) via a statsmodels OLS refit."""
        if not self.is_fitted:
            print("Model not fitted yet!")
            return None
        lm = sm.OLS(self.target_, sm.add_constant(self.features_)).fit()
        return lm.bic

    def print_metrics(self):
        """Print a report of the useful metrics for this model."""
        if not self.is_fitted:
            print("Model not fitted yet!")
            return None
        items = (
            ("sse:", self.sse()),
            ("sst:", self.sst()),
            ("mse:", self.mse()),
            ("r^2:", self.r_squared()),
            ("adj_r^2:", self.adj_r_squared()),
            ("AIC:", self.aic()),
            ("BIC:", self.bic()),
        )
        # NOTE(review): "{1:.4f}" raises if any metric returns None.
        for item in items:
            print("{0:8} {1:.4f}".format(item[0], item[1]))

    def summary_metrics(self):
        """Return a dictionary of the useful metrics.

        Keys are uniform, colon-free names; the original inconsistently used
        "adj_r^2:", "AIC:" and "BIC:" (with trailing colons) alongside
        colon-free keys like "sse".
        """
        if not self.is_fitted:
            print("Model not fitted yet!")
            return None
        metrics = {}
        items = (
            ("sse", self.sse()),
            ("sst", self.sst()),
            ("mse", self.mse()),
            ("r^2", self.r_squared()),
            ("adj_r^2", self.adj_r_squared()),
            ("AIC", self.aic()),
            ("BIC", self.bic()),
        )
        for item in items:
            metrics[item[0]] = item[1]
        return metrics
class Inference:
    """
    Mixin with inferential statistics for the regression:
    standard errors, p-values, t-test statistics, and the overall F-test.

    Every statistic is obtained by refitting an OLS model with statsmodels
    on ``self.target_`` / ``self.features_`` of the host class.
    """

    def __init__(self):
        # Fixed: the original signature was ``def __init__():`` (missing
        # ``self``), which made direct instantiation raise TypeError.
        pass

    def std_err(self):
        """Return the standard errors of the fitted coefficients."""
        if not self.is_fitted:
            print("Model not fitted yet!")
            return None
        lm = sm.OLS(self.target_, sm.add_constant(self.features_)).fit()
        return lm.bse

    def pvalues(self):
        """Return the p-values of the fitted coefficients."""
        if not self.is_fitted:
            print("Model not fitted yet!")
            return None
        lm = sm.OLS(self.target_, sm.add_constant(self.features_)).fit()
        return lm.pvalues

    def tvalues(self):
        """Return the t-statistics of the fitted coefficients."""
        if not self.is_fitted:
            print("Model not fitted yet!")
            return None
        lm = sm.OLS(self.target_, sm.add_constant(self.features_)).fit()
        return lm.tvalues

    def ftest(self):
        """Return ``(F-statistic, p-value)`` for the overall regression."""
        if not self.is_fitted:
            print("Model not fitted yet!")
            return None
        lm = sm.OLS(self.target_, sm.add_constant(self.features_)).fit()
        return (lm.fvalue, lm.f_pvalue)
class Diagnostics_plots:
    """
    Mixin with diagnostics plots and tests for a fitted model.

    Expects the host class to provide ``is_fitted``, ``fitted_``, ``resid_``
    and ``features_``.

    fitted_vs_residual: Plots fitted values vs. residuals
    fitted_vs_features: Plots residuals vs all feature variables in a grid
    histogram_resid: Plots a histogram of the residuals (can be normalized)
    shapiro_test: Performs Shapiro-Wilk normality test on the residuals
    qqplot_resid: Creates a quantile-quantile plot for residuals comparing with a normal distribution
    """

    def __init__(self):
        # Fixed: the original signature omitted ``self``, so direct
        # instantiation raised TypeError.
        pass

    def fitted_vs_residual(self):
        """Scatter-plot fitted values against residuals with a zero line."""
        if not self.is_fitted:
            print("Model not fitted yet!")
            return None
        plt.title("Fitted vs. residuals plot", fontsize=14)
        plt.scatter(self.fitted_, self.resid_, edgecolor="k")
        plt.hlines(
            y=0,
            xmin=np.amin(self.fitted_),
            xmax=np.amax(self.fitted_),
            color="k",
            linestyle="dashed",
        )
        plt.xlabel("Fitted values")
        plt.ylabel("Residuals")
        plt.show()

    def fitted_vs_features(self):
        """Plot residuals against every feature in a 3-column subplot grid."""
        if not self.is_fitted:
            print("Model not fitted yet!")
            return None
        num_plots = self.features_.shape[1]
        # Three columns; enough rows to hold one subplot per feature.
        if num_plots % 3 == 0:
            nrows = int(num_plots / 3)
        else:
            nrows = int(num_plots / 3) + 1
        ncols = 3
        fig, ax = plt.subplots(nrows, ncols, figsize=(15, nrows * 3.5))
        axes = ax.ravel()
        # Hide the unused trailing axes of the grid.
        for i in range(num_plots, nrows * ncols):
            axes[i].set_visible(False)
        for i in range(num_plots):
            axes[i].scatter(
                self.features_.T[i],
                self.resid_,
                color="orange",
                edgecolor="k",
                alpha=0.8,
            )
            axes[i].grid(True)
            axes[i].set_xlabel("Feature X[{}]".format(i))
            axes[i].set_ylabel("Residuals")
            axes[i].hlines(
                y=0,
                xmin=np.amin(self.features_.T[i]),
                xmax=np.amax(self.features_.T[i]),
                color="k",
                linestyle="dashed",
            )
        plt.show()

    def histogram_resid(self, normalized=True):
        """Plot a histogram of the (optionally L2-normalized) residuals."""
        if not self.is_fitted:
            print("Model not fitted yet!")
            return None
        if normalized:
            norm_r = self.resid_ / np.linalg.norm(self.resid_)
        else:
            norm_r = self.resid_
        # Bin count grows with sqrt(sample size) but is capped at 20.
        num_bins = min(20, int(np.sqrt(self.features_.shape[0])))
        plt.title("Histogram of the normalized residuals")
        plt.hist(norm_r, bins=num_bins, edgecolor="k")
        plt.xlabel("Normalized residuals")
        plt.ylabel("Count")
        plt.show()

    def shapiro_test(self, normalized=True):
        """Run the Shapiro-Wilk normality test on the residuals and print the verdict."""
        if not self.is_fitted:
            print("Model not fitted yet!")
            return None
        from scipy.stats import shapiro
        if normalized:
            norm_r = self.resid_ / np.linalg.norm(self.resid_)
        else:
            norm_r = self.resid_
        _, p = shapiro(norm_r)
        # p > 0.01: cannot reject normality at the 1% significance level.
        if p > 0.01:
            print("The residuals seem to have come from a Gaussian process")
        else:
            print(
                "The residuals does not seem to have come from a Gaussian process.\nNormality assumptions of the linear regression may have been violated."
            )

    def qqplot_resid(self, normalized=True):
        """Create a Q-Q plot of the residuals against a normal distribution."""
        if not self.is_fitted:
            print("Model not fitted yet!")
            return None
        from scipy.stats import probplot
        if normalized:
            norm_r = self.resid_ / np.linalg.norm(self.resid_)
        else:
            norm_r = self.resid_
        plt.title("Q-Q plot of the normalized residuals")
        probplot(norm_r, dist="norm", plot=plt)
        plt.xlabel("Theoretical quantiles")
        plt.ylabel("Residual quantiles")
        plt.show()
class Data_plots:
    """
    Mixin with data-related plots.

    pairplot: Creates pairplot of all variables and the target
    plot_fitted: Plots fitted values against the true output values from the data
    """

    def __init__(self):
        # Fixed: the original signature omitted ``self``, so direct
        # instantiation raised TypeError.
        pass

    def pairplot(self):
        """Create a Seaborn pairplot of all features plus the target."""
        if not self.is_fitted:
            print("Model not fitted yet!")
            return None
        print("This may take a little time. Have patience...")
        from seaborn import pairplot
        from pandas import DataFrame
        df = DataFrame(np.hstack((self.features_, self.target_.reshape(-1, 1))))
        pairplot(df)
        plt.show()

    def plot_fitted(self, reference_line=False):
        """
        Plot fitted values against the true target values.

        Arguments:
        reference_line: A Boolean switch to draw a 45-degree reference line on the plot
        """
        if not self.is_fitted:
            print("Model not fitted yet!")
            return None
        plt.title("True vs. fitted values", fontsize=14)
        # Fixed: the original referenced an undefined global ``y`` here
        # (NameError at call time); the true targets live in self.target_.
        plt.scatter(self.target_, self.fitted_, s=100, alpha=0.75, color="red", edgecolor="k")
        if reference_line:
            plt.plot(self.target_, self.target_, c="k", linestyle="dotted")
        plt.xlabel("True values")
        plt.ylabel("Fitted values")
        plt.grid(True)
        plt.show()
class Outliers:
    """
    Mixin for plotting outliers, leverage, and influence points.

    All plots are built from a fresh statsmodels OLS refit on the host
    class's ``target_`` / ``features_``.

    cook_distance: Computes and plots Cook's distance
    influence_plot: Creates the influence plot
    leverage_resid_plot: Plots leverage vs normalized residuals' square
    """

    def __init__(self):
        # Fixed: the original signature omitted ``self``, so direct
        # instantiation raised TypeError.
        pass

    def cook_distance(self):
        """Compute and stem-plot Cook's distance for every observation."""
        if not self.is_fitted:
            print("Model not fitted yet!")
            return None
        import statsmodels.api as sm
        from statsmodels.stats.outliers_influence import OLSInfluence as influence
        lm = sm.OLS(self.target_, sm.add_constant(self.features_)).fit()
        inf = influence(lm)
        (c, p) = inf.cooks_distance
        plt.figure(figsize=(8, 5))
        plt.title("Cook's distance plot for the residuals", fontsize=14)
        plt.stem(np.arange(len(c)), c, markerfmt=",", use_line_collection=True)
        plt.grid(True)
        plt.show()

    def influence_plot(self):
        """Create the statsmodels influence plot (Cook's criterion)."""
        if not self.is_fitted:
            print("Model not fitted yet!")
            return None
        import statsmodels.api as sm
        lm = sm.OLS(self.target_, sm.add_constant(self.features_)).fit()
        fig, ax = plt.subplots(figsize=(10, 8))
        fig = sm.graphics.influence_plot(lm, ax=ax, criterion="cooks")
        plt.show()

    def leverage_resid_plot(self):
        """Plot leverage against normalized squared residuals."""
        if not self.is_fitted:
            print("Model not fitted yet!")
            return None
        import statsmodels.api as sm
        lm = sm.OLS(self.target_, sm.add_constant(self.features_)).fit()
        fig, ax = plt.subplots(figsize=(10, 8))
        fig = sm.graphics.plot_leverage_resid2(lm, ax=ax)
        plt.show()
class Multicollinearity:
    """
    Mixin for checking multicollinearity in the dataset features.

    vif: Computes variance inflation factors for each feature variable.
    """

    def __init__(self):
        # Fixed: the original signature omitted ``self``, so direct
        # instantiation raised TypeError.
        pass

    def vif(self):
        """Print the variance inflation factor of every feature column."""
        if not self.is_fitted:
            print("Model not fitted yet!")
            return None
        from statsmodels.stats.outliers_influence import (
            variance_inflation_factor as vif,
        )
        # Removed: the original also fitted an OLS model here whose result
        # was never used -- dead (and expensive) computation.
        for i in range(self.features_.shape[1]):
            v = vif(np.matrix(self.features_), i)
            print("Variance inflation factor for feature {}: {}".format(i, round(v, 2)))
class MyLinearRegression(
    Metrics, Diagnostics_plots, Data_plots, Outliers, Multicollinearity, Inference
):
    """
    Ordinary least squares linear regression solved with the closed-form
    normal equations, enriched with metrics/plot/diagnostics mixins.
    """

    def __init__(self, fit_intercept=True):
        self.coef_ = None                    # fitted coefficients (intercept excluded)
        self.intercept_ = None               # fitted intercept (0 when fit_intercept is False)
        self.fit_intercept_ = fit_intercept  # whether a bias column is added when fitting
        self.is_fitted = False               # True once a fit has completed
        self.features_ = None                # feature matrix, shape (n_samples, n_features)
        self.target_ = None                  # target vector, shape (n_samples,)

    def __repr__(self):
        return "I am a Linear Regression model!"

    def ingest_data(self, X, y):
        """
        Ingests the given data.

        Arguments:
        X: 1D or 2D numpy array
        y: 1D numpy array
        """
        # Promote a 1D feature vector to a single-column 2D matrix.
        if len(X.shape) == 1:
            X = X.reshape(-1, 1)
        self.features_ = X
        self.target_ = y

    def fit(self, X=None, y=None, fit_intercept_=True):
        """
        Fit model coefficients.

        Arguments:
        X: 1D or 2D numpy array (optional; uses previously ingested data if omitted)
        y: 1D numpy array (optional)
        fit_intercept_: accepted for backward compatibility; as in the
            original, the constructor's ``fit_intercept`` setting is what
            actually controls the bias term.

        Notes: the original defined this method twice with identical bodies
        (the duplicate has been removed) and used ``X != None`` /
        ``y != None``, which raises ValueError for numpy arrays of more
        than one element; ``is not None`` is the correct identity test.
        """
        if X is not None:
            if len(X.shape) == 1:
                X = X.reshape(-1, 1)
            self.features_ = X
        if y is not None:
            self.target_ = y
        self._fit_from_arrays()

    def fit_dataframe(self, X, y, dataframe, fit_intercept_=True):
        """
        Fit model coefficients from a Pandas DataFrame.

        Arguments:
        X: A list of columns of the dataframe acting as features. Must be only numerical.
        y: Name of the column of the dataframe acting as the target
        fit_intercept_: accepted for backward compatibility (see ``fit``)
        """
        assert isinstance(
            X, list
        ), "X must be a list of the names of the numerical feature/predictor columns"
        assert isinstance(
            y, str
        ), "y must be a string - name of the column you want as target"
        self.features_ = np.array(dataframe[X])
        self.target_ = np.array(dataframe[y])
        self._fit_from_arrays()

    def _fit_from_arrays(self):
        """Solve the normal equations for the currently ingested data.

        Shared by ``fit`` and ``fit_dataframe`` (the original duplicated
        this ~35-line body three times). Sets ``dft_``, ``dfe_``, ``coef_``,
        ``intercept_``, ``fitted_``, ``resid_`` and flips ``is_fitted``.
        """
        # degrees of freedom of population dependent variable variance
        self.dft_ = self.features_.shape[0] - 1
        # degrees of freedom of population error variance
        self.dfe_ = self.features_.shape[0] - self.features_.shape[1] - 1
        # add bias column if fit_intercept is True
        if self.fit_intercept_:
            X_biased = np.c_[np.ones(self.features_.shape[0]), self.features_]
        else:
            X_biased = self.features_
        y = self.target_
        # closed form solution: coef = (X^T X)^-1 X^T y
        xTx = np.dot(X_biased.T, X_biased)
        inverse_xTx = np.linalg.inv(xTx)
        xTy = np.dot(X_biased.T, y)
        coef = np.dot(inverse_xTx, xTy)
        if self.fit_intercept_:
            self.intercept_ = coef[0]
            self.coef_ = coef[1:]
        else:
            self.intercept_ = 0
            self.coef_ = coef
        # Predicted/fitted y and residuals on the training data.
        self.fitted_ = np.dot(self.features_, self.coef_) + self.intercept_
        self.resid_ = self.target_ - self.fitted_
        self.is_fitted = True

    def predict(self, X):
        """Output model prediction.

        Arguments:
        X: 1D or 2D numpy array
        """
        if len(X.shape) == 1:
            X = X.reshape(-1, 1)
        self.predicted_ = self.intercept_ + np.dot(X, self.coef_)
        return self.predicted_

    def run_diagnostics(self):
        """Run diagnostic tests and plots."""
        Diagnostics_plots.fitted_vs_residual(self)
        Diagnostics_plots.histogram_resid(self)
        Diagnostics_plots.qqplot_resid(self)
        print()
        Diagnostics_plots.shapiro_test(self)

    def outlier_plots(self):
        """Create various outlier plots."""
        Outliers.cook_distance(self)
        Outliers.influence_plot(self)
        Outliers.leverage_resid_plot(self)
|
class NaZakupy:
    """A shopping-list item: product name, quantity, unit of measure, unit price."""

    def __init__(self, nazwa_produktu, ilosc, jednostka_miary, cena_jed):
        # Coerce each field to its canonical type up front.
        self.nazwa_produktu = str(nazwa_produktu)
        self.ilosc = int(ilosc)
        self.jednostka_miary = str(jednostka_miary)
        self.cena_jed = float(cena_jed)

    def wyswietl_produkt(self):
        """Print a human-readable description of the product."""
        print('nazwa : ' + str(self.nazwa_produktu))
        print('ilosc : ' + str(self.ilosc))
        print('jednostka : ' + str(self.jednostka_miary))
        print('cena za ' + str(self.jednostka_miary), ': ' + str(self.cena_jed))

    def ile_produktu(self):
        """Return the quantity together with its unit, e.g. '3 kg'."""
        return '{} {}'.format(self.ilosc, self.jednostka_miary)

    def ile_kosztuje(self):
        """Return the total price: quantity times unit price."""
        return self.cena_jed * self.ilosc
# Demo: build one sample product and show its details and totals.
produkt = NaZakupy('ziemniaki', 3, 'kg', 2.50)
produkt.wyswietl_produkt()
print('ile produktu: ' + produkt.ile_produktu())
print('ile w sumie kosztuje produkt: ' + str(produkt.ile_kosztuje()) + ' zł')
"""
Dialogflow python requests package
"""
import sys
# When the package is being built/installed (``setup.py install`` or
# ``egg_info``), its dependencies may not be available yet, so skip
# importing the actual implementation in that case.
if 'install' not in sys.argv and 'egg_info' not in sys.argv:
    from .dialogflow import Dialogflow
# Package metadata.
__version__ = '0.0.7'
__author__ = 'Mallikarjunarao Kosuri'
__email__ = 'malli.kv2@gmail.com'
|
import time
import tempfile
import json
import os
import urllib
import numpy as np
import pandas as pd
from selenium import webdriver
from selenium.common.exceptions import (NoSuchElementException,
ElementClickInterceptedException,
StaleElementReferenceException)
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.action_chains import ActionChains
from .flyers import get_flyers
from ._version import get_versions
# Versioneer boilerplate: resolve the package version once at import time,
# then remove the helper so it does not leak into the public namespace.
__version__ = get_versions()['version']
del get_versions
class Timeout(Exception):
    # Raised when an expected page element does not appear in time.
    pass
class NoSearchResults(Exception):
    # Raised when a product search lands on the site's "no results" page.
    pass
def setup_and_teardown_driver(func):
    """Decorator for API methods that require a live Selenium driver.

    If the instance already has a driver it is reused and left open;
    otherwise one is created before the call and closed afterwards.

    Fixes vs. original: the driver is now closed even when the wrapped
    method raises (it previously leaked the browser on exceptions), and
    ``functools.wraps`` preserves the wrapped method's metadata.
    """
    from functools import wraps

    @wraps(func)
    def wrapper(*args, **kwargs):
        self = args[0]
        if self._driver:
            close_driver = False
        else:
            self.init_driver()
            close_driver = True
        try:
            return func(*args, **kwargs)
        finally:
            # Only tear down a driver that this call itself created.
            if close_driver:
                self.close_driver()
    return wrapper
class GroceryHelpersAPI:
def __init__(self, user=None, password=None, user_data_dir=None,
data_directory=os.path.join('.', 'data'),
base_url='https://www.realcanadiansuperstore.ca',
store_name='Real Canadian Superstore'):
self._user = user
self._password = password
self._driver = None
self._invoice_list = None
self._temp_download_dir = tempfile.mkdtemp()
self._user_data_dir = user_data_dir
self._data_directory = os.path.join(data_directory,
store_name)
self._store_name = store_name
self._base_url = base_url
def __del__(self):
self.close_driver()
def init_driver(self, headless=False):
options = webdriver.ChromeOptions()
options.add_argument('window-size=1200x600')
if self._user_data_dir:
options.add_argument('user-data-dir=%s' % self._user_data_dir)
if headless:
options.add_argument('headless')
self._driver = webdriver.Chrome(options=options)
self._driver.maximize_window()
    def close_driver(self):
        """Close the browser window (if any) and drop the driver reference."""
        if self._driver:
            # NOTE(review): driver.close() closes only the current window;
            # driver.quit() would terminate the whole browser process --
            # confirm close() is intentional here.
            self._driver.close()
            self._driver = None
@setup_and_teardown_driver
def search(self, term, timeout=10, follow_first_link=False):
self._driver.get('%s/search?search-bar=%s' % (self._base_url, term))
start_time = time.time()
items = []
while len(items) == 0 and time.time() - start_time < 10:
items = self._driver.find_elements_by_class_name(
'product-tile-group__list__item')
product_data = [json.loads(item.find_element_by_class_name(
'product-tracking').get_attribute('data-track-products-array'))
for item in items]
links = self._driver.find_elements_by_class_name(
'product-tile__details__info__name__link')
if len(self._driver.find_elements_by_class_name(
'search-no-results__section-title')):
raise NoSearchResults
df = pd.DataFrame()
for field in ['productSKU', 'productName', 'productBrand',
'productCatalog', 'productVendor', 'productPrice',
'productQuantity', 'dealBadge', 'loyaltyBadge',
'textBadge', 'productPosition', 'productOrderId',
'productVariant']:
df[field] = [data[0][field] for data in product_data]
df['previouslyPurchased'] = ['Previously Purchased' in
item.find_element_by_class_name(
'product-tile__eyebrow'
).text for item in items]
df['link'] = [link.get_attribute('href') for link in links]
df['categories'] = [urllib.request.unquote(link).replace('-', ' '). \
split('/')[4:-2] for link in df['link']]
unit_price_list = []
for item in items:
unit_price = []
for ul in item.find_elements_by_tag_name('ul'):
for li in ul.find_elements_by_tag_name('li'):
data = [span.text for span in
li.find_elements_by_tag_name('span')]
if data[-1] == 'ea' and len(data) == 5 and data[2] == '(est.)' and data[3] == '(est.)':
price = data[1]
quantity = data[4]
elif len(data) == 3:
price = data[1]
quantity = data[2]
else:
continue
unit_price.append((price, quantity))
unit_price_list.append(unit_price)
df['unitPrice'] = unit_price_list
if follow_first_link and len(items):
items[0].find_element_by_tag_name('a').click()
return df
@setup_and_teardown_driver
def get_product_info(self, link=None, timeout=10):
if link and self._driver.current_url != link:
self._driver.get(link)
elif link is None:
link = self._driver.current_url
start_time = time.time()
sku = link.split('/')[-1]
while time.time() - start_time < timeout:
try:
div = [x for x in self._driver.find_elements_by_class_name('product-tracking')
if x.get_attribute('data-track-product-id') == sku][0]
except NoSuchElementException:
pass
product_data = json.loads(div.get_attribute('data-track-products-array'))[0]
package_size = None
try:
package_size = div.find_element_by_class_name('product-name__item--package-size').text
except:
pass
average_weight = None
try:
average_weight = div.find_element_by_class_name('product-avarage-weight--product-details-page').text
except:
pass
items = div.find_elements_by_class_name('comparison-price-list__item')
unit_price = [item.text for item in items]
product_data['link'] = link
product_data['categories'] = urllib.request.unquote(link).replace('-', ' ').split('/')[4:-2]
product_data['packageSize'] = package_size
product_data['averageWeight'] = average_weight
product_data['unitPrice'] = unit_price
return product_data
@setup_and_teardown_driver
def add_product_to_current_order(self, link, quantity=1, timeout=10):
self._driver.get(link)
self._driver.execute_script("window.scrollTo(0, 0);")
start_time = time.time()
while time.time() - start_time < timeout:
try:
product_details = self._driver.find_element_by_class_name('product-details-page-details')
except NoSuchElementException:
pass
# If we've already added this item to the order, clear it
try:
input_box = self._driver.find_element_by_class_name('quantity-selector__quantity__input')
input_box.click()
ActionChains(self._driver).key_down(Keys.LEFT_CONTROL).send_keys('a').key_up(Keys.LEFT_CONTROL).perform()
input_box.send_keys(0)
input_box.send_keys(Keys.ENTER)
except NoSuchElementException:
pass
self._driver.find_element_by_css_selector(
"button[data-track='productAddToCartButton']").click()
time.sleep(1)
input_box = self._driver.find_element_by_class_name('quantity-selector__quantity__input')
input_box.click()
ActionChains(self._driver).key_down(Keys.LEFT_CONTROL).send_keys('a').key_up(Keys.LEFT_CONTROL).perform()
input_box.send_keys(quantity)
input_box.send_keys(Keys.ENTER)
@setup_and_teardown_driver
def get_past_orders_list(self, timeout=10):
self._driver.get(self._base_url + '/account/order-history')
start_time = time.time()
links = []
while len(links) == 0 and time.time() - start_time < timeout:
links = self._driver.find_elements_by_class_name('account-order-history-past-orders-delivery-list-item')
dates = self._driver.find_elements_by_class_name('account-order-history-past-orders-delivery-list-item__details__date')
prices = self._driver.find_elements_by_class_name('account-order-history-past-orders-delivery-list-item__price')
dates = [date.text for date in dates]
prices = [price.text for price in prices]
links = [link.get_attribute('href') for link in links]
order_numbers = [link.split('/')[-1] for link in links]
df_orders = pd.DataFrame({'date': dates,
'price': prices,
'link': links,
'orderNumber': order_numbers})
return df_orders
@setup_and_teardown_driver
def get_itemized_order_history(self, timeout=10):
df_orders = self.get_past_orders_list(timeout)
orders_path = os.path.join(self._data_directory, 'orders.csv')
if os.path.exists(orders_path):
df = pd.read_csv(orders_path, index_col=0)
else:
df = pd.DataFrame()
for i, link in enumerate(df_orders['link']):
order_number = link.split('/')[-1]
# skip if we've already downloaded this order
if len(df) and int(order_number) in df['orderNumber'].values:
continue
self._driver.get(link)
start_time = time.time()
while time.time() - start_time < timeout:
try:
product_descriptions = self._driver. \
find_elements_by_class_name(
'order-history-details-products__product__info__name')
except NoSuchElementException:
pass
product_descriptions = [product_description.text for
product_description in product_descriptions]
product_skus = self._driver.find_elements_by_class_name(
'order-history-details-products__product__info__code')
product_skus = [product_sku.text for product_sku in product_skus]
product_quantities = self._driver.find_elements_by_class_name(
'order-history-details-products__product__quantity')
product_quantities = [product_quantity.text for product_quantity
in product_quantities]
product_prices = self._driver.find_elements_by_class_name(
'order-history-details-products__product__price')
product_prices = [float(product_price.text[1:])
for product_price in product_prices]
df_products = self.get_product_list()
# add any new products to the products database
for sku in product_skus:
if len(df_products) == 0 or sku not in df_products.index:
try:
link = self.map_sku_to_link(sku)
self.add_product_to_database(link)
except NoSearchResults:
print("Couldn't find sku: %s" % sku)
# Convert quantity field to units / kg
units_list = []
kg_list = []
for j in range(len(product_quantities)):
units = None
kg = None
try:
unit_price = product_quantities[j].split(' @ ')[1]
if unit_price.endswith(' ea'):
units = product_prices[j] / float(unit_price[1:-3])
kg = units * df_products[product_skus[j] == df_products.index]['kg'].values[0]
elif unit_price.endswith(' /kg'):
kg = product_prices[j] / float(unit_price[1:-4])
except IndexError:
pass
units_list.append(units)
kg_list.append(kg)
df = df.append(pd.DataFrame({'description': product_descriptions,
'productSKU': product_skus,
'quantity': units_list,
'kg': kg_list,
'price': product_prices,
'orderNumber': order_number,
'date': df_orders['date'].values[i]}
), ignore_index=True)
# update the orders database
df.to_csv(orders_path)
return df
@setup_and_teardown_driver
def map_sku_to_link(self, sku, follow_link=True):
# note errors with 100% maple syrup, 2% cottage cheese
result = self.search(sku[:-3], follow_first_link=follow_link)
if len(result) == 1:
return result['link'].iloc[0]
else:
return None
    @setup_and_teardown_driver
    def add_product_to_database(self, link):
        """Scrape a product page and persist its info, HTML snapshot and image.

        Appends a row (indexed by SKU) to ``products.csv``; skipped when the
        link is already present. Page source and product image are saved
        under ``<data>/products/<SKU>/``.
        """
        products_path = os.path.join(self._data_directory, 'products', 'products.csv')
        if os.path.exists(products_path):
            df_products = pd.read_csv(products_path, index_col=0)
        else:
            df_products = pd.DataFrame()
        try:
            # A missing link may arrive as float NaN (e.g. from pandas);
            # normalize it to None. Non-numeric links raise TypeError here.
            if np.isnan(link):
                link = None
        except TypeError:
            pass
        if len(df_products) and link in df_products['link'].values:
            return
        product_info = self.get_product_info(link)
        output_path = os.path.join(self._data_directory, 'products', product_info['productSKU'])
        if not os.path.exists(output_path):
            os.makedirs(output_path)
        # Save the full page source for offline inspection.
        with open(os.path.join(output_path, product_info['productName'] + '.html'), "wb") as f:
            f.write(self._driver.page_source.encode('utf-8'))
        # Download the product image next to the HTML snapshot.
        src = self._driver.find_element_by_class_name('responsive-image--product-details-page').get_attribute('src')
        ext = os.path.splitext(src)[1]
        urllib.request.urlretrieve(src, os.path.join(output_path, product_info['productName'] + ext))
        # Wrap each value in a one-element list so it becomes a 1-row frame.
        data = {k: [v] for k, v in product_info.items()}
        def isnan(field):
            # True for None or float NaN; False for any other value.
            if field is None:
                return True
            try:
                return np.isnan(field)
            except TypeError:
                return False
        # Convert package size to weight (assume density of 1 kg/L for everything)
        x = data['packageSize'][0]
        kg = None
        if not isnan(data['averageWeight'][0]):
            # Slices a fixed-width prefix/suffix off the averageWeight text
            # -- assumes the site's exact phrasing; TODO confirm format.
            kg = float(data['averageWeight'][0][18:-3])
        elif isnan(x):
            pass
        elif x.endswith(' mL'):
            kg = 1e-3 * float(x[:-3])
        elif x.endswith(' L'):
            kg = float(x[:-2])
        elif x.endswith(' kg'):
            kg = float(x[:-3])
        elif x.endswith(' lb'):
            kg = float(x[:-3]) / 2.2
        elif x.endswith(' lb bag'):
            kg = float(x[:-7]) / 2.2
        elif x.endswith(' g'):
            kg = 1e-3 * float(x[:-2])
        data['kg'] = [kg]
        sku = data.pop('productSKU')[0]
        # NOTE(review): DataFrame.append was removed in pandas 2.0, so this
        # line requires pandas < 2 (pd.concat is the modern equivalent).
        df_products = df_products.append(pd.DataFrame(data, index=[sku]))
        df_products.to_csv(products_path)
    @setup_and_teardown_driver
    def get_pickup_locations(self, postal_code, timeout=10):
        """Return a DataFrame of pickup stores (name, address, distance)
        near ``postal_code``, as listed on the store-locator page.
        """
        self._driver.get(self._base_url + '/store-locator')
        """
        First character of the postal code Province, territory or region First character of the postal code Province, territory or region
        Note: The regions used in this table are defined by Canada Post Corporation.
        Source: Statistics Canada, 2006 Census of Population.
        A Newfoundland and Labrador M Metropolitan Toronto
        B Nova Scotia N Southwestern Ontario
        C Prince Edward Island P Northern Ontario
        E New Brunswick R Manitoba
        G Eastern Québec S Saskatchewan
        H Metropolitan Montréal T Alberta
        J Western Québec V British Columbia
        K Eastern Ontario X Northwest Territories and Nunavut
        L Central Ontario Y Yukon Territory
        """
        # Map the postal code's first letter to a region button on the page.
        # NOTE(review): only Ontario prefixes are handled; any other prefix
        # leaves ``region`` undefined and the comprehension below raises
        # NameError -- confirm whether other regions should be supported.
        if postal_code[0] in ['K', 'L', 'M', 'N', 'P']:
            region = 'Ontario'
        buttons = [button for button in self._driver.find_elements_by_class_name('primary-button--region-selector') if button.text == region]
        if len(buttons):
            buttons[0].click()
        def set_postal_code(postal_code):
            # Type the postal code into the search box, retrying until the
            # box exists or ``timeout`` (closed over) elapses.
            start_time = time.time()
            while time.time() - start_time < timeout:
                try:
                    postal_code_box = self._driver.find_element_by_class_name("location-search__search__input")
                    postal_code_box.click()
                    # Select-all then overwrite whatever is in the box.
                    ActionChains(self._driver) \
                        .key_down(Keys.CONTROL) \
                        .send_keys('a') \
                        .key_up(Keys.CONTROL) \
                        .perform()
                    postal_code_box.send_keys(postal_code)
                    postal_code_box.send_keys(Keys.ENTER)
                    break
                except NoSuchElementException:
                    pass
        set_postal_code(postal_code)
        rows = []
        # NOTE(review): these two polling loops have no timeout -- they spin
        # forever if the result list never renders.
        while len(rows) == 0:
            rows = self._driver.find_elements_by_class_name('location-list-item-details')
        # First two text lines of each row are the store name and address.
        data = [row.text.split('\n')[:2] for row in rows]
        # transpose the 2d list
        data = list(zip(*data))
        # get the distance to each store
        distances = []
        while len(distances) == 0:
            distances = [x.text for x in self._driver.find_elements_by_class_name('location-list-item-type__type__distance')]
        data.append(distances)
        df = pd.DataFrame(dict(zip(['name', 'address', 'distance'], data)))
        return df
    @setup_and_teardown_driver
    def get_pickup_slots(self, postal_code, timeout=10, location=None) if False else None  # placeholder removed
def get_product_list(self):
products_path = os.path.join(self._data_directory, 'products', 'products.csv')
if os.path.exists(products_path):
return pd.read_csv(products_path, index_col=0)
else:
return pd.DataFrame()
def _login(self):
if not self.signed_in():
self._driver.find_element_by_id("accessCode").send_keys(self._user)
self._driver.find_element_by_id ("password").send_keys(self._password)
self._driver.find_element_by_xpath('//*[@id="login-form"]/div[3]/button').click()
def signed_in(self):
url = self._driver.current_url
# If we're not on the base url or the pcid login page, go to the base
# url.
if url.find(self._base_url):
self._driver.get(self._base_url)
while True:
# If the sign in button exists, the user is not logged in.
try:
self._driver.find_element_by_class_name('sign-in')
return False
except NoSuchElementException:
pass
# If the acccounts button exists, the user is logged in.
try:
self._driver.find_element_by_class_name('account__toggle__button')
return True
except NoSuchElementException:
pass
class RealCanadianSuperstoreAPI(GroceryHelpersAPI):
    """GroceryHelpersAPI preconfigured for Real Canadian Superstore."""

    def __init__(self, user=None, password=None, user_data_dir=None,
                 data_directory=os.path.join('.', 'data')):
        super().__init__(
            user=user,
            password=password,
            user_data_dir=user_data_dir,
            data_directory=data_directory,
            base_url='https://www.realcanadiansuperstore.ca',
            store_name='Real Canadian Superstore',
        )
class LowblawsAPI(GroceryHelpersAPI):
    """GroceryHelpersAPI preconfigured for Loblaws."""

    def __init__(self, user=None, password=None, user_data_dir=None,
                 data_directory=os.path.join('.', 'data')):
        super().__init__(
            user=user,
            password=password,
            user_data_dir=user_data_dir,
            data_directory=data_directory,
            base_url='https://www.loblaws.ca',
            store_name='Loblaws',
        )
class ZehrsAPI(GroceryHelpersAPI):
    """GroceryHelpersAPI preconfigured for Zehrs."""

    def __init__(self, user=None, password=None, user_data_dir=None,
                 data_directory=os.path.join('.', 'data')):
        super().__init__(
            user=user,
            password=password,
            user_data_dir=user_data_dir,
            data_directory=data_directory,
            base_url='https://www.zehrs.ca',
            store_name='Zehrs',
        )
class ValumartAPI(GroceryHelpersAPI):
    """GroceryHelpersAPI preconfigured for Valu-mart."""

    def __init__(self, user=None, password=None, user_data_dir=None,
                 data_directory=os.path.join('.', 'data')):
        super().__init__(
            user=user,
            password=password,
            user_data_dir=user_data_dir,
            data_directory=data_directory,
            base_url='https://www.valumart.ca',
            store_name='Valu-mart',
        )
class WalmartAPI(GroceryHelpersAPI):
    """GroceryHelpersAPI for Walmart Canada.

    Walmart's storefront differs from the Loblaws-family sites, so the
    search, product-info and pickup scraping methods are overridden here.
    """

    def __init__(self, user=None, password=None, user_data_dir=None,
                 data_directory=os.path.join('.', 'data')):
        super().__init__(user=user, password=password, user_data_dir=user_data_dir,
                         data_directory=data_directory,
                         base_url='https://www.walmart.ca',
                         store_name='Walmart')

    @setup_and_teardown_driver
    def search(self, term, timeout=10, follow_first_link=False):
        """Search the site for `term`.

        Returns a DataFrame with columns sku, title, description,
        unit_price, price, link.  When `follow_first_link` is True and
        there is at least one result, the driver is left on the first
        result's product page.
        """
        self._driver.get('%s/search/%s' % (self._base_url, term))
        descriptions = self._driver.find_elements_by_class_name('description')
        descriptions = [description.text for description in descriptions]
        titles = self._driver.find_elements_by_class_name('title')
        # Not every 'title' element is a product title; keep only as many
        # as there are descriptions.
        titles = [title.text for title in titles][:len(descriptions)]
        price_units = self._driver.find_elements_by_class_name('price-unit')
        price_units = [price_unit.text for price_unit in price_units]
        prices = self._driver.find_elements_by_class_name('price-current')
        prices = [price.text.replace('\n', '') for price in prices]
        # Dollar prices render like "$4.97", cent prices like "97¢".
        # BUG FIX: '%02d' zero-pads single-digit cent values ("5¢" ->
        # "0.05"); the original '%2d' space-padded them, which made
        # float() raise ValueError.
        prices = [float(price[1:]) if price.find('¢') == -1 else float('0.%02d' % int(price[:-1])) for price in prices]
        skus = self._driver.find_elements_by_class_name('productSkus')
        data = [json.loads(sku.get_attribute('value')) for sku in skus]
        skus = [item['productid'] for item in data]
        links = self._driver.find_elements_by_class_name('product-link')
        # The product URL is embedded inside the element's data-bind
        # attribute; slice it out of the second comma-separated field.
        links = [self._base_url + link.get_attribute('data-bind').split(',')[1][2:-3] for link in links]
        if follow_first_link and len(links):
            self._driver.get(links[0])
        df = pd.DataFrame({
            'sku': skus,
            'title': titles,
            'description': descriptions,
            'unit_price': price_units,
            'price': prices,
            'link': links})
        return df

    @setup_and_teardown_driver
    def get_product_info(self, link=None, timeout=10):
        """Scrape a product page (the current page when `link` is None).

        Returns a dict with name, description, brand, price, unit_price,
        categories, plus the page's 'Product Identifiers' fields.
        """
        if link and self._driver.current_url != link:
            self._driver.get(link)
        elif link is None:
            link = self._driver.current_url
        # Walmart embeds structured data as JSON-LD script tags.
        json_data = [json.loads(script.get_attribute('innerHTML')) for script
                     in self._driver.find_elements_by_css_selector(
                         "script[type='application/ld+json']")]
        product = [x for x in json_data if x['@type'] == 'Product'][0]
        # The breadcrumb list gives the category path; drop the leading
        # "Home" entry.
        categories = [y['item']['name'] for y in
                      [x for x in json_data if x['@type'] == 'BreadcrumbList'
                       ][0]['itemListElement']][1:]
        ppu = self._driver.find_element_by_css_selector("span[data-automation='buybox-price-ppu']").text
        product_ids = {}
        # The three siblings following the 'Product Identifiers' heading
        # each hold a "field\nvalue" pair.
        div = self._driver.find_element_by_xpath("//*[contains(text(), 'Product Identifiers')]")
        for i in range(3):
            div = self._driver.execute_script("""
            return arguments[0].nextElementSibling
            """, div)
            field, value = div.text.split('\n')
            product_ids[field] = value
        product_info = {
            'name': product['name'],
            'description': product['description'],
            'brand': product['brand']['name'],
            'price': product['offers']['price'],
            'unit_price': ppu,
            'categories': categories
        }
        product_info.update(product_ids)
        return product_info

    @setup_and_teardown_driver
    def add_product_to_current_order(self, link, quantity=1, timeout=10):
        """Open the product page at `link`, set the quantity input to
        `quantity`, and click 'Add to cart'."""
        self._driver.get(link)
        input_box = self._driver.find_element_by_css_selector("span[data-automation='quantity']").find_element_by_tag_name('input')
        input_box.click()
        # Select-all so the typed quantity replaces the existing value.
        ActionChains(self._driver).key_down(Keys.LEFT_CONTROL).send_keys('a').key_up(Keys.LEFT_CONTROL).perform()
        # BUG FIX: send the requested quantity; a hard-coded 5 was sent
        # before, silently ignoring the `quantity` argument.
        input_box.send_keys(str(quantity))
        self._driver.find_element_by_xpath("//button[contains(text(), 'Add to cart')]").click()

    @setup_and_teardown_driver
    def get_pickup_locations(self, postal_code):
        """Return a DataFrame of pickup stores near `postal_code`.

        Indexed by the site's location number, with columns distance,
        name and address.
        """
        self._driver.get(self._base_url + '/en/scheduled-shopping')
        def set_postal_code(postal_code):
            #<input type="text" placeholder="Enter a city or postal code to find a pickup location near you." aria-label="Enter a city or postal code to find a pickup location near you." class="css-1kgtn0i eesbt950" value="N1R1A3">
            postal_code_box = self._driver.find_element_by_css_selector("input[placeHolder='Enter a city or postal code to find a pickup location near you.']")
            postal_code_box.click()
            # Select-all before typing so the old value is replaced.
            ActionChains(self._driver) \
                .key_down(Keys.CONTROL) \
                .send_keys('a') \
                .key_up(Keys.CONTROL) \
                .perform()
            postal_code_box.send_keys(postal_code)
            #<input aria-label="Find" type="submit" class="css-hrxt9j e1tmjuvc2" value="Find">
            self._driver.find_element_by_css_selector("input[type='submit']").click()
        set_postal_code(postal_code)
        time.sleep(2)
        # Busy-wait until the location cards render.
        divs = []
        while len(divs) == 0:
            divs = self._driver.find_elements_by_css_selector("div[data-automation='pickup-location']")
        data = [div.text.split('\n')[:4] for div in divs]
        # transpose the 2d list
        data = list(zip(*data))
        df = pd.DataFrame(dict(zip(['index', 'distance', 'name', 'address'], data)))
        return df.set_index('index')

    @setup_and_teardown_driver
    def get_pickup_slots(self, postal_code, location=None, timeout=10):
        """Return a DataFrame of available pickup slots.

        Index: time-of-day strings; columns: dates.  `location` selects a
        store by name from get_pickup_locations(); None picks the first.
        """
        def select_location(postal_code, location):
            df_locations = self.get_pickup_locations(postal_code)
            if location is None:
                i = 0
            elif location in df_locations['name'].values:
                # Go to the store with the matching name; the site's index
                # column is 1-based while the button list is 0-based.
                i = int(df_locations[df_locations['name'] == location].index.values[0]) - 1
            else:
                raise KeyError('%s not in %s' % (location,
                                                 df_locations['name'].values.tolist()))
            buttons = self._driver.find_elements_by_css_selector("button[data-automation='location-link']")
            self._driver.execute_script('arguments[0].scrollIntoView(true);', buttons[i])
            # Click on the selected store
            buttons[i].click()
        select_location(postal_code, location)
        def get_pickup_table_for_current_page():
            # Parse the slot table: the first row holds the dates, the
            # first cell of each following row holds the time of day.
            time.sleep(1)
            table = self._driver.find_element_by_css_selector("table[aria-label='Select a time slot']")
            rows = table.find_elements_by_tag_name("tr")
            data = [[td.text for td in row.find_elements_by_tag_name("td")] for row in rows[1:]]
            # Transpose the 2d list
            data = list(zip(*data))
            dates = [th.text for th in rows[0].find_elements_by_tag_name("th")][1:]
            times = [row.find_element_by_tag_name("th").text for row in rows[1:]]
            return pd.DataFrame(dict(zip(dates, data)), index=times)
        df = get_pickup_table_for_current_page()
        start_time = time.time()
        while time.time() - start_time < timeout:
            # Page forward through the slot calendar until the 'next'
            # button is disabled, merging newly-seen date columns.
            try:
                button = self._driver.find_element_by_id("next-slots")
                if button.get_attribute('disabled'):
                    break
                else:
                    button.click()
                    df_page = get_pickup_table_for_current_page()
                    for col in df_page.columns:
                        if col not in df.columns.values:
                            df[col] = df_page[col]
            except NoSuchElementException:
                pass
        return df
|
import pandas as pd
import numpy as np
import cv2
# Name of the unlabeled recording to annotate (read from
# data/unlabeled_csv/, written to data/raw_labeled/).
data_file = 'thinking.csv'
"""
labels must be a list of segments where the time serie class is true
(i.e. the list of segments where the drawer has "hands down")
"""
labels = [(29,96),(122,145),(162,185),(209,220),(244,261),(339,454)]
df = pd.read_csv('data/unlabeled_csv/'+data_file,index_col=0)
df["label"] = False
# Mark every row inside a labeled segment as True (loc slicing is
# inclusive of both endpoints).
for interval in labels:
    start,end = interval
    df.loc[start:end,"label"] = True
df.to_csv('data/raw_labeled/'+data_file)
# Visual sanity check: render the labeled (x, y) points onto a blank
# 1280x720 image and display it until a key is pressed.
good_points = df.loc[df['label'] == True][['x','y']]
pts = good_points.to_numpy().astype(int)
img = np.zeros((720,1280), dtype=np.uint8)
# Note: image indexing is (row, col) = (y, x), hence the swapped axes.
img[pts.T[1],pts.T[0]]=255
img = cv2.flip(img, 1)
cv2.imshow('frame', img)
key = cv2.waitKey(0)
cv2.destroyAllWindows()
# coding=utf-8
from lxml import etree
from app.utils.parse_url import parse_url
class SearchMetaclass(type):
    """Metaclass that indexes a class's ``search_*`` methods.

    Adds two class attributes: ``__SearchFunc__`` (the list of attribute
    names starting with ``search_``, in definition order) and
    ``__SearchFuncCount__`` (how many there are).
    """
    def __new__(cls, name, bases, attrs):
        search_funcs = [key for key in attrs if key.startswith('search_')]
        attrs['__SearchFunc__'] = search_funcs
        attrs['__SearchFuncCount__'] = len(search_funcs)
        return type.__new__(cls, name, bases, attrs)
class SearchCrawler(object, metaclass=SearchMetaclass):
    """Novel-site crawler front-end.

    SearchMetaclass collects the ``search_*`` methods into
    ``__SearchFunc__`` / ``__SearchFuncCount__``.
    """
    def __init__(self, **kwargs):
        # URL templates for the supported sites.
        self._qidian = 'https://www.qidian.com/search?kw={keyword}'
        self._biquge = 'http://www.biquge.com.tw/{category}/'
        super(SearchCrawler, self).__init__(**kwargs)

    def get_raw_vovels(self, callback):
        """Collect the novels produced by the search method named `callback`.

        SECURITY/IDIOM FIX: dispatch with getattr() instead of
        eval("self.{}()".format(callback)) — eval would execute arbitrary
        code if `callback` were ever attacker-controlled, and getattr is
        the idiomatic dynamic-attribute lookup.
        """
        novels = []
        for novel in getattr(self, callback)():
            print('Getting', novel, 'from', callback)
            novels.append(novel)
        return novels

    def search_qidian(self, keyword):
        """Qidian (起点网) — not implemented yet."""
        pass

    def search_biquge(self, category):
        """Biquge (笔趣阁) — not implemented yet."""
        pass

    def search_xxsy(self):
        """Xiaoxiang Academy (潇湘书院) — not implemented yet."""
        pass

    def search_zongheng(self):
        """Zongheng (纵横) — not implemented yet."""
        pass
# Smoke test: run the biquge search directly when executed as a script.
if __name__ == '__main__':
    SearchCrawler().search_biquge('xuanhuan')
|
"""test distutils-related functionality and methods in src/distutils.h"""
import distutils.extension
import distutils.util
import os.path
import subprocess
import sys
import tempfile
import textwrap
import unittest
# Absolute path of the directory containing this test file.
tests_dir = os.path.dirname(os.path.abspath(__file__))
if __name__ == '__main__':
    # When run directly (not via a test runner), make the in-tree build of
    # csdl2 importable by adding build/lib.<platform>-<pyver> to sys.path.
    plat_specifier = 'lib.{0}-{1}'.format(distutils.util.get_platform(),
                                          sys.version[0:3])
    sys.path.insert(0, os.path.join(tests_dir, '..', 'build', plat_specifier))
from csdl2 import *  # noqa
class TestGetSystemSDL(unittest.TestCase):
    """Tests for the interface of PyCSDL2_GetSystemSDL()."""

    def _assert_str_list(self, value):
        # Shared check: `value` must be exactly a list whose items are str.
        self.assertIs(type(value), list)
        for item in value:
            self.assertIs(type(item), str)

    def test_return_value(self):
        """The function returns either a dict or None."""
        info = PyCSDL2_GetSystemSDL()
        if info is not None:
            self.assertIs(type(info), dict)

    def test_return_value_system(self):
        """A returned dict must have the expected keys and value types.

        It must only have the keys 'include_dirs', 'define_macros',
        'undef_macros', 'extra_compile_args', 'library_dirs', 'libraries',
        'runtime_library_dirs' and 'extra_link_args'.  Every value is a
        list of str, except 'define_macros', which is a list of 2-tuples
        of (str, str or None).
        """
        info = PyCSDL2_GetSystemSDL()
        if info is None:
            raise unittest.SkipTest('csdl2 not dynamically linked')
        self.assertEqual(len(info), 8)
        self._assert_str_list(info['include_dirs'])
        self.assertIs(type(info['define_macros']), list)
        for macro in info['define_macros']:
            self.assertIs(type(macro), tuple)
            self.assertEqual(len(macro), 2)
            self.assertIs(type(macro[0]), str)
            self.assertTrue(type(macro[1]) is str or macro[1] is None)
        for key in ('undef_macros', 'extra_compile_args', 'library_dirs',
                    'libraries', 'runtime_library_dirs', 'extra_link_args'):
            self._assert_str_list(info[key])
class DistutilsBuildMixin:
    """Mixin for writing test cases that builds extensions with distutils

    Building of extensions is done in a sandbox temporary directory.
    Facilities are also provided for running scripts in the directory so as to
    test the built extension.
    """
    def setUp(self):
        # Per-test sandbox directory; removed again in tearDown().
        # (self.__dir is name-mangled, so subclasses cannot collide with it.)
        self.__dir = tempfile.TemporaryDirectory()
        super().setUp()

    def tearDown(self):
        self.__dir.cleanup()
        super().tearDown()

    def init_ext(self, *args, **kwargs):
        "Initializes distutils Extension with pycsdl2's and SDL's include dirs"
        ext = distutils.extension.Extension(*args, **kwargs)
        # Add pycsdl2.h's include directory
        ext.include_dirs.append(os.path.join(tests_dir, '..', 'include'))
        # If csdl2 is dynamically linked, add its include directories,
        # else add our bundled SDL's include directory so SDL.h can be found
        cfg = PyCSDL2_GetSystemSDL()
        if cfg:
            ext.include_dirs.extend(cfg['include_dirs'])
        else:
            ext.include_dirs.append(os.path.join(tests_dir, '..', 'deps',
                                                 'SDL', 'include'))
        return ext

    def add_ext_src(self, ext, name, contents):
        """Adds a source file `name` with `contents` to `ext`"""
        path = os.path.join(self.__dir.name, name)
        with open(path, 'w') as f:
            f.write(contents)
        ext.sources.append(path)

    def __write_setup(self, f, ext_modules):
        # Emit a minimal setup.py that reconstructs each Extension with
        # positional args (name, sources, include_dirs, define_macros, ...).
        f.write('from distutils.core import setup\n')
        f.write('from distutils.extension import Extension\n')
        mods = []
        for i, ext in enumerate(ext_modules):
            f.write('ext{0} = Extension({1.name!r}, {1.sources!r}, '
                    '{1.include_dirs!r}, {1.define_macros!r}, '
                    '{1.undef_macros!r}, {1.library_dirs!r}, '
                    '{1.libraries!r}, {1.runtime_library_dirs!r}, '
                    '{1.extra_compile_args!r}, '
                    '{1.extra_link_args!r})\n'.format(i, ext))
            mods.append('ext{0}'.format(i))
        f.write("setup(name='csdl2test', "
                "ext_modules=[{0}])".format(', '.join(mods)))

    def build_exts(self, exts, **kwargs):
        """Builds the distutils.extension.Extension in `exts`."""
        setup_path = os.path.join(self.__dir.name, 'setup.py')
        with open(setup_path, 'w') as f:
            self.__write_setup(f, exts)
        # Build in place inside the sandbox so test scripts can import
        # the extension by adding the sandbox to sys.path.
        subprocess.check_call([sys.executable, setup_path, 'build_ext',
                               '--inplace'], cwd=self.__dir.name,
                              stdout=subprocess.DEVNULL,
                              **kwargs)

    def __write_script(self, f, contents):
        # Propagate our sys.path to the script
        f.write('import sys\n')
        for x in reversed(sys.path):
            f.write('if {0!r} not in sys.path: '
                    'sys.path.insert(0, {0!r})\n'.format(x))
        # Ensure self.__dir is first in sys.path
        f.write('sys.path.insert(0, {0!r})\n'.format(self.__dir.name))
        f.write(contents)

    def check_call_script(self, name, contents, **kwargs):
        """Check call of script `name` with `contents`"""
        script_path = os.path.join(self.__dir.name, name)
        with open(script_path, 'w') as f:
            self.__write_script(f, contents)
        subprocess.check_call([sys.executable, script_path],
                              cwd=self.__dir.name, **kwargs)

    def check_output_script(self, name, contents, **kwargs):
        """Run script `name` with `contents` and return its stdout as str."""
        script_path = os.path.join(self.__dir.name, name)
        with open(script_path, 'w') as f:
            self.__write_script(f, contents)
        return subprocess.check_output([sys.executable, script_path],
                                       cwd=self.__dir.name,
                                       universal_newlines=True, **kwargs)
class TestLinkSystemSDL(DistutilsBuildMixin, unittest.TestCase):
    """Test building and importing an extension that links against system SDL

    This ensures that the return value of PyCSDL2_GetSystemSDL() is correct.
    """
    # C extension that links directly against SDL: on import it redirects
    # SDL logging to stdout and logs "OK".
    src = textwrap.dedent('''
    #include <Python.h>
    #include <SDL.h>
    static void log_output_func(void *userdata, int category,
                                SDL_LogPriority priority, const char *message)
    {
        printf("%s", message);
    }
    static PyModuleDef PyCSDL2Test_Module = {
        PyModuleDef_HEAD_INIT,
        /* m_name */ "_csdl2test",
        /* m_doc */ "",
        /* m_size */ -1,
        /* m_methods */ NULL,
        /* m_reload */ NULL,
        /* m_traverse */ NULL,
        /* m_clear */ NULL,
        /* m_free */ NULL
    };
    PyMODINIT_FUNC
    PyInit__csdl2test(void)
    {
        PyObject *m = PyModule_Create(&PyCSDL2Test_Module);
        if (m == NULL) { return NULL; }
        SDL_LogSetOutputFunction(log_output_func, NULL);
        SDL_Log("OK");
        return m;
    }
    ''')

    def test_extension(self):
        """Info returned by PyCSDL2_GetSystemSDL() is valid.

        It should be possible to use the information to compile and import an
        extension that links against the system's SDL library.
        """
        cfg = PyCSDL2_GetSystemSDL()
        if not cfg:
            raise unittest.SkipTest('csdl2 not dynamically linked')
        ext = self.init_ext('_csdl2test', [], **cfg)
        self.add_ext_src(ext, '_csdl2test.c', self.src)
        self.build_exts([ext])
        # Importing the extension triggers SDL_Log("OK") at module init.
        out = self.check_output_script('test.py', 'import _csdl2test')
        self.assertEqual(out, 'OK')
class TestPyCSDL2_Import(DistutilsBuildMixin, unittest.TestCase):
    """Test building and running an extension that calls PyCSDL2_Import()
    """
    # C extension whose module-init behavior is selected by define_macros:
    # TEST_SAME_POINTER, TEST_DIFF_UNIT_SAME_POINTER or TEST_VALID_MEM.
    src = textwrap.dedent('''
    #include <pycsdl2.h>
    #ifdef TEST_DIFF_UNIT_SAME_POINTER
    extern const PyCSDL2_CAPI *get_capi(void);
    #endif
    static PyModuleDef PyCSDL2Test_Module = {
        PyModuleDef_HEAD_INIT,
        /* m_name */ "_csdl2test",
        /* m_doc */ "",
        /* m_size */ -1,
        /* m_methods */ NULL,
        /* m_reload */ NULL,
        /* m_traverse */ NULL,
        /* m_clear */ NULL,
        /* m_free */ NULL
    };
    PyMODINIT_FUNC
    PyInit__csdl2test(void)
    {
        PyObject *m = PyModule_Create(&PyCSDL2Test_Module);
        const PyCSDL2_CAPI *api;
    #if defined(TEST_SAME_POINTER) || defined(TEST_DIFF_UNIT_SAME_POINTER)
        const PyCSDL2_CAPI *api2;
    #endif
    #ifdef TEST_VALID_MEM
        PyCSDL2_CAPI *api2;
    #endif
        if (m == NULL) { return NULL; }
        if (!(api = PyCSDL2_Import())) { Py_DECREF(m); return NULL; }
    #ifdef TEST_SAME_POINTER
        if (!(api2 = PyCSDL2_Import())) { Py_DECREF(m); return NULL; }
        if (api != api2) {
            PyErr_SetString(PyExc_AssertionError, "api != api2");
            Py_DECREF(m);
            return NULL;
        }
    #endif
    #ifdef TEST_DIFF_UNIT_SAME_POINTER
        if (!(api2 = get_capi())) { Py_DECREF(m); return NULL; }
        if (api != api2) {
            PyErr_SetString(PyExc_AssertionError, "api != api2");
            Py_DECREF(m);
            return NULL;
        }
    #endif
    #ifdef TEST_VALID_MEM
        if (!(api2 = PyMem_New(PyCSDL2_CAPI, 1))) {
            Py_DECREF(m);
            return NULL;
        }
        memcpy(api2, api, sizeof(PyCSDL2_CAPI));
        PyMem_Del(api2);
    #endif
        return m;
    }
    ''')
    # Second translation unit used by TEST_DIFF_UNIT_SAME_POINTER.
    src2 = textwrap.dedent('''
    #include <pycsdl2.h>
    const PyCSDL2_CAPI *get_capi(void)
    {
        return PyCSDL2_Import();
    }
    ''')

    def test_same_pointer(self):
        "When called multiple times, it should return the same ptr"
        ext = self.init_ext('_csdl2test', [])
        ext.define_macros.append(('TEST_SAME_POINTER', None))
        self.add_ext_src(ext, '_csdl2test.c', self.src)
        self.build_exts([ext])
        # The C-level assertion fires at import time if pointers differ.
        self.check_call_script('test.py', 'import _csdl2test')

    def test_diff_unit_same_pointer(self):
        "When called from different translation units, returns same ptr"
        ext = self.init_ext('_csdl2test', [])
        ext.define_macros.append(('TEST_DIFF_UNIT_SAME_POINTER', None))
        self.add_ext_src(ext, '_csdl2test.c', self.src)
        self.add_ext_src(ext, 'src2.c', self.src2)
        self.build_exts([ext])
        self.check_call_script('test.py', 'import _csdl2test')

    def test_valid_mem(self):
        "Returns valid memory"
        ext = self.init_ext('_csdl2test', [])
        ext.define_macros.append(('TEST_VALID_MEM', None))
        self.add_ext_src(ext, '_csdl2test.c', self.src)
        self.build_exts([ext])
        self.check_call_script('test.py', 'import _csdl2test')
class TestSDLCAPI(DistutilsBuildMixin, unittest.TestCase):
    """Test the SDL API exposed through PyCSDL2_CAPI

    Unfortunately, since the SDL API is so big, it would be unfeasible to test
    every single function. It's also a bit unnecessary, as SDL has its own test
    suite. We just do some simple testing to ensure that the SDL function
    pointers are being assigned properly.
    """
    # C extension that calls SDL through the imported C API struct
    # (capi->_SDL_*), printing "OK" on import.
    src = textwrap.dedent('''
    #include <pycsdl2.h>
    static void log_output_func(void *userdata, int category,
                                SDL_LogPriority priority, const char *message)
    {
        printf("%s", message);
    }
    static PyModuleDef PyCSDL2Test_Module = {
        PyModuleDef_HEAD_INIT,
        /* m_name */ "_csdl2test",
        /* m_doc */ "",
        /* m_size */ -1,
        /* m_methods */ NULL,
        /* m_reload */ NULL,
        /* m_traverse */ NULL,
        /* m_clear */ NULL,
        /* m_free */ NULL
    };
    PyMODINIT_FUNC
    PyInit__csdl2test(void)
    {
        PyObject *m;
        const PyCSDL2_CAPI *capi;
        if (!(m = PyModule_Create(&PyCSDL2Test_Module))) { return NULL; }
        if (!(capi = PyCSDL2_Import())) { Py_DECREF(m); return NULL; }
        capi->_SDL_LogSetOutputFunction(log_output_func, NULL);
        capi->_SDL_Log("OK");
        return m;
    }
    ''')

    def test_log(self):
        """SDL_Log should work through the C API
        """
        ext = self.init_ext('_csdl2test', [])
        self.add_ext_src(ext, '_csdl2test.c', self.src)
        self.build_exts([ext])
        out = self.check_output_script('test.py', 'import _csdl2test')
        self.assertEqual(out, 'OK')
class TestSDLFuncRedirect(DistutilsBuildMixin, unittest.TestCase):
    """Test SDL function redirection

    Unfortunately, since the SDL API is so big, it would be unfeasible to test
    every single function. It's also a bit unnecessary, as SDL has its own test
    suite. We just do some simple testing to ensure that the SDL function
    pointers are being assigned properly.
    """
    # C extension that calls SDL_Log() directly; pycsdl2.h normally
    # redirects such calls through the imported C API.
    src = textwrap.dedent('''
    #include <pycsdl2.h>
    static void log_output_func(void *userdata, int category,
                                SDL_LogPriority priority, const char *message)
    {
        printf("%s", message);
    }
    static PyModuleDef PyCSDL2Test_Module = {
        PyModuleDef_HEAD_INIT,
        /* m_name */ "_csdl2test",
        /* m_doc */ "",
        /* m_size */ -1,
        /* m_methods */ NULL,
        /* m_reload */ NULL,
        /* m_traverse */ NULL,
        /* m_clear */ NULL,
        /* m_free */ NULL
    };
    PyMODINIT_FUNC
    PyInit__csdl2test(void)
    {
        PyObject *m;
        const PyCSDL2_CAPI *capi;
        if (!(m = PyModule_Create(&PyCSDL2Test_Module))) { return NULL; }
        if (!(capi = PyCSDL2_Import())) { Py_DECREF(m); return NULL; }
        SDL_LogSetOutputFunction(log_output_func, NULL);
        SDL_Log("OK");
        return m;
    }
    ''')

    def test_SDL_Log(self):
        """Just calling SDL_Log() should work"""
        ext = self.init_ext('_csdl2test', [])
        self.add_ext_src(ext, '_csdl2test.c', self.src)
        self.build_exts([ext])
        out = self.check_output_script('test.py', 'import _csdl2test')
        self.assertEqual(out, 'OK')

    def test_SDL_Log_no_redirect(self):
        """When PYCSDL2_NO_REDIRECT is defined, SDL_Log() should fail

        Because SDL_Log() is not overridden anymore so there should be a linker
        error during compilation or when trying to import the extension.
        """
        ext = self.init_ext('_csdl2test', [])
        ext.define_macros.append(('PYCSDL2_NO_REDIRECT', None))
        self.add_ext_src(ext, '_csdl2test.c', self.src)
        # Failure may surface either at build time or at import time, so
        # both subprocess calls are inside the try block.
        try:
            self.build_exts([ext], stderr=subprocess.DEVNULL)
            self.check_call_script('test.py', 'import _csdl2test',
                                   stderr=subprocess.DEVNULL)
        except subprocess.CalledProcessError:
            return
        self.fail('subprocess.CalledProcessError not raised')
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
#!/usr/bin/env python
# Python 2 script: without extra arguments, print a torrent's decoded
# metadata; with one or more tracker URLs, append them to the torrent's
# announce-list and rewrite the file in place.
import sys
import os
from types import StringType
# get bencode package from http://github.com/fishy/scripts/downloads
from bencode.bencode import bencode, bdecode, BTFailure
try :
    torrent = sys.argv[1]
except IndexError :
    print "Usage: \"%s <torrent_file> [tracker_url]\" to show torrent info (without tracker_url), or to add tracker(s)" % sys.argv[0]
    sys.exit()
# Read and bdecode the whole torrent file.
# NOTE(review): `file` and (below) `list` shadow builtins; kept as-is.
size = os.stat(torrent).st_size
file = open(torrent, "rb")
data = file.read(size)
file.close()
info = bdecode(data)
# No tracker arguments: just display the metadata and exit.
if len(sys.argv) == 2 :
    print info
    sys.exit()
if 'announce-list' not in info :
    # No announce-list yet: seed it with the single 'announce' URL,
    # then append any new trackers from the command line.
    list = [info['announce']]
    for i in range(len(sys.argv)-2) :
        tracker = sys.argv[i+2]
        if tracker not in list :
            list.append(tracker)
    print list
    info['announce-list'] = [list]
else :
    # announce-list exists: extend its first tier (normalizing a bare
    # string entry to a one-element list first).
    list = info['announce-list'][0]
    if type(list) == StringType :
        list = [list]
    for i in range(len(sys.argv)-2) :
        tracker = sys.argv[i+2]
        if tracker not in list :
            list.append(tracker)
    print list
    info['announce-list'][0] = list
# Re-encode and overwrite the original torrent file.
writedata = bencode(info)
file = open(torrent, "wb")
file.write(writedata)
file.close()
|
import json
import sys
def check(flag):
    """Validate a CTF flag candidate (Python 2).

    The candidate is reversed, base64-decoded and parsed as JSON, then
    checked field by field.  Returns the reconstructed flag string on
    success, False on any failed check (a malformed candidate will raise).
    """
    processed = flag[::-1]
    # Python 2 str codec; not available on str in Python 3.
    processed = processed.decode("base64")
    final = json.loads(processed)
    if (final['check_code'] != "AK4782"):
        return False
    if (final['flag_content']['numbers']*2 != 18529313):
        return False
    # "standardisation"[::2] == "sadriain"
    if (final['flag_content']['change'] != "standardisation"[::2]):
        return False
    if (final['flag_content']['settled'] != "CrossCTF{%s_%d_%s}"):
        return False
    temp = final['flag_content']
    # Fill the template with the verified fields to produce the flag.
    return temp['settled'] % (temp['change'], temp['numbers'], final['check_code'])
def main():
    """Entry point: check the flag passed as the single CLI argument."""
    if len(sys.argv) != 2:
        print "No"
        sys.exit()
    result = check(sys.argv[1])
    if result:
        print result
    else:
        print "No"
# Script entry point.
if __name__ == "__main__":
    main()
|
from metaflow_test import MetaflowTest, ExpectationFailed, steps, tag
class CardTimeoutTest(MetaflowTest):
    """
    Checks that the card decorator works as intended with a timeout.

    The start step records its pathspec as an artifact and attaches a card
    with a `timeout` argument; check_results then asserts the card data is
    None (the card render is expected to have timed out).
    """
    PRIORITY = 2

    @tag('card(type="test_timeout_card",timeout=10,options=dict(timeout=20),save_errors=False)')
    @steps(0, ["start"])
    def step_start(self):
        from metaflow import current
        # Record this task's pathspec so check_results can find the task.
        self.task = current.pathspec

    @steps(1, ["all"])
    def step_all(self):
        pass

    def check_results(self, flow, checker):
        run = checker.get_run()
        for step in flow:
            # Only the start step carries the card.
            if step.name != "start":
                continue
            if run is None:
                # This means CliCheck is in context.
                cli_check_dict = checker.artifact_dict(step.name, "task")
                for task_pathspec in cli_check_dict:
                    # Pathspec format: flow/run/step/task_id.
                    task_id = task_pathspec.split("/")[-1]
                    checker.assert_card(
                        step.name,
                        task_id,
                        "timeout_card",
                        None,
                    )
            else:
                # This means MetadataCheck is in context.
                meta_check_dict = checker.artifact_dict(step.name, "task")
                for task_id in meta_check_dict:
                    checker.assert_card(step.name, task_id, "timeout_card", None)
|
#!/usr/bin/env python
# coding=utf-8
# aeneas is a Python/C library and a set of tools
# to automagically synchronize audio and text (aka forced alignment)
#
# Copyright (C) 2012-2013, Alberto Pettarin (www.albertopettarin.it)
# Copyright (C) 2013-2015, ReadBeyond Srl (www.readbeyond.it)
# Copyright (C) 2015-2017, Alberto Pettarin (www.albertopettarin.it)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
A wrapper for a custom TTS engine.
"""
from __future__ import absolute_import
from __future__ import print_function
from aeneas.language import Language
from aeneas.ttswrappers.basettswrapper import BaseTTSWrapper
class CustomTTSWrapper(BaseTTSWrapper):
    """
    A wrapper for the ``espeak`` TTS engine,
    to illustrate the use of custom TTS wrapper
    loading at runtime.

    It will perform one or more calls like ::

        $ echo "text to be synthesized" | espeak -v en -w output_file.wav

    This wrapper supports calling the TTS engine
    only via ``subprocess``.

    To use this TTS engine, specify ::

        "tts=custom|tts_path=/path/to/this/file.py"

    in the ``rconf`` object.

    :param rconf: a runtime configuration
    :type rconf: :class:`~aeneas.runtimeconfiguration.RuntimeConfiguration`
    :param logger: the logger object
    :type logger: :class:`~aeneas.logger.Logger`
    """
    #
    # NOTE create aliases for the language codes
    #      supported by this TTS: in this example,
    #      English, Italian, Russian and Ukrainian
    #
    ENG = Language.ENG
    """ English """
    ITA = Language.ITA
    """ Italian """
    RUS = Language.RUS
    """ Russian """
    UKR = Language.UKR
    """ Ukrainian """
    #
    # NOTE LANGUAGE_TO_VOICE_CODE maps a language code
    #      to the corresponding voice code
    #      supported by this custom TTS wrapper;
    #      mock support for Ukrainian with Russian voice
    #
    LANGUAGE_TO_VOICE_CODE = {
        ENG: "en",
        ITA: "it",
        RUS: "ru",
        UKR: "ru",
    }
    DEFAULT_LANGUAGE = ENG
    #
    # NOTE eSpeak always outputs to PCM16 mono WAVE (RIFF) at 22050 Hz
    #
    OUTPUT_AUDIO_FORMAT = ("pcm_s16le", 1, 22050)
    #
    # NOTE calling eSpeak via subprocess
    #
    HAS_SUBPROCESS_CALL = True
    TAG = u"CustomTTSWrapperESPEAK"

    def __init__(self, rconf=None, logger=None):
        """Configure the subprocess command line used to invoke eSpeak."""
        #
        # NOTE custom TTS wrappers must be implemented
        #      in a class named CustomTTSWrapper
        #      otherwise the Synthesizer will not work
        #
        super(CustomTTSWrapper, self).__init__(rconf=rconf, logger=logger)
        #
        # NOTE this example is minimal, as we implement only
        #      the subprocess call method
        #      hence, all we need to do is to specify
        #      how to map the command line arguments of the TTS engine
        #
        # NOTE if our TTS engine was callable via Python
        #      or a Python C extension,
        #      we would have needed to write a _synthesize_multiple_python()
        #      or a _synthesize_multiple_c_extension() function,
        #      with the same I/O interface of
        #      _synthesize_multiple_c_extension() in espeakwrapper.py
        #
        # NOTE on a command line, you will use eSpeak
        #      to synthesize some text to a WAVE file as follows:
        #
        #      $ echo "text to synthesize" | espeak -v en -w output_file.wav
        #
        #      Observe that text is read from stdin, while the audio data
        #      is written to a file specified by a given output path,
        #      introduced by the "-w" switch.
        #      Also, there is a parameter to select the English voice ("en"),
        #      introduced by the "-v" switch.
        #
        self.set_subprocess_arguments([
            u"/usr/bin/espeak",                     # path of espeak executable or just "espeak" if it is in your PATH
            u"-v",                                  # append "-v"
            self.CLI_PARAMETER_VOICE_CODE_STRING,   # it will be replaced by the actual voice code
            u"-w",                                  # append "-w"
            self.CLI_PARAMETER_WAVE_PATH,           # it will be replaced by the actual output file path
            self.CLI_PARAMETER_TEXT_STDIN           # text is read from stdin
        ])
        #
        # NOTE if your TTS engine only reads text from a file
        #      you can use the
        #      BaseTTSWrapper.CLI_PARAMETER_TEXT_PATH placeholder.
        #
        # NOTE if your TTS engine only writes audio data to stdout
        #      you can use the
        #      BaseTTSWrapper.CLI_PARAMETER_WAVE_STDOUT placeholder.
        #
        # NOTE if your TTS engine needs a more complex parameter
        #      for selecting the voice, e.g. Festival needs
        #      '-eval "(language_italian)"',
        #      you can implement a _voice_code_to_subprocess() function
        #      and use the
        #      BaseTTSWrapper.CLI_PARAMETER_VOICE_CODE_FUNCTION placeholder
        #      instead of the
        #      BaseTTSWrapper.CLI_PARAMETER_VOICE_CODE_STRING placeholder.
        #      See the aeneas/ttswrappers/festivalttswrapper.py file
        #      for an example.
        #
|
"""Test the smarttub config flow."""
from unittest.mock import patch
from smarttub import LoginFailed
from openpeerpower import config_entries, data_entry_flow
from openpeerpower.components.smarttub.const import DOMAIN
from openpeerpower.const import CONF_EMAIL, CONF_PASSWORD
from tests.common import MockConfigEntry
async def test_form(opp):
    """Test we get the form."""
    flow = await opp.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    assert flow["type"] == "form"
    assert flow["errors"] == {}

    credentials = {CONF_EMAIL: "test-email", CONF_PASSWORD: "test-password"}
    with patch(
        "openpeerpower.components.smarttub.async_setup_entry",
        return_value=True,
    ) as mock_setup_entry:
        flow = await opp.config_entries.flow.async_configure(
            flow["flow_id"], credentials
        )
        assert flow["type"] == "create_entry"
        assert flow["title"] == "test-email"
        assert flow["data"] == credentials
        await opp.async_block_till_done()
        mock_setup_entry.assert_called_once()
async def test_form_invalid_auth(opp, smarttub_api):
    """Test we handle invalid auth."""
    flow = await opp.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )

    # Make the backend reject the credentials.
    smarttub_api.login.side_effect = LoginFailed
    credentials = {CONF_EMAIL: "test-email", CONF_PASSWORD: "test-password"}
    flow = await opp.config_entries.flow.async_configure(
        flow["flow_id"], credentials
    )
    assert flow["type"] == "form"
    assert flow["errors"] == {"base": "invalid_auth"}
async def test_reauth_success(opp, smarttub_api, account):
    """Test reauthentication flow."""
    entry = MockConfigEntry(
        domain=DOMAIN,
        data={CONF_EMAIL: "test-email", CONF_PASSWORD: "test-password"},
        unique_id=account.id,
    )
    entry.add_to_opp(opp)

    reauth_context = {
        "source": config_entries.SOURCE_REAUTH,
        "unique_id": entry.unique_id,
        "entry_id": entry.entry_id,
    }
    result = await opp.config_entries.flow.async_init(
        DOMAIN, context=reauth_context, data=entry.data
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["step_id"] == "reauth_confirm"

    # Submitting fresh credentials should update the existing entry.
    result = await opp.config_entries.flow.async_configure(
        result["flow_id"],
        {CONF_EMAIL: "test-email3", CONF_PASSWORD: "test-password3"},
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
    assert result["reason"] == "reauth_successful"
    assert entry.data[CONF_EMAIL] == "test-email3"
    assert entry.data[CONF_PASSWORD] == "test-password3"
async def test_reauth_wrong_account(opp, smarttub_api, account):
    """Test reauthentication flow if the user enters credentials for a different already-configured account."""
    entry1 = MockConfigEntry(
        domain=DOMAIN,
        data={CONF_EMAIL: "test-email1", CONF_PASSWORD: "test-password1"},
        unique_id=account.id,
    )
    entry1.add_to_opp(opp)

    entry2 = MockConfigEntry(
        domain=DOMAIN,
        data={CONF_EMAIL: "test-email2", CONF_PASSWORD: "test-password2"},
        unique_id="mockaccount2",
    )
    entry2.add_to_opp(opp)

    # we try to reauth account #2, and the user successfully authenticates to account #1
    account.id = entry1.unique_id
    result = await opp.config_entries.flow.async_init(
        DOMAIN,
        context={
            "source": config_entries.SOURCE_REAUTH,
            "unique_id": entry2.unique_id,
            "entry_id": entry2.entry_id,
        },
        data=entry2.data,
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["step_id"] == "reauth_confirm"

    result = await opp.config_entries.flow.async_configure(
        result["flow_id"],
        {CONF_EMAIL: "test-email1", CONF_PASSWORD: "test-password1"},
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
    assert result["reason"] == "already_configured"
|
from chain import chain
from copy import deepcopy
import numpy as np
def create(*args,**kwargs):
    """Factory entry point: build and return a :class:`CLP` molecule.

    All arguments are forwarded unchanged to ``CLP.__init__``.
    """
    return CLP(*args,**kwargs)
class CLP(chain):
    """A rod/coil composite molecule assembled from linear `chain` pieces.

    ``numRods`` identical linear rods are placed on a circle of radius
    ``rodCentralDist`` about the z axis; ``numCoils`` linear coils are placed
    on a larger circle outside the rods, and each coil's first bead is bonded
    to an atom of the rod bundle.
    """

    def __init__(self,
                 rodLength=15,        # beads per rod
                 rodDiameters=[1.0],  # bead diameters for the rod chain
                 rodBondLength=1.0,
                 rodBondTypes=[1.0],
                 rodSequence=[1],
                 rodAngleType=None,
                 numRods=3,
                 rodCentralDist=2.0,  # radius of the circle the rods sit on
                 numCoils=3,
                 coilLength=20,       # beads per coil
                 coilDiameters=[2.0],
                 coilSequence=[2],
                 coilBondLength=1.0,
                 coilBondTypes=[1],
                 coilAngleType=None):
        # NOTE(review): the list defaults are mutable and shared across
        # calls; safe only if callers never mutate them — verify.
        super(CLP,self).__init__()
        self.name='CLP'
        self.placed=False        # set elsewhere once the molecule is positioned
        self.natoms=0
        self.positions=[]
        self.dihedrals=[]
        self.diameters=[]
        self.bodies=[]
        self.types=[]
        self.bonds = []
        self.angles=[]
        self.beadVol=0
        self.moleculeIDs=[]
        #Make rod: one linear template chain, deep-copied numRods times below
        rod = chain(length=rodLength,
                    beadDiameters=rodDiameters,
                    bondTypes=rodBondTypes,
                    angleType=rodAngleType,
                    sequence=rodSequence)
        rod.makeLinear(bondLength=rodBondLength)
        rodList = []
        newPos = np.array([0.,0.,0.])
        newPos[0] += rodCentralDist   # first rod starts on the +x axis
        th = 2*np.pi/numRods          # angular spacing between rods
        for i in range(numRods):
            cc = deepcopy(rod)
            cc.moleculeIDs = [0]*rodLength   # every rod shares molecule ID 0
            # translate the copy so its first bead lands on newPos
            diff = np.subtract(newPos,cc.positions[0])
            cc.positions = np.add(cc.positions,diff)
            rodList.append(cc)
            # rotate the placement point by th about the z axis (x-y plane)
            xy = newPos[:2]
            xy = np.dot(xy,[[np.cos(th),-np.sin(th)],[np.sin(th),np.cos(th)]])
            newPos[:2]=xy
        #make coils: same pattern on a circle just outside the rod bundle
        coil = chain(length=coilLength,
                     beadDiameters=coilDiameters,
                     bondTypes=coilBondTypes,
                     angleType=coilAngleType,
                     sequence=coilSequence)
        coil.makeLinear(bondLength=coilBondLength)
        coilList = []
        newPos = np.array([0.,0.,0.])
        # offset ~ contact distance between a rod bead and a coil bead
        # (1.13 gives a small clearance beyond the mean diameter)
        newPos[0] += 1.13*(rod.diameters[0]+coil.diameters[0])/2.0 + rodCentralDist
        th = 2*np.pi/numCoils
        for i in range(numCoils):
            cc = deepcopy(coil)
            cc.moleculeIDs = [i+1]*coilLength  # coils get IDs 1..numCoils
            diff = np.subtract(newPos,cc.positions[0])
            cc.positions = np.add(cc.positions,diff)
            coilList.append(cc)
            xy = newPos[:2]
            xy = np.dot(xy,[[np.cos(th),-np.sin(th)],[np.sin(th),np.cos(th)]])
            newPos[:2]=xy
        #combine molecules: rods first, then coils bonded onto the rods
        for k,r in enumerate(rodList):
            self.addMolecule(r)
        newBondList = []
        graftIndex = 0
        for c in coilList:
            # bond each coil's first atom (self.natoms before adding it)
            # to the first atom of successive rods (graftIndex steps by
            # rodLength per coil)
            newBondList.append([coilBondTypes[0],graftIndex,self.natoms])
            graftIndex += rodLength
            self.addMolecule(c)
        self.bonds.extend(newBondList)
|
#!/usr/bin/env python
import unittest
from cogent3 import DNA, make_aligned_seqs
from cogent3.core.annotation import Feature, Variable, _Feature
from cogent3.core.location import Map, Span, as_map
from cogent3.core.sequence import DnaSequence, RnaSequence
__author__ = "Gavin Huttley"
__copyright__ = "Copyright 2007-2020, The Cogent Project"
__credits__ = ["Gavin Huttley"]
__license__ = "BSD-3"
__version__ = "2020.2.7a"
__maintainer__ = "Gavin Huttley"
__email__ = "gavin.huttley@anu.edu.au"
__status__ = "Production"
def makeSampleSequence(with_gaps=False):
    """Return a DNA sequence annotated with a CDS and a 5'UTR feature."""
    raw_seq = "AACCCAAAATTTTTTGGGGGGGGGGCCCC"
    if with_gaps:
        raw_seq = raw_seq[:5] + "-----" + raw_seq[10:-2] + "--"
    seq = DNA.make_seq(raw_seq)
    seq.add_annotation(Feature, "CDS", "CDS", [(15, 25)])
    seq.add_annotation(Feature, "5'UTR", "5' UTR", [(12, 15)])
    return seq
def makeSampleAlignment():
    """Build a two-sequence alignment (one gapped) carrying four annotations."""
    seqs = {
        "FAKE01": makeSampleSequence(),
        "FAKE02": makeSampleSequence(with_gaps=True),
    }
    aln = make_aligned_seqs(data=seqs, array_align=False)
    for feat_type, feat_name, span in [
        ("misc_feature", "misc", (12, 25)),
        ("CDS", "blue", (15, 25)),
        ("5'UTR", "red", (2, 4)),
        ("LTR", "fake", (2, 15)),
    ]:
        aln.add_annotation(Feature, feat_type, feat_name, [span])
    return aln
class TestAnnotations(unittest.TestCase):
    """Annotation behaviour on sequences and alignments: subclassing
    _Feature, slicing, projection, copying and reverse complement."""

    def setUp(self):
        # One annotated sequence and one annotated two-sequence alignment
        # built by the module-level factory functions.
        self.seq = makeSampleSequence()
        self.aln = makeSampleAlignment()

    def test_inherit_feature(self):
        """should be able to subclass and extend _Feature"""

        class NewFeat(_Feature):
            def __init__(self, *args, **kwargs):
                super(NewFeat, self).__init__(*args, **kwargs)

            def newMethod(self):
                if len(self.map.spans) > 1:
                    # works only if as_one_span() preserves the subclass
                    as_one = self.as_one_span()  # should create new instance of NewFeat
                    return as_one.newMethod()
                return True

        seq = DNA.make_seq("ACGTACGTACGT")
        f = seq.add_annotation(
            NewFeat, as_map([(1, 3), (5, 7)], len(seq)), type="gene", name="abcd"
        )
        # derived features must keep the subclass, not degrade to _Feature
        self.assertEqual(type(f.as_one_span()), NewFeat)
        self.assertEqual(type(f.get_shadow()), NewFeat)
        f2 = seq.add_annotation(
            NewFeat, as_map([(3, 5)], len(seq)), type="gene", name="def"
        )
        self.assertEqual(
            type(seq.get_region_covering_all([f, f2], feature_class=NewFeat)), NewFeat
        )
        # now use the new method
        f.newMethod()

    def test_slice_seq_with_annotations(self):
        """annotations survive slicing and re-joining a sequence"""
        newseq = self.seq[:5] + self.seq[10:]
        for annot_type in ["CDS", "5'UTR"]:
            orig = str(list(self.seq.get_by_annotation(annot_type))[0])
            new = str(list(newseq.get_by_annotation(annot_type))[0])
            assert orig == new, (annot_type, orig, new)

    def test_aln_annotations(self):
        """annotations work on an alignment and on its member sequences"""
        aln_expecteds = {
            "misc_feature": {"FAKE01": "TTTGGGGGGGGGG", "FAKE02": "TTTGGGGGGGGGG"},
            "CDS": {"FAKE01": "GGGGGGGGGG", "FAKE02": "GGGGGGGGGG"},
            "5'UTR": {"FAKE01": "CC", "FAKE02": "CC"},
            "LTR": {"FAKE01": "CCCAAAATTTTTT", "FAKE02": "CCC-----TTTTT"},
        }
        seq_expecteds = {
            "CDS": {"FAKE01": "GGGGGGGGGG", "FAKE02": "GGGGGGGGGG"},
            "5'UTR": {"FAKE01": "TTT", "FAKE02": "TTT"},
        }
        for annot_type in ["misc_feature", "CDS", "5'UTR", "LTR"]:
            # alignment-level annotation slices
            observed = list(self.aln.get_by_annotation(annot_type))[0].to_dict()
            expected = aln_expecteds[annot_type]
            assert observed == expected, (annot_type, expected, observed)
            if annot_type in ["misc_feature", "LTR"]:
                continue  # because seqs haven't been annotated with it
            # sequence-level annotation slices
            for name in self.aln.names:
                observed = list(
                    self.aln.named_seqs[name].data.get_by_annotation(annot_type)
                )[0]
                observed = str(observed)
                expected = seq_expecteds[annot_type][name]
                assert str(observed) == expected, (annot_type, name, expected, observed)

    def test_slice_aln_with_annotations(self):
        """test that annotations of sequences and alignments survive alignment
        slicing."""
        aln_expecteds = {
            "misc_feature": {"FAKE01": "TTTGGGGGGGGGG", "FAKE02": "TTTGGGGGGGGGG"},
            "CDS": {"FAKE01": "GGGGGGGGGG", "FAKE02": "GGGGGGGGGG"},
            "5'UTR": {"FAKE01": "CC", "FAKE02": "CC"},
            "LTR": {"FAKE01": "CCCTTTTT", "FAKE02": "CCCTTTTT"},
        }
        newaln = self.aln[:5] + self.aln[10:]
        feature_list = newaln.get_annotations_matching("LTR")
        for annot_type in ["LTR", "misc_feature", "CDS", "5'UTR"]:
            feature_list = newaln.get_annotations_matching(annot_type)
            new = newaln.get_region_covering_all(feature_list).get_slice().to_dict()
            expected = aln_expecteds[annot_type]
            assert expected == new, (annot_type, expected, new)
            if annot_type in ["misc_feature", "LTR"]:
                continue  # because seqs haven't been annotated with it
            # per-sequence annotations must yield the same slice before and
            # after the alignment was cut and re-joined
            for name in self.aln.names:
                orig = str(
                    list(self.aln.get_annotations_from_seq(name, annot_type))[
                        0
                    ].get_slice()
                )
                new = str(
                    list(newaln.get_annotations_from_seq(name, annot_type))[
                        0
                    ].get_slice()
                )
                assert orig == new, (name, annot_type, orig, new)

    def test_feature_projection(self):
        """projecting an alignment feature onto a member sequence"""
        expecteds = {"FAKE01": "CCCAAAATTTTTT", "FAKE02": "CCC-----TTTTT"}
        aln_ltr = self.aln.get_annotations_matching("LTR")[0]
        for seq_name in ["FAKE01", "FAKE02"]:
            expected = expecteds[seq_name]
            seq_ltr = self.aln.project_annotation(seq_name, aln_ltr)
            if "-" in expected:
                # a projection crossing a gap cannot be sliced directly
                self.assertRaises(ValueError, seq_ltr.get_slice)
                seq_ltr = seq_ltr.without_lost_spans()
                expected = expected.replace("-", "")
            self.assertEqual(seq_ltr.get_slice(), expected)

    def test_feature_copy_annotations_to(self):
        """test correct copy of annotations"""
        orig = DnaSequence("TTTTTTTTTTAAAA", name="Orig")
        annot = orig.add_annotation(Feature, "exon", "fred", [(0, 14)])
        seq = RnaSequence("UUUUUUUUUUAAAA", name="Test")
        got = annot.copy_annotations_to(seq)
        self.assertEqual(len(orig.annotations), len(got.annotations))
        for src, dest in zip(orig.annotations, got.annotations):
            self.assertEqual(src.get_coordinates(), dest.get_coordinates())
            self.assertIsInstance(src, dest.__class__)
            self.assertIs(dest.parent, seq)
        # copying onto a shorter sequence must fail
        with self.assertRaises(AssertionError):
            _ = annot.copy_annotations_to(seq[:-2])

    def test_reverse_complement(self):
        """test correct translation of annotations on reverse complement."""
        aln_expecteds = {
            "misc_feature": {"FAKE01": "TTTGGGGGGGGGG", "FAKE02": "TTTGGGGGGGGGG"},
            "CDS": {"FAKE01": "GGGGGGGGGG", "FAKE02": "GGGGGGGGGG"},
            "5'UTR": {"FAKE01": "CC", "FAKE02": "CC"},
            "LTR": {"FAKE01": "CCCAAAATTTTTT", "FAKE02": "CCC-----TTTTT"},
        }
        seq_expecteds = {
            "CDS": {"FAKE01": "GGGGGGGGGG", "FAKE02": "GGGGGGGGGG"},
            "5'UTR": {"FAKE01": "TTT", "FAKE02": "TTT"},
        }
        rc = self.aln.rc()
        # rc'ing an Alignment or Sequence rc's their annotations too. This means
        # slicing returns the same sequence as the non-rc'd alignment/seq
        for annot_type in ["misc_feature", "CDS", "5'UTR", "LTR"]:
            observed = list(self.aln.get_by_annotation(annot_type))[0].to_dict()
            expected = aln_expecteds[annot_type]
            assert observed == expected, ("+", annot_type, expected, observed)
            observed = list(rc.get_by_annotation(annot_type))[0].to_dict()
            expected = aln_expecteds[annot_type]
            assert observed == expected, ("-", annot_type, expected, observed)
            if annot_type in ["misc_feature", "LTR"]:
                continue  # because seqs haven't been annotated with it
            for name in self.aln.names:
                # forward strand
                observed = list(
                    self.aln.named_seqs[name].data.get_by_annotation(annot_type)
                )[0]
                observed = str(observed)
                expected = seq_expecteds[annot_type][name]
                assert str(observed) == expected, (
                    "+",
                    annot_type,
                    name,
                    expected,
                    observed,
                )
                # reverse strand must slice to the same text
                observed = list(rc.named_seqs[name].data.get_by_annotation(annot_type))[
                    0
                ]
                observed = str(observed)
                expected = seq_expecteds[annot_type][name]
                assert str(observed) == expected, (
                    "-",
                    annot_type,
                    name,
                    expected,
                    observed,
                )
class TestMapSpans(unittest.TestCase):
    """Test attributes of Map & Spans classes critical to annotation
    manipulation."""

    def test_span(self):
        """reversed_relative_to() mirrors a span about the parent length."""
        length = 100  # parent sequence length about which spans are mirrored
        forward = Span(20, 30)
        reverse = Span(70, 80, reverse=True)
        # Fix: `length` was defined but the literal 100 was used below,
        # leaving the local unused.
        assert forward.reversed_relative_to(length) == reverse
        assert reverse.reversed_relative_to(length) == forward

    def test_map(self):
        """reversing a map with multiple spans should preserve span relative
        order"""
        forward = [Span(20, 30), Span(40, 50)]
        fmap = Map(spans=forward, parent_length=100)
        fmap_reversed = fmap.nucleic_reversed()
        reverse = [Span(70, 80, reverse=True), Span(50, 60, reverse=True)]
        rmap = Map(spans=reverse, parent_length=100)
        for i in range(2):
            self.assertEqual(fmap_reversed.spans[i], rmap.spans[i])
# Allow running this test module directly (e.g. `python test_annotation.py`).
if __name__ == "__main__":
    unittest.main()
|
import robosuite as suite
from robosuite.models import MujocoWorldBase
from robosuite.models.robots import UR5e
from robosuite.models.grippers import gripper_factory
from robosuite.models.arenas import EmptyArena
from mujoco_py import MjSim, MjViewer
# Build a minimal MuJoCo world: a UR5e arm fitted with a Robotiq
# three-finger gripper, placed at the origin of an empty arena.
world = MujocoWorldBase()
ur5 = UR5e()
gripper = gripper_factory("RobotiqThreeFingerDexterousGripper")
ur5.add_gripper(gripper)
ur5.set_base_xpos([0,0,0])  # robot base at the world origin
world.merge(ur5)
arena = EmptyArena()
world.merge(arena)
# Compile the merged MJCF model and open an interactive viewer.
model = world.get_model()
sim = MjSim(model)
viewer = MjViewer(sim)
# NOTE: infinite render loop — terminate the process (Ctrl-C / close
# window) to exit.
while True:
    viewer.render()
|
from unittest import TestCase, main
import os
import dask
import numpy as np
import pandas as pd
import pandas.testing as pdt
from skbio import DNA
from qiime2.plugin.testing import TestPluginBase
from qiime2 import Artifact, Metadata
from q2_types.feature_data import DNAIterator, DNAFASTAFormat
from q2_sidle._tree import (reconstruct_fragment_rep_seqs,
_expand_primer,
_find_exact_forward,
_find_exact_reverse,
_find_approx_forward,
_find_approx_reverse,
_group_concensus,
)
from q2_sidle.tests import test_set as ts
class TreeTest(TestCase):
    """Tests for fragment reconstruction and the primer-search helpers."""

    def setUp(self):
        # Aligned reference sequences plus the two per-region kmer maps
        # consumed by reconstruct_fragment_rep_seqs.
        self.base_dir = \
            os.path.join(os.path.dirname(os.path.realpath(__file__)),
                         'files/little_test')
        self.aligned_seqs = pd.Series({
            'seq01': DNA('-CTAGTCATGCGAAGCGGCTCAGGATGATGATGAAGAC---------------------------------'),
            'seq02': DNA('ACTAGTCATGCGAAGCGGCTCAGGATGATGATGAAGAC---------------------------------'),
            'seq03': DNA('CATAGTCATWTCCGCGTTGGAGTTATGATGATGAWACCACCTCGTCCCAGTTCCGCGCTTCTGACGTGCA-'),
            'seq04': DNA('------------------GGAGTTATGATGA--AGACCACCTCGTCCCAGTTCCGCGCTTCTGACGTGCAC'),
            'seq05': DNA('CATAGTCATCGTTTATGTATGCCCATGATGATGCGAGCACCTCGTATGGATGTAGAGCCACTGACGTGCGG'),
            })
        kmer1 = Artifact.load(os.path.join(self.base_dir, 'frag_r1_db_map.qza'))
        kmer2 = Artifact.load(os.path.join(self.base_dir, 'frag_r2_db_map.qza'))
        self.kmer_map1 = kmer1.view(pd.DataFrame)
        self.kmer_map2 = kmer2.view(pd.DataFrame)
        np.random.seed(5)

    def test_reconstruct_fragment_rep_seqs(self):
        """Reconstructed fragments should match the expected consensus."""
        recon_map = pd.Series(
            data=['seq01|seq02', 'seq01|seq02', 'seq03|seq04',
                  'seq03|seq04', 'seq05'],
            index=pd.Index(['seq01', 'seq02', 'seq03', 'seq04', 'seq05'],
                           name='db-seq'),
            name='clean_name'
        )
        recon_summary = pd.DataFrame(
            data=[[1, 2, 2, 0, 'asv01|asv02'],
                  [2, 3, 1.5, np.std([1, 2], ddof=1), 'asv03|asv04'],
                  [2, 2, 1, 0, 'asv07|asv08']],
            index=pd.Index(['seq01|seq02', 'seq03|seq04', 'seq05'],
                           name='clean_name'),
            columns=['num-regions', 'total-kmers-mapped',
                     'mean-kmer-per-region', 'stdv-kmer-per-region',
                     'mapped-asvs'],
        )
        known = pd.Series(
            data=['GCGAAGCGGCTCAGG',
                  'WTCCGCGTTGGAGTTATGATGATGAGACCACCTCGTCCCAGTTCCGCGCTTC'],
            index=pd.Index(['seq01|seq02', 'seq03|seq04'], name='clean_name'),
        )
        test = reconstruct_fragment_rep_seqs(
            region=['Bludhaven', 'Gotham'],
            kmer_map=[self.kmer_map1, self.kmer_map2],
            reconstruction_map=recon_map,
            reconstruction_summary=recon_summary,
            aligned_sequences=self.aligned_seqs,
        )
        pdt.assert_series_equal(test.view(pd.Series).astype(str), known)

    def test_expand_primer_miss(self):
        """Degenerate primer with 1 allowed mismatch expands to fuzzy regex."""
        primer = 'WANTCAT'
        known = '((?<=([AT])))((A[ACGT]TCAT){e<=1})'
        test = _expand_primer(primer, 1)
        self.assertEqual(known, test)

    def test_expand_primer_none(self):
        """Degenerate primer with no mismatches expands to an exact regex."""
        primer = 'WANTCAT'
        known = '[AT]A[ACGT]TCAT'
        test = _expand_primer(primer, None)
        self.assertEqual(known, test)

    def test_find_exact_forward_match(self):
        args = pd.Series([
            '-CTAGTCATGCGAAGCGGCTCAGGATGATGATGAAGAC--------------',
            '[AT]A[ACGT]TCAT'])
        test = _find_exact_forward(args)
        self.assertEqual(test, 9)

    def test_find_exact_forward_miss(self):
        # raw (unexpanded) primer does not occur literally -> NaN
        args = pd.Series([
            '-CTAGTCATGCGAAGCGGCTCAGGATGATGATGAAGAC--------------',
            'WANTCAT'])
        test = _find_exact_forward(args)
        self.assertTrue(np.isnan(test))

    def test_find_exact_reverse_match(self):
        args = pd.Series([
            '-CTAGTCATGCGAAGCGGCTCAGGATGATGATGAAGAC--------------',
            'ATGATGATG'])
        test = _find_exact_reverse(args)
        # Fix: was `self.assertTrue(test, 24)` — there 24 is the *failure
        # message*, so the position was never actually compared.
        self.assertEqual(test, 24)

    def test_find_exact_reverse_miss(self):
        # Fix: this method previously duplicated the name of the match test
        # above, silently shadowing it so the match case never ran.
        args = pd.Series([
            '-CTAGTCATGCGAAGCGGCTCAGGATGATGATGAAGAC--------------',
            'CATCATCAT'])
        test = _find_exact_reverse(args)
        # Fix: was `self.assertTrue(test, np.nan)`, which is vacuous; a miss
        # is signalled by NaN (mirroring test_find_exact_forward_miss).
        self.assertTrue(np.isnan(test))

    def test_find_approx_forward(self):
        args = pd.Series([
            DNA('-CTAGTCATGCGAAGCGGCTCAGGATGATGATGAAGAC--------------'),
            'WANTCAT'])
        test = _find_approx_forward(args)
        self.assertEqual(test, 9)

    def test_find_approx_reverse(self):
        args = pd.Series([
            DNA('-CTAGTCATGCGAAGCGGCTCAGGATGATGATGAAGAC--------------'),
            'ATGATGATG'])
        test = _find_approx_reverse(args)
        self.assertEqual(test, 24)

    def test_get_consus_seq(self):
        """Consensus of a degenerate pair resolves gaps and ambiguities."""
        g = self.aligned_seqs.loc[['seq03', 'seq04']]
        known = ('CATAGTCATWTCCGCGTTGGAGTTATGATGATGAGACCACCTCGTCCCAGTTCCGCGCTTCTGACGTGCAC')
        test = _group_concensus(g)
        self.assertEqual(known, str(test))
# Allow running this test module directly.
if __name__ == '__main__':
    main()
# Code for generating the images and label files (Team NMSP)
import numpy as np
import cv2
import os
DATA_DIR = "Group_Image"   # directory the sample images are written to
LABEL_DIR = "Group_Label"  # directory the label text files are written to

# Create the output directories if needed. Fix: replaces the
# `if os.path.exists(d) is False: os.makedirs(d)` pattern, which is both
# unidiomatic (`is False`) and racy; exist_ok handles both cases atomically.
os.makedirs(DATA_DIR, exist_ok=True)
os.makedirs(LABEL_DIR, exist_ok=True)

# Load the dataset (samples classified with >= 95% accuracy) used to
# generate the images and label files.
label_true = np.load("label_true.npy")
label_pred = np.load("label_pred.npy")
data_true = np.load("data_true.npy")
one_hot_label_true = np.load("one_hot_label_true.npy")

# Write one JPEG per sample, named by its index.
for x in range(len(data_true)):
    cv2.imwrite(DATA_DIR + "/" + str(x) + ".jpg", data_true[x])

# Collect the true and predicted labels as strings (first column of each row).
label_t = [str(label_true[x][0]) for x in range(len(label_true))]
label_p = [str(label_pred[x][0]) for x in range(len(label_true))]

# Write the label files, one label per line.
with open(os.path.join(LABEL_DIR, "label_true.txt"), 'w') as f:
    f.writelines([line + "\n" for line in label_t])
with open(os.path.join(LABEL_DIR, "label_pred.txt"), 'w') as f:
    f.writelines([line + "\n" for line in label_p])
|
"""
Unary operations on SomeValues.
"""
from __future__ import absolute_import
from collections import defaultdict
from rpython.tool.pairtype import pair
from rpython.flowspace.operation import op
from rpython.flowspace.model import const, Constant
from rpython.flowspace.argument import CallSpec
from rpython.annotator.model import (SomeObject, SomeInteger, SomeBool,
SomeString, SomeChar, SomeList, SomeDict, SomeTuple, SomeImpossibleValue,
SomeUnicodeCodePoint, SomeInstance, SomeBuiltin, SomeBuiltinMethod,
SomeFloat, SomeIterator, SomePBC, SomeNone, SomeTypeOf, s_ImpossibleValue,
s_Bool, s_None, s_Int, unionof, add_knowntypedata,
SomeWeakRef, SomeUnicodeString, SomeByteArray, SomeOrderedDict)
from rpython.annotator.bookkeeper import getbookkeeper, immutablevalue
from rpython.annotator.binaryop import _clone ## XXX where to put this?
from rpython.annotator.binaryop import _dict_can_only_throw_keyerror
from rpython.annotator.binaryop import _dict_can_only_throw_nothing
from rpython.annotator.classdesc import ClassDesc, is_primitive_type, BuiltinTypeDesc
from rpython.annotator.model import AnnotatorError
from rpython.annotator.argument import simple_args, complex_args
# Names of all single-dispatch (unary) operations known to the flow space.
# 'contains' is excluded: although dispatch == 1, it is registered per-type
# with @op.contains.register(...) further down in this module.
UNARY_OPERATIONS = set([oper.opname for oper in op.__dict__.values()
                        if oper.dispatch == 1])
UNARY_OPERATIONS.remove('contains')
@op.type.register(SomeObject)
def type_SomeObject(annotator, v_arg):
    """Annotate type(x): a SomeTypeOf tied to the argument variable."""
    return SomeTypeOf([v_arg])
def our_issubclass(bk, cls1, cls2):
    """issubclass() at the annotation level, comparing class descriptions."""
    def as_desc(cls):
        # normalize either input into a class description object
        if isinstance(cls, ClassDesc):
            return cls
        if is_primitive_type(cls):
            return BuiltinTypeDesc(cls)
        return bk.getdesc(cls)

    return as_desc(cls1).issubclass(as_desc(cls2))
def s_isinstance(annotator, s_obj, s_type, variables):
    """Annotate isinstance(obj, type).

    Returns a SomeBool, constant-folded when the outcome is provable from
    the class hierarchy, and carrying knowntypedata so the annotator can
    narrow `variables` to the tested type on the True branch.
    """
    if not s_type.is_constant():
        # the class object itself is unknown: nothing can be proven
        return SomeBool()
    r = SomeBool()
    typ = s_type.const
    bk = annotator.bookkeeper
    if s_obj.is_constant():
        r.const = isinstance(s_obj.const, typ)
    elif our_issubclass(bk, s_obj.knowntype, typ):
        if not s_obj.can_be_none():
            # static type is a subclass and the value cannot be None: True
            r.const = True
    elif not our_issubclass(bk, typ, s_obj.knowntype):
        # the classes are unrelated in either direction: provably False
        r.const = False
    elif s_obj.knowntype == int and typ == bool: # xxx this will explode in case of generalisation
        # from bool to int, notice that isinstance( , bool|int)
        # is quite border case for RPython
        r.const = False
    for v in variables:
        assert v.annotation == s_obj
    knowntypedata = defaultdict(dict)
    if not hasattr(typ, '_freeze_') and isinstance(s_type, SomePBC):
        # on the True path, `variables` are known instances of typ
        add_knowntypedata(knowntypedata, True, variables, bk.valueoftype(typ))
    r.set_knowntypedata(knowntypedata)
    return r
@op.isinstance.register(SomeObject)
def isinstance_SomeObject(annotator, v_obj, v_cls):
    """Annotate isinstance(obj, cls), narrowing v_obj on the True branch."""
    return s_isinstance(
        annotator,
        annotator.annotation(v_obj),
        annotator.annotation(v_cls),
        variables=[v_obj],
    )
@op.bool.register(SomeObject)
def bool_SomeObject(annotator, obj):
    """Annotate bool(obj); on the True path obj is known to be non-None."""
    r = SomeBool()
    # let the annotation constant-fold its own truth value if it can
    annotator.annotation(obj).bool_behavior(r)
    s_nonnone_obj = annotator.annotation(obj)
    if s_nonnone_obj.can_be_none():
        s_nonnone_obj = s_nonnone_obj.nonnoneify()
    knowntypedata = defaultdict(dict)
    # when bool(obj) is true, obj cannot be None
    add_knowntypedata(knowntypedata, True, [obj], s_nonnone_obj)
    r.set_knowntypedata(knowntypedata)
    return r
@op.contains.register(SomeObject)
def contains_SomeObject(annotator, obj, element):
    """Fallback annotation for `element in obj`: just some bool."""
    return s_Bool
contains_SomeObject.can_only_throw = []
@op.contains.register(SomeNone)
def contains_SomeNone(annotator, obj, element):
    # return False here for the case "... in None", because it can be later
    # generalized to "... in d" where d is either None or the empty dict
    # (which would also return the constant False)
    s_false = SomeBool()
    s_false.const = False
    return s_false
contains_SomeNone.can_only_throw = []
@op.contains.register(SomeInteger)
@op.contains.register(SomeFloat)
@op.contains.register(SomeBool)
def contains_number(annotator, number, element):
    """`x in number` is always a type error in RPython."""
    raise AnnotatorError("number is not iterable")
@op.simple_call.register(SomeObject)
def simple_call_SomeObject(annotator, func, *args):
    """Annotate a plain positional call f(a, b, ...)."""
    s_func = annotator.annotation(func)
    s_args = [annotator.annotation(v_arg) for v_arg in args]
    return s_func.call(simple_args(s_args))
@op.call_args.register_transform(SomeObject)
def transform_varargs(annotator, v_func, v_shape, *data_v):
    """Rewrite f(..., *arg) into a call passing the tuple items as plain
    positional arguments; `arg` must annotate to a SomeTuple of known
    length. Returns the replacement list of operations."""
    callspec = CallSpec.fromshape(v_shape.value, list(data_v))
    v_vararg = callspec.w_stararg
    if callspec.w_stararg:
        s_vararg = annotator.annotation(callspec.w_stararg)
        if not isinstance(s_vararg, SomeTuple):
            raise AnnotatorError(
                "Calls like f(..., *arg) require 'arg' to be a tuple")
        # unpack the tuple with one getitem operation per element
        n_items = len(s_vararg.items)
        ops = [op.getitem(v_vararg, const(i)) for i in range(n_items)]
        new_args = callspec.arguments_w + [hlop.result for hlop in ops]
    # NOTE(review): if there is no *arg, `ops`/`new_args` below are unbound;
    # presumably this transform only fires for shapes that do carry a
    # stararg — confirm against the register_transform machinery.
    if callspec.keywords:
        # keywords remain: rebuild a call_args with the flattened shape
        newspec = CallSpec(new_args, callspec.keywords)
        shape, data_v = newspec.flatten()
        call_op = op.call_args(v_func, const(shape), *data_v)
    else:
        # no keywords left: a simple_call suffices
        call_op = op.simple_call(v_func, *new_args)
    ops.append(call_op)
    return ops
@op.call_args.register(SomeObject)
def call_args(annotator, func, *args_v):
    """Annotate a call with a complex shape (keywords and/or varargs)."""
    s_args = [annotator.annotation(v_arg) for v_arg in args_v]
    return annotator.annotation(func).call(complex_args(s_args))
@op.issubtype.register(SomeObject)
def issubtype(annotator, v_type, v_cls):
    """Annotate issubclass(type, cls); constant-folds when both are known."""
    # NOTE(review): reads v_type.annotation directly while v_cls goes
    # through annotator.annotation() — presumably equivalent here; verify.
    s_type = v_type.annotation
    s_cls = annotator.annotation(v_cls)
    if s_type.is_constant() and s_cls.is_constant():
        return annotator.bookkeeper.immutablevalue(
            issubclass(s_type.const, s_cls.const))
    return s_Bool
class __extend__(SomeObject):
    """Default unary-operation annotations for any annotation value."""

    def len(self):
        # len() always yields a non-negative int
        return SomeInteger(nonneg=True)

    def bool_behavior(self, s):
        # constant-fold the truth value when possible, else try via len()
        if self.is_immutable_constant():
            s.const = bool(self.const)
        else:
            s_len = self.len()
            if s_len.is_immutable_constant():
                s.const = s_len.const > 0

    def hash(self):
        raise AnnotatorError("cannot use hash() in RPython")

    def str(self):
        return SomeString()

    def unicode(self):
        return SomeUnicodeString()

    def repr(self):
        return SomeString()

    def hex(self):
        return SomeString()

    def oct(self):
        return SomeString()

    def id(self):
        raise AnnotatorError("cannot use id() in RPython; "
                             "see objectmodel.compute_xxx()")

    def int(self):
        return SomeInteger()

    def float(self):
        return SomeFloat()

    def delattr(self, s_attr):
        # warn (not error) on delattr applied to anything typed
        if self.__class__ != SomeObject or self.knowntype != object:
            getbookkeeper().warning(
                ("delattr on potentally non-SomeObjects is not RPythonic: delattr(%r,%r)" %
                 (self, s_attr)))

    def find_method(self, name):
        "Look for a special-case implementation for the named method."
        try:
            analyser = getattr(self.__class__, 'method_' + name)
        except AttributeError:
            return None
        else:
            return SomeBuiltinMethod(analyser, self, name)

    def getattr(self, s_attr):
        # get a SomeBuiltin if the SomeObject has
        # a corresponding method to handle it
        if not s_attr.is_constant() or not isinstance(s_attr.const, str):
            raise AnnotatorError("getattr(%r, %r) has non-constant argument"
                                 % (self, s_attr))
        attr = s_attr.const
        s_method = self.find_method(attr)
        if s_method is not None:
            return s_method
        # if the SomeObject is itself a constant, allow reading its attrs
        if self.is_immutable_constant() and hasattr(self.const, attr):
            return immutablevalue(getattr(self.const, attr))
        raise AnnotatorError("Cannot find attribute %r on %r" % (attr, self))
    getattr.can_only_throw = []

    def setattr(self, *args):
        return s_ImpossibleValue

    def bind_callables_under(self, classdef, name):
        return self   # default unbound __get__ implementation

    def call(self, args, implicit_init=False):
        raise AnnotatorError("Cannot prove that the object is callable")

    def hint(self, *args_s):
        return self

    def getslice(self, *args):
        return s_ImpossibleValue

    def setslice(self, *args):
        return s_ImpossibleValue

    def delslice(self, *args):
        return s_ImpossibleValue

    def pos(self):
        return s_ImpossibleValue

    # these all share pos()'s "impossible" default annotation
    neg = abs = ord = invert = long = iter = next = pos
class __extend__(SomeFloat):
    """Unary operations on floats."""

    def pos(self):
        # +x preserves the (possibly constant) annotation
        return self

    def neg(self):
        return SomeFloat()
    abs = neg  # abs() of a float is also just some float

    def bool(self):
        if self.is_immutable_constant():
            return getbookkeeper().immutablevalue(bool(self.const))
        return s_Bool

    def len(self):
        raise AnnotatorError("'float' has no length")
class __extend__(SomeInteger):
    """Unary operations on integers; *_ovf variants may raise OverflowError."""

    def invert(self):
        return SomeInteger(knowntype=self.knowntype)
    invert.can_only_throw = []

    def pos(self):
        return SomeInteger(knowntype=self.knowntype)
    pos.can_only_throw = []
    int = pos

    # these are the only ones which can overflow:

    def neg(self):
        return SomeInteger(knowntype=self.knowntype)
    neg.can_only_throw = []
    neg_ovf = _clone(neg, [OverflowError])

    def abs(self):
        # abs() additionally proves the result non-negative
        return SomeInteger(nonneg=True, knowntype=self.knowntype)
    abs.can_only_throw = []
    abs_ovf = _clone(abs, [OverflowError])

    def len(self):
        raise AnnotatorError("'int' has no length")
class __extend__(SomeBool):
    """Unary operations on bools; arithmetic promotes to int."""

    def bool(self):
        return self

    def invert(self):
        return SomeInteger()
    invert.can_only_throw = []

    def neg(self):
        return SomeInteger()
    neg.can_only_throw = []
    neg_ovf = _clone(neg, [OverflowError])

    def abs(self):
        # abs of a bool is 0 or 1, hence non-negative
        return SomeInteger(nonneg=True)
    abs.can_only_throw = []
    abs_ovf = _clone(abs, [OverflowError])

    def pos(self):
        return SomeInteger(nonneg=True)
    pos.can_only_throw = []
    int = pos
class __extend__(SomeTuple):
    """Unary operations on tuples (element annotations known per-position)."""

    def len(self):
        # tuple length is statically known
        return immutablevalue(len(self.items))

    def iter(self):
        return SomeIterator(self)
    iter.can_only_throw = []

    def getanyitem(self, position):
        # iteration yields the union of all element annotations
        return unionof(*self.items)

    def getslice(self, s_start, s_stop):
        # slicing needs constant bounds so the result tuple length is known
        assert s_start.is_immutable_constant(),"tuple slicing: needs constants"
        assert s_stop.is_immutable_constant(), "tuple slicing: needs constants"
        items = self.items[s_start.const:s_stop.const]
        return SomeTuple(items)
@op.contains.register(SomeList)
def contains_SomeList(annotator, obj, element):
    """`element in lst` forces the list item type to include element."""
    annotator.annotation(obj).listdef.generalize(annotator.annotation(element))
    return s_Bool
contains_SomeList.can_only_throw = []
class __extend__(SomeList):
    """Unary operations and method annotations on lists; most of them
    delegate to the shared listdef (resize / mutate / generalize)."""

    def method_append(self, s_value):
        self.listdef.resize()
        self.listdef.generalize(s_value)

    def method_extend(self, s_iterable):
        self.listdef.resize()
        if isinstance(s_iterable, SomeList):   # unify the two lists
            self.listdef.agree(getbookkeeper(), s_iterable.listdef)
        else:
            # generic iterable: behave like appending its items
            s_iter = s_iterable.iter()
            self.method_append(s_iter.next())

    def method_reverse(self):
        self.listdef.mutate()

    def method_insert(self, s_index, s_value):
        # same effect on the annotation as append
        self.method_append(s_value)

    def method_remove(self, s_value):
        self.listdef.resize()
        self.listdef.generalize(s_value)

    def method_pop(self, s_index=None):
        position = getbookkeeper().position_key
        self.listdef.resize()
        return self.listdef.read_item(position)
    method_pop.can_only_throw = [IndexError]

    def method_index(self, s_value):
        self.listdef.generalize(s_value)
        return SomeInteger(nonneg=True)

    def len(self):
        position = getbookkeeper().position_key
        s_item = self.listdef.read_item(position)
        if isinstance(s_item, SomeImpossibleValue):
            # no item was ever put in: the list must be empty
            return immutablevalue(0)
        return SomeObject.len(self)

    def iter(self):
        return SomeIterator(self)
    iter.can_only_throw = []

    def getanyitem(self, position):
        return self.listdef.read_item(position)

    def hint(self, *args_s):
        hints = args_s[-1].const
        if 'maxlength' in hints:
            # only for iteration over lists or dicts or strs at the moment,
            # not over an iterator object (because it has no known length)
            s_iterable = args_s[0]
            if isinstance(s_iterable, (SomeList, SomeDict, SomeString)):
                self = SomeList(self.listdef) # create a fresh copy
                self.listdef.resize()
                self.listdef.listitem.hint_maxlength = True
        elif 'fence' in hints:
            self.listdef.resize()
            self = self.listdef.offspring(getbookkeeper())
        return self

    def getslice(self, s_start, s_stop):
        bk = getbookkeeper()
        check_negative_slice(s_start, s_stop)
        return self.listdef.offspring(bk)

    def setslice(self, s_start, s_stop, s_iterable):
        check_negative_slice(s_start, s_stop)
        if not isinstance(s_iterable, SomeList):
            raise AnnotatorError("list[start:stop] = x: x must be a list")
        self.listdef.mutate()
        self.listdef.agree(getbookkeeper(), s_iterable.listdef)
        self.listdef.resize()

    def delslice(self, s_start, s_stop):
        check_negative_slice(s_start, s_stop)
        self.listdef.resize()
def check_negative_slice(s_start, s_stop, error="slicing"):
    """Raise AnnotatorError unless both slice bounds are provably >= 0;
    a constant stop of -1 is tolerated."""
    if isinstance(s_start, SomeInteger) and not s_start.nonneg:
        raise AnnotatorError("%s: not proven to have non-negative start" %
                             error)
    stop_suspect = isinstance(s_stop, SomeInteger) and not s_stop.nonneg
    if stop_suspect and getattr(s_stop, 'const', 0) != -1:
        raise AnnotatorError("%s: not proven to have non-negative stop" % error)
def dict_contains(s_dct, s_element, position):
    """Annotate `element in dct`: constant False when the dict is known
    to be empty, otherwise some bool."""
    s_dct.dictdef.generalize_key(s_element)
    if not s_dct._is_empty(position):
        return s_Bool
    s_false = SomeBool()
    s_false.const = False
    return s_false
@op.contains.register(SomeDict)
def contains_SomeDict(annotator, dct, element):
    """`element in dct`, delegated to dict_contains()."""
    position = annotator.bookkeeper.position_key
    return dict_contains(annotator.annotation(dct),
                         annotator.annotation(element),
                         position)
contains_SomeDict.can_only_throw = _dict_can_only_throw_nothing
class __extend__(SomeDict):
    # Annotator behaviour of dict operations and methods.
    def _is_empty(self, position):
        # A dict is provably empty when nothing was ever written into it:
        # its key or value annotation is then still the impossible value.
        s_key = self.dictdef.read_key(position)
        s_value = self.dictdef.read_value(position)
        return (isinstance(s_key, SomeImpossibleValue) or
                isinstance(s_value, SomeImpossibleValue))
    def len(self):
        position = getbookkeeper().position_key
        if self._is_empty(position):
            # provably empty: len() is the constant 0
            return immutablevalue(0)
        return SomeObject.len(self)
    def iter(self):
        return SomeIterator(self)
    iter.can_only_throw = []
    def getanyitem(self, position, variant='keys'):
        # Annotation of one arbitrary item, depending on the iteration
        # variant ('keys', 'values', 'items' and the *_with_hash forms).
        if variant == 'keys':
            return self.dictdef.read_key(position)
        elif variant == 'values':
            return self.dictdef.read_value(position)
        elif variant == 'items' or variant == 'items_with_hash':
            s_key = self.dictdef.read_key(position)
            s_value = self.dictdef.read_value(position)
            if (isinstance(s_key, SomeImpossibleValue) or
                    isinstance(s_value, SomeImpossibleValue)):
                return s_ImpossibleValue
            elif variant == 'items':
                return SomeTuple((s_key, s_value))
            elif variant == 'items_with_hash':
                return SomeTuple((s_key, s_value, s_Int))
        elif variant == 'keys_with_hash':
            s_key = self.dictdef.read_key(position)
            if isinstance(s_key, SomeImpossibleValue):
                return s_ImpossibleValue
            return SomeTuple((s_key, s_Int))
        raise ValueError(variant)
    def method_get(self, key, dfl=s_None):
        # d.get(k, default): both k and default flow into the dict's
        # key/value annotations.
        position = getbookkeeper().position_key
        self.dictdef.generalize_key(key)
        self.dictdef.generalize_value(dfl)
        return self.dictdef.read_value(position)
    # setdefault has the same annotation behaviour as get
    method_setdefault = method_get
    def method_copy(self):
        return SomeDict(self.dictdef)
    def method_update(dct1, dct2):
        if s_None.contains(dct2):
            return SomeImpossibleValue()
        dct1.dictdef.union(dct2.dictdef)
    def method__prepare_dict_update(dct, num):
        # no annotation effect; low-level hint only
        pass
    def method_keys(self):
        bk = getbookkeeper()
        return bk.newlist(self.dictdef.read_key(bk.position_key))
    def method_values(self):
        bk = getbookkeeper()
        return bk.newlist(self.dictdef.read_value(bk.position_key))
    def method_items(self):
        bk = getbookkeeper()
        return bk.newlist(self.getanyitem(bk.position_key, variant='items'))
    def method_iterkeys(self):
        return SomeIterator(self, 'keys')
    def method_itervalues(self):
        return SomeIterator(self, 'values')
    def method_iteritems(self):
        return SomeIterator(self, 'items')
    def method_iterkeys_with_hash(self):
        return SomeIterator(self, 'keys_with_hash')
    def method_iteritems_with_hash(self):
        return SomeIterator(self, 'items_with_hash')
    def method_clear(self):
        pass
    def method_popitem(self):
        position = getbookkeeper().position_key
        return self.getanyitem(position, variant='items')
    def method_pop(self, s_key, s_dfl=None):
        self.dictdef.generalize_key(s_key)
        if s_dfl is not None:
            self.dictdef.generalize_value(s_dfl)
        position = getbookkeeper().position_key
        return self.dictdef.read_value(position)
    def method_contains_with_hash(self, s_key, s_hash):
        position = getbookkeeper().position_key
        return dict_contains(self, s_key, position)
    method_contains_with_hash.can_only_throw = _dict_can_only_throw_nothing
    def method_setitem_with_hash(self, s_key, s_hash, s_value):
        pair(self, s_key).setitem(s_value)
    method_setitem_with_hash.can_only_throw = _dict_can_only_throw_nothing
    def method_getitem_with_hash(self, s_key, s_hash):
        # XXX: copy of binaryop.getitem_SomeDict
        self.dictdef.generalize_key(s_key)
        position = getbookkeeper().position_key
        return self.dictdef.read_value(position)
    method_getitem_with_hash.can_only_throw = _dict_can_only_throw_keyerror
    def method_delitem_with_hash(self, s_key, s_hash):
        pair(self, s_key).delitem()
    method_delitem_with_hash.can_only_throw = _dict_can_only_throw_keyerror
    def method_delitem_if_value_is(self, s_key, s_value):
        # modelled as "may store the value, may delete the key"
        pair(self, s_key).setitem(s_value)
        pair(self, s_key).delitem()
class __extend__(SomeOrderedDict):
    def method_move_to_end(self, s_key, s_last):
        # od.move_to_end(k, last): 'last' must be a bool; annotation-wise
        # this is modelled as a key lookup (may raise KeyError).
        assert s_Bool.contains(s_last)
        pair(self, s_key).delitem()
    method_move_to_end.can_only_throw = _dict_can_only_throw_keyerror
@op.contains.register(SomeString)
@op.contains.register(SomeUnicodeString)
def contains_String(annotator, string, char):
    if annotator.annotation(char).is_constant() and annotator.annotation(char).const == "\0":
        # Special case: "'\0' in string".  In the branch where this is
        # False we additionally learn that the string contains no NUL
        # character; record that via knowntypedata.
        r = SomeBool()
        knowntypedata = defaultdict(dict)
        add_knowntypedata(knowntypedata, False, [string],
                          annotator.annotation(string).nonnulify())
        r.set_knowntypedata(knowntypedata)
        return r
    else:
        return contains_SomeObject(annotator, string, char)
contains_String.can_only_throw = []
class __extend__(SomeString,
                 SomeUnicodeString):
    # Methods common to byte strings and unicode strings.
    def method_startswith(self, frag):
        if self.is_constant() and frag.is_constant():
            # both sides constant: fold at annotation time
            return immutablevalue(self.const.startswith(frag.const))
        return s_Bool
    def method_endswith(self, frag):
        if self.is_constant() and frag.is_constant():
            return immutablevalue(self.const.endswith(frag.const))
        return s_Bool
    def method_find(self, frag, start=None, end=None):
        check_negative_slice(start, end, "find")
        return SomeInteger()
    def method_rfind(self, frag, start=None, end=None):
        check_negative_slice(start, end, "rfind")
        return SomeInteger()
    def method_count(self, frag, start=None, end=None):
        check_negative_slice(start, end, "count")
        return SomeInteger(nonneg=True)
    def method_strip(self, chr=None):
        # unicode.strip() without an argument would need full Unicode
        # whitespace knowledge; only the one-char form is RPython.
        if chr is None and isinstance(self, SomeUnicodeString):
            raise AnnotatorError("unicode.strip() with no arg is not RPython")
        return self.basestringclass(no_nul=self.no_nul)
    def method_lstrip(self, chr=None):
        if chr is None and isinstance(self, SomeUnicodeString):
            raise AnnotatorError("unicode.lstrip() with no arg is not RPython")
        return self.basestringclass(no_nul=self.no_nul)
    def method_rstrip(self, chr=None):
        if chr is None and isinstance(self, SomeUnicodeString):
            raise AnnotatorError("unicode.rstrip() with no arg is not RPython")
        return self.basestringclass(no_nul=self.no_nul)
    def method_join(self, s_list):
        if s_None.contains(s_list):
            return SomeImpossibleValue()
        position = getbookkeeper().position_key
        s_item = s_list.listdef.read_item(position)
        if s_None.contains(s_item):
            # items annotate to (at most) None: result is the constant
            # empty string of the matching kind
            if isinstance(self, SomeUnicodeString):
                return immutablevalue(u"")
            return immutablevalue("")
        # the result is NUL-free only if separator and items both are
        no_nul = self.no_nul and s_item.no_nul
        return self.basestringclass(no_nul=no_nul)
    def iter(self):
        return SomeIterator(self)
    iter.can_only_throw = []
    def getanyitem(self, position):
        # iterating a string yields single characters of the matching kind
        return self.basecharclass()
    def method_split(self, patt, max=-1):
        # special-case for .split( '\x00') or .split(u'\x00')
        if max == -1 and patt.is_constant() and (
                len(patt.const) == 1 and ord(patt.const) == 0):
            no_nul = True
        else:
            no_nul = self.no_nul
        s_item = self.basestringclass(no_nul=no_nul)
        return getbookkeeper().newlist(s_item)
    def method_rsplit(self, patt, max=-1):
        s_item = self.basestringclass(no_nul=self.no_nul, can_be_None=False)
        return getbookkeeper().newlist(s_item)
    def method_replace(self, s1, s2):
        # NULs can be introduced by the replacement string s2
        return self.basestringclass(no_nul=self.no_nul and s2.no_nul)
    def getslice(self, s_start, s_stop):
        check_negative_slice(s_start, s_stop)
        result = self.basestringclass(no_nul=self.no_nul)
        return result
    def method_format(self, *args):
        raise AnnotatorError("Method format() is not RPython")
class __extend__(SomeByteArray):
    def getslice(ba, s_start, s_stop):
        # bytearray slicing yields a fresh bytearray annotation
        check_negative_slice(s_start, s_stop)
        return SomeByteArray()
class __extend__(SomeUnicodeString):
    def method_encode(self, s_enc):
        # u.encode(enc): only constant, supported encodings are RPython.
        if not s_enc.is_constant():
            raise AnnotatorError("Non-constant encoding not supported")
        enc = s_enc.const
        if enc not in ('ascii', 'latin-1', 'utf-8', 'utf8'):
            raise AnnotatorError("Encoding %s not supported for unicode" % (enc,))
        if enc == 'utf-8':
            from rpython.rlib import runicode
            bookkeeper = getbookkeeper()
            # emulate a call to the real encoder helper so that it gets
            # annotated (and later translated) too
            s_func = bookkeeper.immutablevalue(
                runicode.unicode_encode_utf_8_elidable)
            s_errors = bookkeeper.immutablevalue('strict')
            s_errorhandler = bookkeeper.immutablevalue(
                runicode.default_unicode_error_encode)
            s_allow_surr = bookkeeper.immutablevalue(True)
            args = [self, self.len(), s_errors, s_errorhandler, s_allow_surr]
            bookkeeper.emulate_pbc_call(bookkeeper.position_key, s_func, args)
        return SomeString(no_nul=self.no_nul)
    method_encode.can_only_throw = [UnicodeEncodeError]
class __extend__(SomeString):
    # Methods specific to byte strings.
    def method_isdigit(self):
        return s_Bool
    def method_isalpha(self):
        return s_Bool
    def method_isalnum(self):
        return s_Bool
    def method_upper(self):
        return SomeString()
    def method_lower(self):
        return SomeString()
    def method_splitlines(self, s_keep_newlines=None):
        s_list = getbookkeeper().newlist(self.basestringclass())
        # Force the list to be resizable because ll_splitlines doesn't
        # preallocate the list.
        s_list.listdef.listitem.resize()
        return s_list
    def method_decode(self, s_enc):
        # s.decode(enc): only constant, supported encodings are RPython.
        if not s_enc.is_constant():
            raise AnnotatorError("Non-constant encoding not supported")
        enc = s_enc.const
        if enc not in ('ascii', 'latin-1', 'utf-8', 'utf8'):
            raise AnnotatorError("Encoding %s not supported for strings" % (enc,))
        if enc == 'utf-8':
            from rpython.rlib import runicode
            bookkeeper = getbookkeeper()
            # emulate a call to the real decoder helper so it gets annotated
            s_func = bookkeeper.immutablevalue(
                runicode.str_decode_utf_8_elidable)
            s_errors = bookkeeper.immutablevalue('strict')
            s_final = bookkeeper.immutablevalue(True)
            s_errorhandler = bookkeeper.immutablevalue(
                runicode.default_unicode_error_decode)
            s_allow_surr = bookkeeper.immutablevalue(True)
            args = [self, self.len(), s_errors, s_final, s_errorhandler,
                    s_allow_surr]
            bookkeeper.emulate_pbc_call(bookkeeper.position_key, s_func, args)
        return SomeUnicodeString(no_nul=self.no_nul)
    method_decode.can_only_throw = [UnicodeDecodeError]
class __extend__(SomeChar, SomeUnicodeCodePoint):
    def len(self):
        # single characters always have length 1
        return immutablevalue(1)
class __extend__(SomeChar):
    def ord(self):
        # a char is one byte, so ord() is non-negative
        return SomeInteger(nonneg=True)
    def method_isspace(self):
        return s_Bool
    def method_isalnum(self):
        return s_Bool
    def method_islower(self):
        return s_Bool
    def method_isupper(self):
        return s_Bool
    def method_lower(self):
        # result is still a single char with the same annotation
        return self
    def method_upper(self):
        return self
class __extend__(SomeUnicodeCodePoint):
    def ord(self):
        # warning, on 32-bit with 32-bit unichars, this might return
        # negative numbers
        return SomeInteger(nonneg=True)
class __extend__(SomeIterator):
    def iter(self):
        # iter(iterator) is the iterator itself
        return self
    iter.can_only_throw = []
    def _can_only_throw(self):
        can_throw = [StopIteration]
        if isinstance(self.s_container, SomeDict):
            # dict iterators may detect mutation-during-iteration
            can_throw.append(RuntimeError)
        return can_throw
    def next(self):
        position = getbookkeeper().position_key
        if s_None.contains(self.s_container):
            return s_ImpossibleValue  # so far
        if self.variant == ("enumerate",):
            # enumerate yields (nonneg index, item) tuples
            s_item = self.s_container.getanyitem(position)
            return SomeTuple((SomeInteger(nonneg=True), s_item))
        variant = self.variant
        if variant == ("reversed",):
            # reversed() yields the plain items
            variant = ()
        return self.s_container.getanyitem(position, *variant)
    next.can_only_throw = _can_only_throw
    method_next = next
class __extend__(SomeInstance):
    def getattr(self, s_attr):
        # instance.attr: the attribute name must be a constant string
        if not(s_attr.is_constant() and isinstance(s_attr.const, str)):
            raise AnnotatorError("A variable argument to getattr is not RPython")
        attr = s_attr.const
        if attr == '__class__':
            return self.classdef.read_attr__class__()
        getbookkeeper().record_getattr(self.classdef.classdesc, attr)
        return self.classdef.s_getattr(attr, self.flags)
    getattr.can_only_throw = []
    def setattr(self, s_attr, s_obj):
        if s_attr.is_constant() and isinstance(s_attr.const, str):
            attr = s_attr.const
            # find the (possibly parent) class where this attr is defined
            clsdef = self.classdef.locate_attribute(attr)
            attrdef = clsdef.attrs[attr]
            attrdef.modified(clsdef)
            # if the attrdef is new, this must fail
            if attrdef.s_value.contains(s_obj):
                return
            # create or update the attribute in clsdef
            clsdef.generalize_attr(attr, s_obj)
            if isinstance(s_obj, SomeList):
                clsdef.classdesc.maybe_return_immutable_list(attr, s_obj)
        else:
            raise AnnotatorError("setattr(instance, variable_attr, value)")
    def bool_behavior(self, s):
        # an instance is falsy only if it can be None
        if not self.can_be_None:
            s.const = True
@op.len.register_transform(SomeInstance)
def len_SomeInstance(annotator, v_arg):
    # rewrite len(instance) into instance.__len__()
    get_len = op.getattr(v_arg, const('__len__'))
    return [get_len, op.simple_call(get_len.result)]
@op.iter.register_transform(SomeInstance)
def iter_SomeInstance(annotator, v_arg):
    # rewrite iter(instance) into instance.__iter__()
    get_iter = op.getattr(v_arg, const('__iter__'))
    return [get_iter, op.simple_call(get_iter.result)]
@op.next.register_transform(SomeInstance)
def next_SomeInstance(annotator, v_arg):
    # rewrite next(instance) into instance.next()  (Python 2 protocol)
    get_next = op.getattr(v_arg, const('next'))
    return [get_next, op.simple_call(get_next.result)]
@op.getslice.register_transform(SomeInstance)
def getslice_SomeInstance(annotator, v_obj, v_start, v_stop):
    # rewrite obj[start:stop] into obj.__getslice__(start, stop)
    get_getslice = op.getattr(v_obj, const('__getslice__'))
    return [get_getslice, op.simple_call(get_getslice.result, v_start, v_stop)]
@op.setslice.register_transform(SomeInstance)
def setslice_SomeInstance(annotator, v_obj, v_start, v_stop, v_iterable):
    # rewrite obj[start:stop] = x into obj.__setslice__(start, stop, x)
    get_setslice = op.getattr(v_obj, const('__setslice__'))
    return [get_setslice,
            op.simple_call(get_setslice.result, v_start, v_stop, v_iterable)]
def _find_property_meth(s_obj, attr, meth):
    # Walk the mro collecting the given accessor ('fget'/'fset') of every
    # property named 'attr'.  Returns None (bare return) as soon as a
    # non-property definition of 'attr' is found, meaning the property
    # transform must not apply; otherwise returns the list of accessors
    # (some entries may be None when a property lacks that accessor).
    result = []
    for clsdef in s_obj.classdef.getmro():
        dct = clsdef.classdesc.classdict
        if attr not in dct:
            continue
        obj = dct[attr]
        if (not isinstance(obj, Constant) or
                not isinstance(obj.value, property)):
            return
        result.append(getattr(obj.value, meth))
    return result
@op.getattr.register_transform(SomeInstance)
def getattr_SomeInstance(annotator, v_obj, v_attr):
    # When 'attr' is a property with an fget on every class in the mro,
    # rewrite obj.attr into a call to the synthetic 'attr__getter__'.
    s_attr = annotator.annotation(v_attr)
    if not s_attr.is_constant() or not isinstance(s_attr.const, str):
        return
    attr = s_attr.const
    getters = _find_property_meth(annotator.annotation(v_obj), attr, 'fget')
    if getters:
        if all(getters):
            get_getter = op.getattr(v_obj, const(attr + '__getter__'))
            return [get_getter, op.simple_call(get_getter.result)]
        elif not any(getters):
            # properties found, but none has an fget: write-only attribute
            raise AnnotatorError("Attribute %r is unreadable" % attr)
@op.setattr.register_transform(SomeInstance)
def setattr_SomeInstance(annotator, v_obj, v_attr, v_value):
    # When 'attr' is a property with an fset on every class in the mro,
    # rewrite obj.attr = x into a call to the synthetic 'attr__setter__'.
    s_attr = annotator.annotation(v_attr)
    if not s_attr.is_constant() or not isinstance(s_attr.const, str):
        return
    attr = s_attr.const
    setters = _find_property_meth(annotator.annotation(v_obj), attr, 'fset')
    if setters:
        if all(setters):
            get_setter = op.getattr(v_obj, const(attr + '__setter__'))
            return [get_setter, op.simple_call(get_setter.result, v_value)]
        elif not any(setters):
            # properties found, but none has an fset: read-only attribute
            raise AnnotatorError("Attribute %r is unwritable" % attr)
class __extend__(SomeBuiltin):
    def call(self, args, implicit_init=False):
        # dispatch to the analyser function registered for this builtin
        args_s, kwds = args.unpack()
        # prefix keyword arguments with 's_'
        kwds_s = {}
        for key, s_value in kwds.items():
            kwds_s['s_'+key] = s_value
        return self.analyser(*args_s, **kwds_s)
class __extend__(SomeBuiltinMethod):
    def _can_only_throw(self, *args):
        # delegate to the analyser's own can_only_throw, which may be
        # either a plain list or a callable taking the same arguments
        analyser_func = getattr(self.analyser, 'im_func', None)
        can_only_throw = getattr(analyser_func, 'can_only_throw', None)
        if can_only_throw is None or isinstance(can_only_throw, list):
            return can_only_throw
        return can_only_throw(self.s_self, *args)
    def simple_call(self, *args):
        # bound-method call: prepend the receiver annotation
        return self.analyser(self.s_self, *args)
    simple_call.can_only_throw = _can_only_throw
    def call(self, args, implicit_init=False):
        args_s, kwds = args.unpack()
        # prefix keyword arguments with 's_'
        kwds_s = {}
        for key, s_value in kwds.items():
            kwds_s['s_'+key] = s_value
        return self.analyser(self.s_self, *args_s, **kwds_s)
class __extend__(SomePBC):
    # Pre-built constants (functions, classes, frozen instances, ...).
    def getattr(self, s_attr):
        assert s_attr.is_constant()
        if s_attr.const == '__name__':
            from rpython.annotator.classdesc import ClassDesc
            if self.getKind() is ClassDesc:
                # SomeClass.__name__: just some string
                return SomeString()
        bookkeeper = getbookkeeper()
        return bookkeeper.pbc_getattr(self, s_attr)
    getattr.can_only_throw = []
    def setattr(self, s_attr, s_value):
        raise AnnotatorError("Cannot modify attribute of a pre-built constant")
    def call(self, args):
        bookkeeper = getbookkeeper()
        return bookkeeper.pbc_call(self, args)
    def bind_callables_under(self, classdef, name):
        # rebind each description under the class (functions -> methods)
        d = [desc.bind_under(classdef, name) for desc in self.descriptions]
        return SomePBC(d, can_be_None=self.can_be_None)
    def bool_behavior(self, s):
        # a PBC is falsy only if it can be None
        if not self.can_be_None:
            s.const = True
    def len(self):
        raise AnnotatorError("Cannot call len on a pbc")
class __extend__(SomeNone):
    def bind_callables_under(self, classdef, name):
        return self
    def getattr(self, s_attr):
        # attribute of None: no possible value
        return s_ImpossibleValue
    getattr.can_only_throw = []
    def setattr(self, s_attr, s_value):
        return None
    def call(self, args):
        return s_ImpossibleValue
    def bool_behavior(self, s):
        # bool(None) is always False
        s.const = False
    def len(self):
        # This None could later be generalized into a list, for example.
        # For now, we give the impossible answer (because len(None) would
        # really crash translated code). It can be generalized later.
        return SomeImpossibleValue()
@op.issubtype.register(SomeTypeOf)
def issubtype(annotator, v_type, v_cls):
    # issubtype on the result of type(x): reuse the isinstance logic on
    # the original variables this type annotation was derived from.
    args_v = v_type.annotation.is_type_of
    return s_isinstance(annotator, args_v[0].annotation,
                        annotator.annotation(v_cls), args_v)
#_________________________________________
# weakrefs
class __extend__(SomeWeakRef):
    def simple_call(self):
        """Annotate calling a weakref: the dereferenced object or None."""
        if self.classdef is None:
            # known to be a dead weakref
            return s_None
        return SomeInstance(self.classdef, can_be_None=True)
|
#!/usr/bin/env python3
# ==============================================================================
#
import freeton_utils
from freeton_utils import *
class SubscriptionDebot(BaseContract):
    """Python-side wrapper for the on-chain "SubscriptionDebot" contract.

    Administrative calls are routed through a SetcodeMultisig wallet,
    which pays for and signs the internal message to the DeBot.
    """
    def __init__(self, tonClient: TonClient, ownerAddress: str, signer: Signer = None):
        # Fall back to a freshly generated keypair when no signer is given.
        genSigner = generateSigner() if signer is None else signer
        self.CONSTRUCTOR = {"ownerAddress": ownerAddress}
        BaseContract.__init__(self, tonClient=tonClient, contractName="SubscriptionDebot", pubkey=genSigner.keys.public, signer=genSigner)

    def _callFromMultisig(self, msig: SetcodeMultisig, functionName, functionParams, value, flags):
        # Encode the DeBot call as a message body and send it via the wallet.
        messageBoc = prepareMessageBoc(abiPath=self.ABI, functionName=functionName, functionParams=functionParams)
        return msig.callTransfer(addressDest=self.ADDRESS, value=value, payload=messageBoc, flags=flags)

    #========================================
    #
    def setSsigCode(self, msig: SetcodeMultisig, value: int, code: str):
        return self._callFromMultisig(msig=msig, functionName="setSsigCode", functionParams={"code": code}, value=value, flags=1)

    def setSubscriptionCode(self, msig: SetcodeMultisig, value: int, code: str):
        return self._callFromMultisig(msig=msig, functionName="setSubscriptionCode", functionParams={"code": code}, value=value, flags=1)

    def setABI(self, msig: SetcodeMultisig, value: int):
        return self._callFromMultisig(msig=msig, functionName="setABI", functionParams={"dabi": stringToHex(getAbi(self.ABI).value)}, value=value, flags=1)

    def addService(self, msig: SetcodeMultisig, value: int, serviceName: str, serviceAddress: str):
        return self._callFromMultisig(msig=msig, functionName="addService", functionParams={"serviceName": serviceName, "serviceAddress": serviceAddress}, value=value, flags=1)

    def clearServices(self, msig: SetcodeMultisig, value: int):
        return self._callFromMultisig(msig=msig, functionName="clearServices", functionParams={}, value=value, flags=1)
# ==============================================================================
#
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""=================================================================
@Project : Algorithm_YuweiYin/LeetCode-All-Solution/Python3
@File : LC-1405-Longest-Happy-String.py
@Author : [YuweiYin](https://github.com/YuweiYin)
@Date : 2022-02-07
=================================================================="""
import sys
import time
# from typing import List
# import collections
"""
LeetCode - 1405 - (Medium) - Longest Happy String
https://leetcode.com/problems/longest-happy-string/
Description & Requirement:
A string s is called happy if it satisfies the following conditions:
s only contains the letters 'a', 'b', and 'c'.
s does not contain any of "aaa", "bbb", or "ccc" as a substring.
s contains at most a occurrences of the letter 'a'.
s contains at most b occurrences of the letter 'b'.
s contains at most c occurrences of the letter 'c'.
Given three integers a, b, and c, return the longest possible happy string.
If there are multiple longest happy strings, return any of them.
If there is no such string, return the empty string "".
A substring is a contiguous sequence of characters within a string.
Example 1:
Input: a = 1, b = 1, c = 7
Output: "ccaccbcc"
Explanation: "ccbccacc" would also be a correct answer.
Example 2:
Input: a = 7, b = 1, c = 0
Output: "aabaa"
Explanation: It is the only correct answer in this case.
Constraints:
0 <= a, b, c <= 100
a + b + c > 0
"""
class Solution:
    def longestDiverseString(self, a: int, b: int, c: int) -> str:
        """Return a longest happy string using at most a/b/c copies of 'a'/'b'/'c'."""
        # exception case: reject non-int or negative inputs
        if not all(isinstance(count, int) for count in (a, b, c)):
            return ""  # Error input type
        if min(a, b, c) < 0:
            return ""  # Error input type
        # main method: greedily append the most plentiful letter allowed
        return self._longestDiverseString(a, b, c)

    def _longestDiverseString(self, a: int, b: int, c: int) -> str:
        # Each entry is a mutable [letter, remaining-count] pair; re-sorted
        # (stable, descending by count) on every step.
        remaining = [["a", a], ["b", b], ["c", c]]
        pieces = []
        while any(count > 0 for _, count in remaining):
            remaining.sort(key=lambda entry: -entry[1])
            chosen = None
            for entry in remaining:
                letter, count = entry
                if count <= 0:  # even the best candidate is exhausted
                    break
                if len(pieces) >= 2 and pieces[-1] == pieces[-2] == letter:
                    continue  # would create three in a row; try next letter
                chosen = entry
                break
            # no legal letter now means there never will be one again
            if chosen is None:
                break
            pieces.append(chosen[0])
            chosen[1] -= 1
        return "".join(pieces)
def main():
    """Ad-hoc driver: run one example through Solution and time it."""
    # Example 1: a=1, b=1, c=7  -> "ccaccbcc"
    # Example 2: a=7, b=1, c=0  -> "aabaa"
    # Example 3: Output: "ccbccbbccbbccbbccbc"
    a, b, c = 0, 8, 11
    solution = Solution()
    start = time.process_time()
    ans = solution.longestDiverseString(a, b, c)
    end = time.process_time()
    print('\nAnswer:')
    print(ans)
    print('Running Time: %.5f ms' % ((end - start) * 1000))


if __name__ == "__main__":
    sys.exit(main())
|
# Generated by Django 2.2.13 on 2020-06-30 21:15
from django.db import migrations
class Migration(migrations.Migration):
    """Re-assert the FeatureSegment unique constraint.

    Intended as a schema no-op on correct databases; it exists to bring
    drifted dev databases back in line (see inline comment below).
    """

    dependencies = [
        ('features', '0021_historicalfeaturesegment'),
    ]

    operations = [
        # this migration should have no affect but should fix the issues on dev after
        # screwing around with the migrations
        migrations.AlterUniqueTogether(
            name='featuresegment',
            unique_together={('feature', 'environment', 'segment')},
        ),
    ]
|
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from resource_management.libraries.functions import format
from resource_management.libraries.functions.default import default
from resource_management.libraries.script.script import Script
from resource_management.libraries.functions import get_kinit_path
from ambari_commons.ambari_metrics_helper import select_metric_collector_hosts_from_hostnames
from resource_management.libraries.functions.constants import Direction
# All values below are resolved once at import time from the Ambari
# command configuration (Script.get_config()); order matters because
# later values build on earlier ones.
config = Script.get_config()
stack_root = Script.get_stack_root()
install_dir = stack_root + '/flume'
download_url = config['configurations']['flume-env']['download_url']
filename = download_url.split('/')[-1]
version_dir = filename.replace('.tar.gz', '').replace('.tgz', '')
flume_conf_dir = default("/configurations/flume-env/flume_conf_dir",
                         '/etc/flume')
flume_user = config['configurations']['flume-env']['flume_user']
flume_group = user_group = config['configurations']['cluster-env']['user_group']
proxyuser_group = config['configurations']['hadoop-env']['proxyuser_group']
security_enabled = config['configurations']['cluster-env']['security_enabled']
flume_jaas_conf_template = default("/configurations/flume-env/jaas_content",
                                   None)
if security_enabled:
    # resolve the _HOST placeholder in the Kerberos principal
    _hostname_lowercase = config['agentLevelParams']['hostname'].lower()
    flume_jaas_princ = config['configurations']['flume-env'][
        'flume_principal_name'].replace('_HOST', _hostname_lowercase)
    flume_keytab_path = config['configurations']['flume-env'][
        'flume_keytab_path']
# hadoop default parameters
flume_bin = install_dir + '/bin/flume-ng'
flume_hive_home = stack_root + '/hive'
flume_hcat_home = stack_root + '/hive/hcatalog/share/webhcat/svr/lib/'
java_home = config['ambariLevelParams']['java_home']
flume_log_dir = config['configurations']['flume-env']['flume_log_dir']
flume_run_dir = config['configurations']['flume-env']['flume_run_dir']
flume_pid_file = flume_run_dir + '/flume.pid'
flume_conf_content = config['configurations']['flume-env']['conf_content']
flume_env_sh_template = config['configurations']['flume-env']['content']
hostname = config['agentLevelParams']['hostname']
cluster_name = config["clusterName"]
# Ambari Metrics (AMS) collector discovery: prefer explicitly configured
# external hosts, otherwise fall back to cluster host info.
if 'cluster-env' in config['configurations'] and \
        'metrics_collector_external_hosts' in config['configurations']['cluster-env']:
    ams_collector_hosts = config['configurations']['cluster-env'][
        'metrics_collector_external_hosts']
else:
    ams_collector_hosts = ",".join(
        default("/clusterHostInfo/metrics_collector_hosts", []))
has_metric_collector = not len(ams_collector_hosts) == 0
metric_collector_port = None
if has_metric_collector:
    metric_collector_host = select_metric_collector_hosts_from_hostnames(
        ams_collector_hosts)
    if 'cluster-env' in config['configurations'] and \
            'metrics_collector_external_port' in config['configurations']['cluster-env']:
        metric_collector_port = config['configurations']['cluster-env'][
            'metrics_collector_external_port']
    else:
        # derive the port from the collector web address, defaulting to 6188
        metric_collector_web_address = default(
            "/configurations/ams-site/timeline.metrics.service.webapp.address",
            "0.0.0.0:6188")
        if metric_collector_web_address.find(':') != -1:
            metric_collector_port = metric_collector_web_address.split(':')[1]
        else:
            metric_collector_port = '6188'
    if default("/configurations/ams-site/timeline.metrics.service.http.policy",
               "HTTP_ONLY") == "HTTPS_ONLY":
        metric_collector_protocol = 'https'
    else:
        metric_collector_protocol = 'http'
    metric_truststore_path = default(
        "/configurations/ams-ssl-client/ssl.client.truststore.location", "")
    metric_truststore_type = default(
        "/configurations/ams-ssl-client/ssl.client.truststore.type", "")
    metric_truststore_password = default(
        "/configurations/ams-ssl-client/ssl.client.truststore.password", "")
    pass
metrics_report_interval = default(
    "/configurations/ams-site/timeline.metrics.sink.report.interval", 60)
metrics_collection_period = default(
    "/configurations/ams-site/timeline.metrics.sink.collection.period", 10)
host_in_memory_aggregation = default(
    "/configurations/ams-site/timeline.metrics.host.inmemory.aggregation",
    True)
host_in_memory_aggregation_port = default(
    "/configurations/ams-site/timeline.metrics.host.inmemory.aggregation.port",
    61888)
# Cluster Zookeeper quorum
zookeeper_quorum = None
if not len(default("/clusterHostInfo/zookeeper_server_hosts", [])) == 0:
    if 'zoo.cfg' in config['configurations'] and 'clientPort' in config[
            'configurations']['zoo.cfg']:
        zookeeper_clientPort = config['configurations']['zoo.cfg'][
            'clientPort']
    else:
        zookeeper_clientPort = '2181'
    zookeeper_quorum = (':' + zookeeper_clientPort + ',').join(
        config['clusterHostInfo']['zookeeper_server_hosts'])
    # last port config
    zookeeper_quorum += ':' + zookeeper_clientPort
# smokeuser
kinit_path_local = get_kinit_path(
    default('/configurations/kerberos-env/executable_search_paths', None))
smokeuser = config['configurations']['cluster-env']['smokeuser']
smokeuser_principal = config['configurations']['cluster-env'][
    'smokeuser_principal_name']
smoke_user_keytab = config['configurations']['cluster-env']['smokeuser_keytab']
# Candidate flume data dirs: one per real device mount point, skipping
# system mounts (read at import time from /proc/mounts).
data_dirs = []
with open('/proc/mounts', 'r') as f:
    data_dirs = [
        line.split()[1] + '/flume/data' for line in f.readlines()
        if line.split()[0].startswith('/dev')
        and line.split()[1] not in ['/boot', '/var/log', '/']
    ]
datadirs = ','.join(data_dirs)
kafka_hosts = default('/clusterHostInfo/kafka_hosts', [])
from random import shuffle
# randomize broker order so clients spread their initial connections
shuffle(kafka_hosts)
kafka_url = ''
if len(kafka_hosts) > 0:
    kafka_url = ':6667,'.join(kafka_hosts) + ':6667'
|
import datetime
from operator import itemgetter
from babel.core import get_global
from babel.numbers import get_currency_name, get_territory_currencies
from django import forms
from django_file_form.forms import FileFormMixin
from addressfield.fields import AddressField
from hypha.apply.stream_forms.fields import MultiFileField
from ..models.vendor import VendorFormSettings
def get_active_currencies():
    """Return the currency codes currently legal tender somewhere.

    Iterates every territory known to babel and collects each territory's
    currently-valid currencies, without duplicates.  Order follows
    territory iteration order.
    """
    active_currencies = []
    territories = get_global('territory_currencies').keys()
    for territory in territories:
        currencies = get_territory_currencies(territory, datetime.date.today())
        for currency in currencies:
            # BUG FIX: this previously appended currencies[0] whenever ANY
            # currency of the territory was missing, which duplicated the
            # territory's primary currency and never added secondary ones.
            if currency not in active_currencies:
                active_currencies.append(currency)
    return active_currencies
class BaseVendorForm:
    """Mixin applying per-site VendorFormSettings labels and help texts."""

    def __init__(self, site=None, *args, **kwargs):
        # Only look up settings when a site is supplied; otherwise
        # apply_form_settings silently leaves fields untouched.
        if site:
            self.form_settings = VendorFormSettings.for_site(site)
        super().__init__(*args, **kwargs)

    def apply_form_settings(self, fields):
        for field in fields:
            for setting in ('label', 'help_text'):
                try:
                    text = getattr(self.form_settings, f'{field}_{setting}')
                except AttributeError:
                    # missing settings (or no site at all): keep the default
                    continue
                setattr(self.fields[field], setting, text)
        return fields
class CreateVendorFormStep1(BaseVendorForm, forms.Form):
    """Step 1: vendor name and whether the bank account is personal."""

    TYPE_CHOICES = [
        ('organization', 'Yes, the account belongs to the organisation above'),
        ('personal', 'No, it is a personal bank account'),
    ]

    name = forms.CharField(required=True)
    contractor_name = forms.CharField(required=True)
    type = forms.ChoiceField(choices=TYPE_CHOICES, required=True, widget=forms.RadioSelect)

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.fields = self.apply_form_settings(self.fields)
class CreateVendorFormStep2(BaseVendorForm, forms.Form):
    """Step 2: whether the vendor is required to pay taxes."""

    required_to_pay_taxes = forms.TypedChoiceField(
        choices=((False, 'No'), (True, 'Yes')),
        coerce=lambda x: x == 'True',
        widget=forms.RadioSelect,
        required=True
    )

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.fields = self.apply_form_settings(self.fields)
class CreateVendorFormStep3(FileFormMixin, BaseVendorForm, forms.Form):
    """Step 3: upload due diligence documents."""

    due_diligence_documents = MultiFileField(required=True)

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.fields = self.apply_form_settings(self.fields)
class CreateVendorFormStep4(BaseVendorForm, forms.Form):
    """Step 4: primary bank account details."""

    # Computed once at import time from babel's currency data.
    CURRENCY_CHOICES = [
        (currency, f'{get_currency_name(currency)} - {currency}')
        for currency in get_active_currencies()
    ]

    account_holder_name = forms.CharField(required=True)
    account_routing_number = forms.CharField(required=True)
    account_number = forms.CharField(required=True)
    account_currency = forms.ChoiceField(
        choices=sorted(CURRENCY_CHOICES, key=itemgetter(1)),
        required=True,
        initial='USD'
    )

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.fields = self.apply_form_settings(self.fields)
class CreateVendorFormStep5(BaseVendorForm, forms.Form):
    """Step 5: whether extra (intermediary/ID) info will be needed."""

    need_extra_info = forms.TypedChoiceField(
        choices=((False, 'No'), (True, 'Yes')),
        coerce=lambda x: x == 'True',
        widget=forms.RadioSelect,
        required=True
    )

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.fields = self.apply_form_settings(self.fields)
class CreateVendorFormStep6(BaseVendorForm, forms.Form):
    """Vendor-creation wizard, step 6: additional bank details.

    Collects "ib_"-prefixed account fields (presumably an intermediary
    bank — confirm with callers), national-ID fields and free-form info.
    All fields are optional.
    """

    # "<currency name> - <code>" labels, one entry per active currency.
    CURRENCY_CHOICES = [
        (code, f'{get_currency_name(code)} - {code}')
        for code in get_active_currencies()
    ]

    branch_address = AddressField()
    ib_account_routing_number = forms.CharField(required=False)
    ib_account_number = forms.CharField(required=False)
    ib_account_currency = forms.ChoiceField(
        choices=sorted(CURRENCY_CHOICES, key=itemgetter(1)),
        required=False,
        initial='USD'
    )
    ib_branch_address = AddressField()
    nid_type = forms.CharField(required=False)
    nid_number = forms.CharField(required=False)
    other_info = forms.CharField(required=False, widget=forms.Textarea)

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Let the shared BaseVendorForm hook adjust field settings.
        self.fields = self.apply_form_settings(self.fields)
|
"""
Configuration file for the Python Atmospheric data Community Toolkit (ACT)
The values for a number of ACT parameters and the default metadata created
when reading files, correcting fields, etc. is controlled by this single
Python configuration file.
Examples:
---------
from act.config import DEFAULT_DATASTREAM_NAME
"""
DEFAULT_DATASTREAM_NAME = 'act_datastream'
|
"""Attribute Tags for IPP."""
from .enums import IppTag
ATTRIBUTE_TAG_MAP = {
"attributes-charset": IppTag.CHARSET,
"attributes-natural-language": IppTag.LANGUAGE,
"document-number": IppTag.INTEGER,
"printer-uri": IppTag.URI,
"requesting-user-name": IppTag.NAME,
"job-id": IppTag.INTEGER,
"document-name": IppTag.NAME,
"job-name": IppTag.NAME,
"document-format": IppTag.MIME_TYPE,
"last-document": IppTag.BOOLEAN,
"copies": IppTag.INTEGER,
"job-hold-until": IppTag.KEYWORD,
"job-priority": IppTag.INTEGER,
"number-up": IppTag.INTEGER,
"job-sheets": IppTag.NAME,
"job-uri": IppTag.URI,
"job-state": IppTag.ENUM,
"job-state-reasons": IppTag.KEYWORD,
"requested-attributes": IppTag.KEYWORD,
"member-uris": IppTag.URI,
"operations-supported": IppTag.ENUM,
"ppd-name": IppTag.NAME,
"printer-state-reason": IppTag.KEYWORD,
"printer-is-shared": IppTag.BOOLEAN,
"printer-error-policy": IppTag.NAME,
"printer-info": IppTag.TEXT,
"which-jobs": IppTag.KEYWORD,
"my-jobs": IppTag.BOOLEAN,
"purge-jobs": IppTag.BOOLEAN,
"hold-job-until": IppTag.KEYWORD,
"job-printer-uri": IppTag.URI,
"printer-location": IppTag.TEXT,
"printer-state": IppTag.ENUM,
"printer-state-reasons": IppTag.KEYWORD,
"printer-up-time": IppTag.INTEGER,
"printer-uri-supported": IppTag.URI,
"document-state": IppTag.ENUM,
"device-uri": IppTag.URI,
}
|
import numpy as np
from sklearn import preprocessing, neighbors, model_selection, svm
import pandas as pd
import pickle
#import serial
import re
import random
from sklearn.metrics import confusion_matrix, plot_confusion_matrix, plot_precision_recall_curve
import matplotlib.pyplot as plt
from sklearn.metrics import accuracy_score
def load_data():
    """Load the sign-language samples for letters a-z.

    Each file ``sign2/<letter>.txt`` holds comma-separated feature rows for
    one letter (no header). All 26 letters are stacked in alphabetical order
    into a single feature matrix, dropping the trailing column of every
    file. The class label of a row is the 0-based index of its letter
    (a=0 ... z=25).

    Returns:
        tuple: ``(data, y_label)`` where ``data`` is a 2-D ``numpy.ndarray``
        of features and ``y_label`` is an ``(n_samples, 1)`` integer array.
    """
    import string  # local import keeps the module's top-level imports intact

    frames = []
    labels = []
    for class_idx, letter in enumerate(string.ascii_lowercase):
        frame = pd.read_table(
            'sign2/{}.txt'.format(letter), header=None, sep=','
        )
        frames.append(frame)
        # Every row of this file belongs to the same class.
        labels.extend([class_idx] * len(frame))
    # DataFrame.append was removed in pandas 2.0; one concat replaces the
    # previous chain of 25 per-letter append calls.
    df = pd.concat(frames)
    # Drop the trailing column (presumably an artifact of a trailing comma
    # in the data files — confirm against the raw files).
    df = df.drop(df.columns[-1], axis=1)
    data = df.to_numpy()
    y_label = np.array(labels).reshape(-1, 1)
    # Debug output kept from the original implementation.
    print(type(data))
    print(data.shape)
    print(type(y_label))
    print(y_label.shape)
    return data, y_label
def normalize_data(data):
    """Scale every column of ``data`` into the range [-1, 1].

    Each column is divided by its maximum absolute value. Fixes from the
    previous version: the builtin ``max`` is no longer shadowed, the input
    array is no longer mutated in place (the old code aliased and wrote back
    into the caller's array), the Python row loop is replaced by a single
    vectorized division, and all-zero columns no longer divide by zero.

    Args:
        data (numpy.ndarray): 2-D array, samples (rows) x features (columns).

    Returns:
        numpy.ndarray: A new, column-normalized float array.
    """
    col_max = np.max(np.abs(data), axis=0)
    # Guard all-zero columns so the division below cannot produce NaN/inf.
    col_max = np.where(col_max == 0, 1, col_max)
    return data / col_max
"""
if __name__=="__main__":
data, label=load_data()
norm_data=normalize_data(data)
print(data.shape)
""" |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""This module mainly provides a ``XenaDataset`` class representing one Xena
matrix in a Xena cohort. Three class, ``GDCOmicset``, ``GDCPhenoset``,
``GDCSurvivalset`` and ``GDCAPIPhenoset``are derived from ``XenaDataset``,
representing genomic data, phenotype info of TCGA and phenotype info of TARGET,
survival data and phenotype info from GDC API respectively.
In general, a ``XenaDataset`` class contains 3 methods, ``download``,
``transform`` and ``metadata``, which can be used for quickly assembling an
ETL pipeline importing data into Xena.
"""
# Ensure Python 2 and 3 compatibility
from __future__ import division
from __future__ import print_function
import functools
import time
import os
import re
import sys
import warnings
import jinja2
from lxml import etree
import numpy as np
import pandas as pd
from xena_gdc_etl import gdc
from .utils import mkdir_p, requests_retry_session, reduce_json_array
from .constants import (
GDC_XENA_COHORT,
METADATA_TEMPLATE,
METADATA_VARIABLES,
GDC_RELEASE_URL,
CASES_FIELDS_EXPANDS,
LIST_FIELDS,
)
def read_by_ext(filename, mode='r'):
    """Open a file, transparently handling gzip/bz2 compression.

    Mirrors the "hook_compressed" helper from Python's fileinput module:
    the compression scheme is inferred from the file extension.

    Args:
        filename (str): Must contain proper extension indicating the
            compression condition (".gz", ".bz2", or anything else for an
            uncompressed file).
        mode (str, optional): Mode in which the file is opened; passed to
            the corresponding open function (``open``, ``gzip.open`` or
            ``bz2.BZ2File``). Defaults to 'r'.

    Returns:
        file object: A filehandle to be used with `with`.
    """
    suffix = os.path.splitext(filename)[1]
    if suffix == '.gz':
        import gzip
        opener = gzip.open
    elif suffix == '.bz2':
        import bz2
        opener = bz2.BZ2File
    else:
        opener = open
    return opener(filename, mode)
def read_biospecimen(fileobj):
    """Extract info from GDC's biospecimen supplement and re-organize them
    into a pandas DataFrame.

    Args:
        fileobj (file or path): XML file of GDC's biospecimen supplement
            (or a TARGET-style .xlsx workbook).

    Returns:
        pandas.core.frame.DataFrame: Transformed pandas DataFrame, one row
        per sample, indexed by sample barcode.
    """
    # Accept either an open file object or a plain path string.
    if hasattr(fileobj, 'name'):
        filename = fileobj.name
    else:
        filename = fileobj
    ext = os.path.splitext(filename)[1]
    if ext == '.xlsx':
        # Design specifically for TARGET biospecimen; the sheet name varies
        # between workbooks ("Sample Names" vs "SampleNames").
        try:
            df = pd.read_excel(
                filename, sheet_name='Sample Names', header=None
            )
        except Exception:
            try:
                df = pd.read_excel(
                    filename, sheet_name='SampleNames', header=None
                )
            except Exception:
                raise
        # Forward-fill merged header cells across the first row.
        # NOTE(review): fillna(method='ffill') is deprecated in pandas>=2.1;
        # the modern spelling is .ffill().
        df.iloc[0].fillna(method='ffill', inplace=True)
        # Join the two header rows into dotted column names ("A.B").
        df.columns = df.iloc[0:2].apply(lambda x: x.str.cat(sep='.'))
        return df.drop(df.index[0:2]).set_index(df.columns[0])
    elif ext != '.xml':
        raise IOError('Unknown file type for biospecimen data: {}'.format(ext))
    root = etree.parse(fileobj).getroot()
    ns = root.nsmap
    # Sanity check: make sure this XML really is a biospecimen supplement.
    assert (
        'biospecimen'
        in root.xpath('@xsi:schemaLocation', namespaces=ns)[0].lower()
    )
    # Fields shared by every sample come from <admin> and <patient>; a leaf
    # with no text (child.text is None) raises AttributeError on .strip().
    samples_common = {}
    for child in root.find('admin:admin', ns):
        try:
            samples_common[child.tag.split('}', 1)[-1]] = child.text.strip()
        except AttributeError:
            samples_common[child.tag.split('}', 1)[-1]] = ''
    for child in root.find('bio:patient', ns):
        try:
            samples_common[child.tag.split('}', 1)[-1]] = child.text.strip()
        except AttributeError:
            samples_common[child.tag.split('}', 1)[-1]] = ''
    # Add 'primary_diagnosis' according to
    # https://gdc.cancer.gov/resources-tcga-users/tcga-code-tables/tcga-study-abbreviations
    samples_common['primary_diagnosis'] = gdc.TCGA_STUDY_ABBR[
        samples_common['disease_code']
    ]
    # One record per <sample>: its own leaf fields plus the common fields.
    samples = {}
    for sample in root.find('bio:patient/bio:samples', ns):
        record = {}
        for child in sample:
            if child.text and child.text.strip():
                record[child.tag.split('}', 1)[-1]] = child.text.strip()
        record.update(samples_common)
        samples[record['bcr_sample_barcode']] = record
    df = pd.DataFrame(samples).T
    # NOTE(review): keeps rows whose barcode chars [-3:-1] differ from '10';
    # presumably this drops TCGA sample-type code 10 (blood-derived normal)
    # — confirm against the barcode spec.
    sample_mask = df.bcr_sample_barcode.map(lambda s: s[-3:-1] not in ['10'])
    df = df[sample_mask]
    df['bcr_patient_barcode'] = root.find(
        'bio:patient/shared:bcr_patient_barcode', ns
    ).text
    return df
def read_clinical(fileobj):
    """Extract info from GDC's clinical supplement and re-organize them into a
    pandas DataFrame.

    Args:
        fileobj (file or path): XML file of GDC's clinical supplement
            (or an .xlsx workbook with a "Clinical Data" sheet).

    Returns:
        pandas.core.frame.DataFrame: Transformed pandas DataFrame with a
        single row per patient, indexed by the patient barcode.
    """
    # Accept either an open file object or a plain path string.
    if hasattr(fileobj, 'name'):
        filename = fileobj.name
    else:
        filename = fileobj
    ext = os.path.splitext(filename)[1]
    if ext == '.xlsx':
        xl_file = pd.ExcelFile(filename)
        sheets = xl_file.sheet_names
        if sheets[0] == "Clinical Data":
            return xl_file.parse(sheets[0], index_col=0)
        else:
            print("Clincal Data not found, skipping this file ...")
            return pd.DataFrame()
    elif ext != '.xml':
        raise IOError('Unknown file type for clinical data: {}'.format(ext))
    root = etree.parse(fileobj).getroot()
    ns = root.nsmap
    # Sanity check: make sure this XML really is a clinical supplement.
    assert (
        'clinical'
        in root.xpath('@xsi:schemaLocation', namespaces=ns)[0].lower()
    )
    patient = {}
    # "Dirty" extraction: take the text of every leaf element, keyed by its
    # namespace-stripped tag; later duplicates overwrite earlier ones.
    for child in root.xpath('.//*[not(*)]'):
        field_name = child.tag.split('}', 1)[-1]
        if field_name in LIST_FIELDS:
            continue
        try:
            patient[child.tag.split('}', 1)[-1]] = child.text.strip()
        except AttributeError:
            # Leaf has no text (child.text is None).
            patient[child.tag.split('}', 1)[-1]] = ''
    # Redo 'race': collapse the race_list element into one comma-joined field.
    if 'race_list' in patient:
        del patient['race_list']
    try:
        patient['race'] = ','.join(
            [
                child.text.strip()
                for child in root.find('.//clin_shared:race_list', ns)
                if child.text and child.text.strip()
            ]
        )
    except Exception:
        patient['race'] = ''
    # Redo the most recent "follow_up" and update the patient dict if there is
    # an overlapped key.
    follow_ups = root.xpath('.//*[local-name()="follow_up"]')
    if follow_ups:
        most_recent = follow_ups[0]
        for follow_up in follow_ups:
            # NOTE(review): 'version' attributes are compared as strings, so
            # '10' < '9'; confirm versions are single-digit or zero-padded in
            # real GDC data.
            if follow_up.attrib['version'] > most_recent.attrib['version']:
                most_recent = follow_up
        for child in most_recent:
            try:
                patient[child.tag.split('}', 1)[-1]] = child.text.strip()
            except AttributeError:
                patient[child.tag.split('}', 1)[-1]] = ''
    return pd.DataFrame({patient['bcr_patient_barcode']: patient}).T
def merge_cnv(filelist):
    """Transform GDC's CNV segment data into Xena data matrix.

    Each input segment file contributes one block of rows; all files are
    concatenated, columns are renamed to Xena conventions, and rows are
    indexed by sample ID (the first dot-separated field of each filename).

    Args:
        filelist (list of path): The list of input raw CNV segment files.

    Returns:
        pandas.core.frame.DataFrame: Transformed pandas DataFrame.
    """
    frames = []
    total = len(filelist)
    for count, path in enumerate(filelist, start=1):
        # usecols picks Chromosome/Start/End/Segment_Mean by position.
        frames.append(
            pd.read_csv(path, sep="\t", header=0, usecols=[1, 2, 3, 5]).assign(
                sample=os.path.basename(path).split('.', 1)[0]
            )
        )
        print('\rProcessed {}/{} file...'.format(count, total), end='')
        sys.stdout.flush()
    print('\rAll {} files have been processed. '.format(total))
    # DataFrame.append was removed in pandas 2.0; a single concat of all
    # collected frames is also much faster than per-file appends.
    xena_matrix = pd.concat(frames) if frames else pd.DataFrame()
    return xena_matrix.rename(
        columns={'Chromosome': 'Chrom', 'Segment_Mean': 'value'}
    ).set_index('sample')
def snv_maf_matrix(filelist):
    """Transform GDC's MAF data into Xena data matrix.

    A new column of DNA variant allele frequencies named "dna_vaf" will be
    calculated by division "t_alt_count" / "t_depth". Columns "t_alt_count"
    and "t_depth" will then be dropped. In the end, columns will be renamed
    accordingly and row index will be set as sample ID.

    Args:
        filelist (list of path): The list of input raw MAF file for mutation
            data. Must contain exactly one file.

    Returns:
        pandas.core.frame.DataFrame: Transformed pandas DataFrame.
    """
    # MAF data comes as exactly one aggregated file per project.
    assert len(filelist) == 1
    print('\rProcessing 1/1 file...', end='')
    # NOTE(review): positional usecols assume the standard GDC MAF column
    # layout (the rename map below implies 0=Hugo_Symbol, 4=Chromosome,
    # ..., 110=FILTER) — confirm against the MAF version being downloaded.
    df = pd.read_csv(
        filelist[0],
        sep="\t",
        header=0,
        comment='#',
        usecols=[12, 36, 4, 5, 6, 39, 41, 51, 0, 10, 15, 110],
    )
    print('\rCalculating "dna_vaf" ...', end='')
    # Variant allele frequency = alternate-allele read count / total depth.
    df['dna_vaf'] = df['t_alt_count'] / df['t_depth']
    print('\rTrim "Tumor_Sample_Barcode" into Xena sample ID ...', end='')
    # Keep only the first 4 dash-separated fields of the barcode.
    df['Tumor_Sample_Barcode'] = df['Tumor_Sample_Barcode'].apply(
        lambda x: '-'.join(x.split('-', 4)[0:4])
    )
    print('\rRe-organizing matrix ...', end='')
    df = (
        df.drop(['t_alt_count', 't_depth'], axis=1)
        .set_index('Tumor_Sample_Barcode')
        .rename(
            columns={
                'Hugo_Symbol': 'gene',
                'Chromosome': 'chrom',
                'Start_Position': 'start',
                'End_Position': 'end',
                'Reference_Allele': 'ref',
                'Tumor_Seq_Allele2': 'alt',
                'Tumor_Sample_Barcode': 'sampleid',
                'HGVSp_Short': 'Amino_Acid_Change',
                'Consequence': 'effect',
                'FILTER': 'filter',
            }
        )
    )
    df.index.name = 'Sample_ID'
    return df
def merge_sample_cols(
    filelist,
    header='infer',
    index_col=0,
    usecols=(0, 1),
    comment=None,
    index_name='id',
    get_sid=lambda f: os.path.basename(f).split('.', 1)[0],
    log2TF=True,
):
    """Merge and process a list of raw data files to make a Xena data matrix.

    Each file will be considered as data from one sample and will be
    extracted as one column in the merged matrix. Data from the same sample
    (identified by sample ID) will be averaged before being put into the
    matrix. By default (``log2TF=True``), the merged matrix will be
    transformed by log2(x + 1).

    Args:
        filelist (list of path): The list of input raw data.
        header: Passed through to ``pandas.read_csv``. Defaults to 'infer'.
        index_col: Column used as the row index. Defaults to 0.
        usecols: Columns to read from each file. Defaults to (0, 1).
            (Changed from a mutable list default to an immutable tuple.)
        comment: Comment character for ``pandas.read_csv``. Defaults to None.
        index_name (str): Name for the row index of the merged matrix.
        get_sid (callable): Maps a file path to its sample ID. Defaults to
            the first dot-separated field of the file's basename.
        log2TF (bool): Whether to apply the log2(x + 1) transformation.

    Returns:
        pandas.core.frame.DataFrame: Ready to load Xena matrix.
    """
    # Group data by sample (sid), especially for samples with more than 1
    # data files (repeats), so repeats can be averaged per sample.
    sample_dict = {}
    for path in filelist:
        sample_dict.setdefault(get_sid(path), []).append(path)
    # Read raw data sample by sample, average data for each sample if
    # needed, and put data for each sample into the matrix.
    xena_matrix = pd.DataFrame()
    total = len(filelist)
    count = 0
    for sample_id, paths in sample_dict.items():
        df_list = [
            pd.read_csv(
                f,
                sep="\t",
                header=header,
                index_col=index_col,
                usecols=usecols,
                comment=comment,
                names=[index_name, sample_id],
            )
            for f in paths
        ]
        if len(paths) > 1:
            # Average repeats within a sample before merging.
            df_list = [
                xena_matrix,
                pd.concat(df_list, axis=1, copy=False)
                .mean(1)
                .rename(sample_id),
            ]
        else:
            df_list.insert(0, xena_matrix)
        xena_matrix = pd.concat(df_list, axis=1, copy=False)
        count += len(paths)
        print('\rProcessed {}/{} file...'.format(count, total), end='')
        sys.stdout.flush()
    print('\rAll {} files have been processed. '.format(total))
    xena_matrix.index.name = index_name
    # log2(x + 1) keeps zeros at zero while compressing dynamic range.
    if log2TF:
        return np.log2(xena_matrix + 1)
    else:
        return xena_matrix
def handle_gistic(filelist):
    """Handles GISTIC Data type.

    Args:
        filelist (list of path): The list of input raw data. Must contain
            exactly one file.

    Returns:
        pandas.core.frame.DataFrame: Ready to load Xena matrix, indexed by
        gene, with one column per sample submitter ID.
    """
    # GISTIC scores ship as a single matrix file per project.
    assert len(filelist) == 1
    print('\rProcessing file {}'.format(filelist[0]), end='')
    df = pd.read_csv(
        filelist[0],
        sep="\t",
        header=0,
        comment='#',
        index_col=0,
    )
    # Drop annotation columns so only per-aliquot score columns remain.
    df = df.drop(["Gene ID", "Cytoband"], axis=1)
    columns = list(df)
    # Translate aliquot-ID column names into sample submitter IDs;
    # presumably performs a GDC API lookup — see xena_gdc_etl.gdc.
    mapping = gdc.map_two_fields(
        endpoint="cases",
        input_field="samples.portions.analytes.aliquots.aliquot_id",
        output_field="samples.submitter_id",
        input_values=columns,
    )
    # Flatten single-element JSON arrays in the mapping values.
    mapping = reduce_json_array(mapping)
    df = df.rename(columns=mapping)
    return df
class XenaDataset(object):
    r"""XenaDataset represents for one Xena matrix in a Xena cohort.

    This class provides a set of methods for downloading and transforming
    data into Xena matrix, as well as generating associated metadata. It also
    provides a set of attributes to control these processes.

    Attributes:
        projects (str or list of str): One or a list of project IDs describing
            study projects included in this dataset.
        xena_dtype (str): A short string (like an ID) describing the type of
            data in this dataset. It is highly recommended, though not
            required, that one dataset has a single type of data.
        root_dir (str): Defines the root directory for this dataset. The
            XenaDataset and the importing process can be highly customized,
            with directories for every data and each step explicitly assigned.
            You can set directories for raw data (through the ``raw_data_dir``
            property) and Xena matrix (through the ``matrix_dir`` property)
            specifically. The ``root_dir`` will be essentially useless under
            such situation.
            If some or all directories remain unassigned when being used, a
            default directory tree will be used, with a structure like this::

                root_dir
                └── projects
                    ├── "Raw_Data"
                    │   └── xena_dtype
                    │       ├── data1
                    │       ├── data2
                    │       ├── ...
                    │       └── dataN
                    └── "Xena_Matrices"
                        ├── projects.xena_dtype(1).tsv
                        ├── projects.xena_dtype(1).tsv.json
                        ├── projects.xena_dtype(2).tsv
                        ├── projects.xena_dtype(2).tsv.json
                        ├── ...
                        ├── projects.xena_dtype(N).tsv
                        └── projects.xena_dtype(N).tsv.json

            By default, all files related to a dataset, such as raw data, Xena
            matrix, metadata, should and highly recommended to be organized
            and saved under the root directory. However, this is neither
            required nor checked. Setting directory related properties
            (including ``root_dir`` and some properties listed below) will not
            trigger creations of any directories. Directories, if not exist,
            will only be made right before being needed.
            Defaults to "." which points to current python work directory.
        raw_data_dir (str): A directory for raw data. Please check the
            ``raw_data_list`` property for its potential usage for defining
            raw data for Xena matrix ``transform``, and check the ``root_dir``
            property for the default "Raw_Data" directory structure. Defaults
            to None.
        matrix_dir (str): A path for saving the transformed Xena matrix for
            this dataset. If the ``matrix_dir`` is not available at the time
            of being used, the ``matrix`` property will be checked first. If
            the ``matrix`` property is available, its directory will be
            assigned to the ``matrix_dir``. If the ``matrix`` property is not
            available, a default path will be assigned according to the
            default directory structure. Check the ``root_dir`` property for
            the default directory structure. Defaults to None.
        download_map (dict): A dict with the key being a URL for one raw data
            to be downloaded and the value being a path for saving downloaded
            raw data.
        raw_data_list (list): A list of file path(s) for all raw data related
            to this dataset. It will be automatically set by the ``download``
            method; or it can be assigned directly as a public attribute. This
            ``raw_data_list`` attribute will be used by the ``transform``
            method for making a Xena matrix from raw data. If the
            ``raw_data_list`` is not available at the time of being used, the
            ``raw_data_dir`` property will be checked. All files under
            ``raw_data_dir`` will be treated as data and used for creating a
            ``raw_data_list``.
        raws2matrix (callable): A function used for merging multiple raw data
            in the ``raw_data_list`` into one Xena matrix, as well as
            processing the merged matrix if needed. A valid ``raws2matrix``
            must accept only one argument, which is ``raw_data_list``.
        matrix (str): A path for the Xena matrix of this dataset.
            This attribute will be used but not validated by the ``transform``
            method for saving newly generated Xena matrix. This attribute will
            also be used yet will be validated (i.e. it has to point to a
            valid existing file) by the ``metadata`` method before making
            metadata associated with the Xena matrix and with this dataset. If
            ``matrix`` is not available at the time of being used by the
            ``transform`` method, it will use ``matrix_dir`` as its directory
            and will adopt a filename with the "projects.xena_type.tsv"
            pattern. Please check the ``matrix_dir`` property to see how it is
            determined.
        metadata_template (jinja2.environment.Template or str): A Jinja2
            template for rendering metadata of this dataset. When using a
            string to set ``metadata_template``, it will be used as a path to
            the template file and the corresponding template will be retrieved
            and assigned to this attribute.
        metadata_vars (dict): A dict of variables which will be used (by \*\*
            unpacking) for rendering the ``metadata_template``.
    """
    @property
    def projects(self):
        """list of str: project IDs covered by this dataset."""
        return self._projects
    @projects.setter
    def projects(self, projects):
        # Normalize a single project ID into a one-element list.
        if isinstance(projects, str):
            self._projects = [projects]
        elif isinstance(projects, list):
            self._projects = projects
        else:
            raise ValueError('"projects" must be either str or list.')
    @property
    def root_dir(self):
        """A path of an existing directory for keeping files (raw data, matrix
        and metadata) of this dataset.
        """
        return self._root_dir
    @root_dir.setter
    def root_dir(self, path):
        # Unlike other dir properties, root_dir must already exist.
        if os.path.isdir(path):
            self._root_dir = os.path.abspath(path)
        else:
            raise IOError('{} is not an existing directory.'.format(path))
    @property
    def raw_data_dir(self):
        """str: directory for raw data; lazily defaults to
        <root_dir>/<projects>/Raw_Data/<xena_dtype>.
        """
        try:
            return self._raw_data_dir
        except AttributeError:
            self._raw_data_dir = os.path.join(
                self.root_dir,
                '_'.join(self.projects),
                'Raw_Data',
                str(self.xena_dtype),
            )
            return self._raw_data_dir
    @raw_data_dir.setter
    def raw_data_dir(self, path):
        self._raw_data_dir = os.path.abspath(path)
    @property
    def matrix_dir(self):
        """str: directory for the Xena matrix; falls back to the directory of
        an explicitly assigned ``matrix``, then to the default tree.
        """
        try:
            return self._matrix_dir
        except AttributeError:
            try:
                self._matrix_dir = os.path.dirname(self._matrix)
                return self._matrix_dir
            except AttributeError:
                self._matrix_dir = os.path.join(
                    self.root_dir, '_'.join(self.projects), 'Xena_Matrices'
                )
                return self._matrix_dir
    @matrix_dir.setter
    def matrix_dir(self, path):
        self._matrix_dir = os.path.abspath(path)
    @property
    def download_map(self):
        """dict: URL -> local save path; must be a non-empty dict."""
        assert self._download_map and isinstance(self._download_map, dict)
        return self._download_map
    @download_map.setter
    def download_map(self, d):
        if isinstance(d, dict):
            self._download_map = d
        else:
            raise TypeError(
                'download_map should be a dict, ' 'not {}'.format(type(d))
            )
    # Raw data list: try to get the list from ``raw_data_dir`` if unavailable.
    @property
    def raw_data_list(self):
        try:
            return self._raw_data_list
        except AttributeError:
            try:
                # Treat every regular file under raw_data_dir as raw data.
                raw_data_dir = os.path.abspath(self.raw_data_dir)
                raw_data = []
                for f in os.listdir(raw_data_dir):
                    f_path = os.path.join(raw_data_dir, f)
                    if os.path.isfile(f_path):
                        raw_data.append(f_path)
                if raw_data:
                    self._raw_data_list = raw_data
                else:
                    raise ValueError
            except Exception:
                raise ValueError('Cannot find raw data.')
            return self._raw_data_list
    @raw_data_list.setter
    def raw_data_list(self, raw_data):
        self._raw_data_list = raw_data
    @property
    def matrix(self):
        """str: path of the Xena matrix file; lazily defaults to
        <matrix_dir>/<projects>.<xena_dtype>.tsv.
        """
        try:
            assert self._matrix
            return self._matrix
        except (AttributeError, AssertionError):
            self._matrix = os.path.join(
                self.matrix_dir,
                '{}.{}.tsv'.format('_'.join(self.projects), self.xena_dtype),
            )
            return self._matrix
    @matrix.setter
    def matrix(self, path):
        self._matrix = os.path.abspath(path)
        # Keep matrix_dir consistent with an explicitly assigned matrix path.
        self.matrix_dir = os.path.dirname(self._matrix)
    @property
    def metadata_template(self):
        assert isinstance(self._metadata_template, jinja2.environment.Template)
        return self._metadata_template
    @metadata_template.setter
    def metadata_template(self, template):
        if isinstance(template, jinja2.environment.Template):
            self._metadata_template = template
        elif isinstance(template, str):
            # A string is treated as a template filename inside the
            # package's "resources" directory.
            jinja2_env = jinja2.Environment(
                loader=jinja2.PackageLoader('xena_gdc_etl', 'resources')
            )
            self._metadata_template = jinja2_env.get_template(template)
        else:
            raise TypeError(
                'metadata_template should be a jinja2 template or an existing '
                'path to a template JSON file, not a :{}.'.format(
                    type(template)
                )
            )
    def __init__(
        self,
        projects,
        xena_dtype,
        root_dir='.',
        raw_data_dir=None,
        matrix_dir=None,
    ):
        self.projects = projects
        self.xena_dtype = xena_dtype
        self.root_dir = root_dir
        # Optional overrides; otherwise the lazy defaults above apply.
        if raw_data_dir is not None:
            self.raw_data_dir = raw_data_dir
        if matrix_dir is not None:
            self.matrix_dir = matrix_dir
    def download(self, chunk_size=4096):
        """Download file(s) according to the ``download_map`` property.

        A list of paths for downloaded files will be assigned to the
        ``raw_data_list`` property which can be used for Xena matrix
        ``transform`` processing. Check the ``transform`` method for details.

        Args:
            chunk_size (int, optional): The chunk size is the number of bytes
                it should read into memory, when the response is got with
                "stream=True". Check the documentation of "requests" module
                for details. Defaults to 4096.

        Returns:
            self: allow method chaining.
        """
        print('Starts to download...', end='')
        total = len(self.download_map)
        count = 0
        download_list = []
        for url, path in self.download_map.items():
            count += 1
            response = requests_retry_session().get(url, stream=True)
            if response.ok:
                path = os.path.abspath(path)
                status = '\r[{:d}/{:d}] Downloading to "{}" ...'
                print(status.format(count, total, path), end='')
                sys.stdout.flush()
                # Create the target directory lazily, right before writing.
                mkdir_p(os.path.dirname(path))
                with open(path, 'wb') as f:
                    for chunk in response.iter_content(chunk_size):
                        f.write(chunk)
                download_list.append(path)
            else:
                raise IOError(
                    '\nFail to download file {}. Response {}'.format(
                        url, response.status_code
                    )
                )
        print('')
        self.raw_data_list = download_list
        print(
            'Raw {} data for {} is ready.'.format(
                self.projects, self.xena_dtype
            )
        )
        return self
    def transform(self):
        """Transform raw data in a dataset into Xena matrix.

        The transformation takes the ``raw_data_list`` and assembles it into
        a Xena matrix via ``raws2matrix``. The generated Xena matrix will be
        saved at the path defined by the ``matrix`` property.

        Returns:
            self: allow method chaining.
        """
        message = 'Make Xena matrix for {} data of {}.'
        print(message.format(self.xena_dtype, self.projects))
        xena_matrix = self.raws2matrix(self.raw_data_list)
        # Transformation done
        print('\rSaving matrix to {} ...'.format(self.matrix), end='')
        mkdir_p(self.matrix_dir)
        xena_matrix.to_csv(self.matrix, sep='\t', encoding='utf-8')
        print('\rXena matrix is saved at {}.'.format(self.matrix))
        return self
    def metadata(self):
        """Make "metadata.json" for Xena data loading.

        A JSON of metadata will be created for the Xena matrix defined by the
        ``matrix`` property. The ``matrix`` property has to point to an
        existing file when this ``metadata`` method is being called. The
        metadata JSON file will be saved under the same directory as the
        matrix file and named with a ".json" postfix appended to the filename
        of Xena matrix. JSON templates for making metadata are defined by the
        ``metadata_template`` property, and variables for rendering the
        template are defined by the ``metadata_vars`` property.

        Returns:
            self: allow method chaining.
        """
        message = 'Create metadata for {} data matrix of {}.'
        print(message.format(self.xena_dtype, self.projects))
        try:
            assert os.path.isfile(self.matrix)
        except AttributeError:
            raise IOError(
                'Xena matrix for this dataset is unknown; please create a '
                'matrix or assign an existing matrix file to the "matrix" '
                'property before making metadata.'
            )
        except AssertionError:
            raise IOError('{} is not an existing file.'.format(self.matrix))
        # Start to generate metadata.
        # General jinja2 Variables
        print('Creating metadata file ...', end='')
        self._metadata = self.matrix + '.json'
        with open(self._metadata, 'w') as f:
            f.write(self.metadata_template.render(**self.metadata_vars))
        print('\rMetadata JSON is saved at {}.'.format(self._metadata))
        return self
class GDCOmicset(XenaDataset):
    r"""GDCOmicset is derived from the ``XenaDataset`` class and represents for
    a Xena matrix whose data is genomic data from GDC.

    This class provides a set of default configurations for downloading and
    transforming GDC data, as well as generating associated metadata for the
    transformed Xena matrix. These default configurations are stored as
    private constants, and they can be checked and/or changed through the
    following attributes: ``gdc_release``, ``gdc_filter``, ``gdc_prefix``,
    ``download_map``, ``raws2matrix``, ``metadata_template``, and
    ``metadata_vars``.

    Attributes:
        projects (str or list): One (string) or a list of GDC's
            "cases.project.project_id". All corresponding projects will be
            included in this dataset.
        xena_dtype (str): A dataset type supported by this class. To get a
            list of supported types, use ``GDCOmicset.get_supported_dtype()``.
        gdc_release (str): URL to the data release note for the dataset. It
            will be used by the ``metadata`` method when making the metadata
            for this dataset. It is highly recommended that this attribute is
            set explicitly by the user so that it is guaranteed to match the
            data (raw data) underlying this dataset. If it is not available,
            the most recent data release will be queried and used.
        gdc_filter (dict): A filter for querying GDC data underlying this
            dataset. Each item of this dict means to be an "in" operation,
            with its key being one GDC API available field and its value being
            a string or a list of strings. It can be automatically derived
            from ``projects`` and ``xena_dtype`` if it is not assigned
            explicitly by the user when being used. Please check `GDC API
            documentation
            <https://docs.gdc.cancer.gov/API/Users_Guide/Search_and_Retrieval/#filters-specifying-the-query>`_
            for details.
        gdc_prefix (str): A GDC available file field whose value will be used
            in the filename of corresponding download file. It will be used by
            ``download_map`` for making default download map. It can be
            automatically mapped from ``xena_dtype`` if it is not assigned
            explicitly by the user when being used. Please check
            ``download_map`` and `GDC API documentation
            <https://docs.gdc.cancer.gov/API/Users_Guide/Search_and_Retrieval/#filters-specifying-the-query>`_
            for details.
        download_map (dict): A dict with the key being a URL for one raw data
            to be downloaded and the value being a path for saving downloaded
            raw data. If it hasn't been assigned explicitly by the user when
            being used, it can be automatically generated by querying through
            GDC API according to ``gdc_filter`` and ``gdc_prefix`` which are
            based on ``projects`` and ``xena_dtype``. Please check
            ``gdc_filter`` for details about querying conditions. Filename of
            data files, by default, will adapt a pattern of
            "<value of gdc_prefix>.<GDC file UUID>.<file extension>"

            It is worth noting that the data transformation process may need
            an ID for every data files. The ``raws2matrix`` functions may
            extract the ID from the filename (the first substring when
            splitting the filename by "."). For example, Xena uses GDC's
            "cases.samples.submitter_id" for sample ID. Therefore,
            ``gdc_prefix`` should be set to "cases.samples.submitter_id" so
            that data files for each sample will be renamed to
            "<cases.samples.submitter_id>.<file UUID>.<file extension>",
            allowing the desired sample ID to be extracted correctly. Please
            keep that in mind when trying to define your own download dict but
            use default transformation settings (``raws2matrix``). Please
            check the ``raws2matrix`` properties, as well as the ``transform``
            method for details.
        raws2matrix (callable): A function which accepts only one argument of
            ``raw_data_list``, merges them into one Xena matrix, and processes
            the merged matrix if needed. Defaults, if needed, can be mapped
            from ``xena_dtype``.
        metadata_template (jinja2.environment.Template or str): A Jinja2
            template for rendering metadata of this dataset. When setting this
            attribute with a string, it will be taken as a path to the
            template file and the corresponding template will be retrieved and
            assigned to this attribute. Defaults, if needed, can be mapped
            from ``xena_dtype``.
        metadata_vars (dict): A dict of variables which will be used (by \*\*
            unpacking) when rendering the ``metadata_template``. Defaults, if
            needed, can be derived from corresponding matrix and ``projects``
            and ``xena_dtype`` properties.
    """

    # Map Xena dtype code to GDC data query dict
    _XENA_GDC_DTYPE = {
        'htseq_counts': {
            'data_type': 'Gene Expression Quantification',
            'analysis.workflow_type': 'HTSeq - Counts',
        },
        'htseq_fpkm': {
            'data_type': 'Gene Expression Quantification',
            'analysis.workflow_type': 'HTSeq - FPKM',
        },
        'htseq_fpkm-uq': {
            'data_type': 'Gene Expression Quantification',
            'analysis.workflow_type': 'HTSeq - FPKM-UQ',
        },
        'mirna': {
            'data_type': 'miRNA Expression Quantification',
            'analysis.workflow_type': 'BCGSC miRNA Profiling',
        },
        'mirna_isoform': {
            'data_type': 'Isoform Expression Quantification',
            'analysis.workflow_type': 'BCGSC miRNA Profiling',
        },
        'cnv': {
            'data_type': 'Copy Number Segment',
            'analysis.workflow_type': 'DNAcopy',
            # Restrict to these GDC sample type codes; note that '10'
            # (Blood Derived Normal) is not included.
            'cases.samples.sample_type_id': [
                '01', '02', '03', '04', '05', '06', '07', '08', '09',
                '15', '16', '20', '40', '50', '60', '61', '99',
            ],
        },
        'masked_cnv': {
            'data_type': 'Masked Copy Number Segment',
            'analysis.workflow_type': 'DNAcopy',
            # Same sample type restriction as 'cnv' above.
            'cases.samples.sample_type_id': [
                '01', '02', '03', '04', '05', '06', '07', '08', '09',
                '15', '16', '20', '40', '50', '60', '61', '99',
            ],
        },
        'muse_snv': {
            'data_type': 'Masked Somatic Mutation',
            'analysis.workflow_type': 'MuSE Variant Aggregation and Masking',
        },
        'mutect2_snv': {
            'data_type': 'Masked Somatic Mutation',
            'analysis.workflow_type':
                'MuTect2 Variant Aggregation and Masking',
        },
        'somaticsniper_snv': {
            'data_type': 'Masked Somatic Mutation',
            'analysis.workflow_type':
                'SomaticSniper Variant Aggregation and Masking',
        },
        'varscan2_snv': {
            'data_type': 'Masked Somatic Mutation',
            'analysis.workflow_type':
                'VarScan2 Variant Aggregation and Masking',
        },
        'gistic': {
            'data_type': 'Gene Level Copy Number Scores',
            'analysis.workflow_type': 'GISTIC - Copy Number Score',
        },
        'star_counts': {
            'analysis.workflow_type': 'STAR - Counts',
        },
        'methylation27': {
            'data_type': 'Methylation Beta Value',
            'platform': 'Illumina Human Methylation 27',
        },
        'methylation450': {
            'data_type': 'Methylation Beta Value',
            'platform': 'Illumina Human Methylation 450',
        },
    }

    # Prefix in filenames for downloaded files. The transform step parses
    # the sample/case ID back out of this prefix (first "."-separated field).
    _GDC_PREFIX = {
        'htseq_counts': 'cases.samples.submitter_id',
        'htseq_fpkm': 'cases.samples.submitter_id',
        'htseq_fpkm-uq': 'cases.samples.submitter_id',
        'mirna': 'cases.samples.submitter_id',
        'mirna_isoform': 'cases.samples.submitter_id',
        'cnv': 'cases.samples.submitter_id',
        'masked_cnv': 'cases.samples.submitter_id',
        'muse_snv': 'submitter_id',
        'mutect2_snv': 'submitter_id',
        'somaticsniper_snv': 'submitter_id',
        'varscan2_snv': 'submitter_id',
        'gistic': 'submitter_id',
        'star_counts': 'cases.samples.submitter_id',
        'methylation27': 'cases.samples.submitter_id',
        'methylation450': 'cases.samples.submitter_id',
    }

    # Settings for making Xena matrix from GDC data: map each Xena dtype to
    # the callable that merges a list of raw files into one matrix.
    _RAWS2MATRIX_FUNCS = dict.fromkeys(
        ['htseq_counts', 'htseq_fpkm', 'htseq_fpkm-uq'],
        functools.partial(
            merge_sample_cols, header=None, index_name='Ensembl_ID'
        ),
    )
    _RAWS2MATRIX_FUNCS['mirna'] = functools.partial(
        merge_sample_cols, header=0, usecols=[0, 2], index_name='miRNA_ID'
    )
    _RAWS2MATRIX_FUNCS['mirna_isoform'] = functools.partial(
        merge_sample_cols,
        header=0,
        usecols=[1, 3],
        index_name='isoform_coords',
    )
    _RAWS2MATRIX_FUNCS.update(dict.fromkeys(['cnv', 'masked_cnv'], merge_cnv))
    _RAWS2MATRIX_FUNCS.update(
        dict.fromkeys(
            ['muse_snv', 'mutect2_snv', 'somaticsniper_snv', 'varscan2_snv'],
            snv_maf_matrix,
        )
    )
    _RAWS2MATRIX_FUNCS['gistic'] = handle_gistic
    _RAWS2MATRIX_FUNCS['star_counts'] = functools.partial(
        merge_sample_cols,
        header=0,
        index_name='Ensembl_ID',
    )
    _RAWS2MATRIX_FUNCS.update(
        dict.fromkeys(
            ['methylation27', 'methylation450'],
            functools.partial(
                merge_sample_cols,
                header=0,
                log2TF=False,
                index_name='Composite Element REF',
            ),
        )
    )

    @property
    def xena_dtype(self):
        """str: code for the type of data in this dataset."""
        return self.__xena_dtype

    @xena_dtype.setter
    def xena_dtype(self, xena_dtype):
        # Only dtypes with a known GDC query filter are accepted.
        if xena_dtype in self._XENA_GDC_DTYPE:
            self.__xena_dtype = xena_dtype
        else:
            raise ValueError("Unsupported data type: {}".format(xena_dtype))

    @classmethod
    def get_supported_dtype(cls):
        """Return a list of dataset type codes supported by this class."""
        # Fix: materialize a real list. In Python 3, ``dict.keys()`` returns
        # a view object, which breaks callers that expect the documented
        # list (e.g. indexing or concatenation).
        return list(cls._XENA_GDC_DTYPE)

    @property
    def gdc_release(self):
        """str: URL to the GDC data release note; queried lazily and cached
        on first access if not set explicitly.
        """
        try:
            return self.__gdc_release
        except AttributeError:
            # Build the release-note anchor from GDC's "status" endpoint,
            # e.g. "Data Release 32.0 ..." -> "data-release-320".
            data_release = gdc.search('status', typ='json')['data_release']
            anchor = (
                re.match(r'(Data Release [^\s]+)\s', data_release)
                .group(1)
                .replace(' ', '-')
                .replace('.', '')
                .lower()
            )
            self.__gdc_release = GDC_RELEASE_URL + '#' + anchor
            return self.__gdc_release

    @gdc_release.setter
    def gdc_release(self, url):
        self.__gdc_release = url

    # Set default query filter dict for GDC API if it hasn't been set yet.
    @property
    def gdc_filter(self):
        """dict: "in"-filter for the GDC files endpoint; derived from
        ``projects`` and ``xena_dtype`` when not set or empty.
        """
        try:
            assert self.__gdc_filter
            return self.__gdc_filter
        except (AttributeError, AssertionError):
            self.__gdc_filter = {
                'access': 'open',
                'cases.project.project_id': self.projects,
            }
            self.__gdc_filter.update(self._XENA_GDC_DTYPE[self.xena_dtype])
            return self.__gdc_filter

    @gdc_filter.setter
    def gdc_filter(self, filter_dict):
        self.__gdc_filter = filter_dict

    # Set default GDC field for prefixing filename of downloaded files.
    @property
    def gdc_prefix(self):
        """str: GDC file field whose value prefixes downloaded filenames;
        defaults by ``xena_dtype`` when not set or empty.
        """
        try:
            assert self.__gdc_prefix
            return self.__gdc_prefix
        except (AttributeError, AssertionError):
            self.__gdc_prefix = self._GDC_PREFIX[self.xena_dtype]
            return self.__gdc_prefix

    @gdc_prefix.setter
    def gdc_prefix(self, gdc_field):
        self.__gdc_prefix = gdc_field

    # Override only the getter of the inherited ``download_map`` property;
    # the setter defined on XenaDataset still applies.
    @XenaDataset.download_map.getter
    def download_map(self):
        try:
            assert self._download_map
            return self._download_map
        except (AttributeError, AssertionError):
            fields = ['file_id', 'file_name', self.gdc_prefix]
            try:
                print('Searching for raw data ...', end='')
                file_df = gdc.search(
                    'files', in_filter=self.gdc_filter, fields=fields
                )
            except Exception:
                file_dict = {}
            else:
                # Filename pattern: "<gdc_prefix value>.<file UUID>.<ext>"
                file_df.set_index('file_id', drop=False, inplace=True)
                file_dict = (
                    file_df[self.gdc_prefix].astype(str)
                    + '.'
                    + file_df['file_id'].astype(str)
                    + '.'
                    + file_df['file_name'].apply(gdc.get_ext)
                ).to_dict()
            if not file_dict:
                # Nothing found: report and return the empty dict WITHOUT
                # caching it, so the query is retried on next access.
                msg = '\rNo {} data found for project {}.'
                gdc_dtype = self._XENA_GDC_DTYPE[self.xena_dtype]
                print(
                    msg.format(
                        ' - '.join(sorted(gdc_dtype.values())),
                        str(self.projects),
                    )
                )
                return file_dict
            file_dict = {
                '{}/data/{}'.format(gdc.GDC_API_BASE, uuid): os.path.join(
                    self.raw_data_dir, name
                )
                for uuid, name in file_dict.items()
            }
            self._download_map = file_dict
            msg = '\r{} files found for {} data of {}.'
            print(msg.format(len(file_dict), self.xena_dtype, self.projects))
            return self._download_map

    @property
    def raws2matrix(self):
        """callable: merges raw data files into one Xena matrix; defaults
        by ``xena_dtype`` when not set.
        """
        try:
            return self.__raws2matrix
        # Fix: only an unset (missing) cached attribute should trigger the
        # default lookup; the previous bare ``except Exception`` also masked
        # unrelated errors.
        except AttributeError:
            self.__raws2matrix = self._RAWS2MATRIX_FUNCS[self.xena_dtype]
            return self.__raws2matrix

    @raws2matrix.setter
    def raws2matrix(self, func):
        self.__raws2matrix = func

    # Override only the getter of the inherited ``metadata_template``
    # property; the inherited setter still applies.
    @XenaDataset.metadata_template.getter
    def metadata_template(self):
        try:
            assert isinstance(
                self._metadata_template, jinja2.environment.Template
            )
            return self._metadata_template
        except (AttributeError, AssertionError):
            # Load the packaged template matching this dataset's dtype.
            template_json = METADATA_TEMPLATE[self.xena_dtype]
            jinja2_env = jinja2.Environment(
                loader=jinja2.PackageLoader('xena_gdc_etl', 'resources')
            )
            self._metadata_template = jinja2_env.get_template(template_json)
            return self._metadata_template

    @property
    def metadata_vars(self):
        """dict: variables used (by ** unpacking) when rendering
        ``metadata_template``; derived lazily when not set or empty.
        """
        try:
            assert self.__metadata_vars and isinstance(
                self.__metadata_vars, dict
            )
            return self.__metadata_vars
        except (AttributeError, AssertionError):
            # Dataset date = matrix file's modification time (UTC).
            matrix_date = time.strftime(
                "%m-%d-%Y", time.gmtime(os.path.getmtime(self.matrix))
            )
            projects = ','.join(self.projects)
            variables = {
                'project_id': projects,
                'date': matrix_date,
                'gdc_release': self.gdc_release,
            }
            # Known cohorts get a curated Xena cohort name; others fall back
            # to "GDC <projects>".
            if projects in GDC_XENA_COHORT:
                variables['xena_cohort'] = GDC_XENA_COHORT[projects]
            else:
                variables['xena_cohort'] = 'GDC ' + projects
            try:
                variables.update(METADATA_VARIABLES[self.xena_dtype])
            except KeyError:
                pass
            # Data type specific jinja2 Variables: SNV datasets embed the
            # UUID of the single MAF file they were built from.
            if self.xena_dtype in [
                'muse_snv',
                'mutect2_snv',
                'somaticsniper_snv',
                'varscan2_snv',
            ]:
                try:
                    print(
                        '\rSearching the specific URL for raw MAF data ...',
                        end='',
                    )
                    res_df = gdc.search(
                        'files', in_filter=self.gdc_filter, fields='file_id'
                    )
                    # Only record the UUID when exactly one MAF file matches.
                    if res_df['file_id'].shape == (1,):
                        variables['maf_uuid'] = str(res_df['file_id'][0])
                except Exception:
                    message = (
                        'Fail to get a specific URL for the MAF file '
                        'for: matrix "{}"; "{}" data of cohort "{}".'
                    )
                    warnings.warn(
                        message.format(
                            self.matrix,
                            variables['project_id'],
                            variables['xena_cohort'],
                        ),
                        stacklevel=2,
                    )
            self.__metadata_vars = variables
            return self.__metadata_vars

    @metadata_vars.setter
    def metadata_vars(self, variables):
        self.__metadata_vars = variables
class GDCPhenoset(XenaDataset):
    r"""GDCPhenoset is derived from the ``XenaDataset`` class and represents for
    a Xena matrix whose data is phenotype data from GDC.

    This class provides a set of default configurations for downloading and
    transforming GDC data, as well as generating associated metadata for the
    transformed Xena matrix. These default configurations are stored as
    private constants, and they can be checked and/or changed through the
    following attributes: ``gdc_release``, ``gdc_filter``, ``download_map``,
    ``raws2matrix``, ``metadata_template``, and ``metadata_vars``.

    Attributes:
        projects (str or list): One (string) or a list of GDC's
            "cases.project.project_id". All corresponding projects will be
            included in this dataset.
        xena_dtype (str): One dataset type of "biospecimen", "clinical",
            "raw_phenotype" or "GDC_phenotype". Defaults to None, for which
            the class will guess the correct type to use from ``projects``.
        gdc_release (str): URL to the data release note for the dataset. It
            will be used by the ``metadata`` method when making the metadata
            for this dataset. It is highly recommended that this attribute is
            set explicitly by the user so that it is guaranteed to match the
            data (raw data) underlying this dataset. If it is not available,
            the most recent data release will be queried and used.
        gdc_filter (dict): A filter for querying GDC data underlying this
            dataset. Each item of this dict means to be an "in" operation,
            with its key being one GDC API available field and its value being
            a string or a list of strings. It can be automatically derived
            from ``projects`` and ``xena_dtype`` if it is not assigned
            explicitly by the user when being used. Please check `GDC API
            documentation
            <https://docs.gdc.cancer.gov/API/Users_Guide/Search_and_Retrieval/#filters-specifying-the-query>`_
            for details.
        download_map (dict): A dict with the key being a URL for one raw data
            to be downloaded and the value being a path for saving downloaded
            raw data. If it hasn't been assigned explicitly by the user when
            being used, it can be automatically generated by querying through
            GDC API according to ``gdc_filter`` which are based on
            ``projects`` and ``xena_dtype``. Filename of data files, by
            default, will adapt a pattern of
            "<data_category>.<GDC file UUID>.<file extension>"

            It is worth noting the "<data_category>" prefix can be useful or
            even necessary for ``transform`` method to apply correct
            transformation to the file. "<data_category>" is closely related
            to the format of the file.
        metadata_template (jinja2.environment.Template or str): A Jinja2
            template for rendering metadata of this dataset. When setting this
            attribute with a string, it will be taken as a path to the
            template file and the corresponding template will be retrieved and
            assigned to this attribute. Defaults, if needed, can be mapped
            from ``xena_dtype``.
        metadata_vars (dict): A dict of variables which will be used (by \*\*
            unpacking) when rendering the ``metadata_template``. Defaults, if
            needed, can be derived from corresponding matrix and ``projects``
            and ``xena_dtype`` properties.
    """

    # Map Xena dtype code to GDC data query dict
    _XENA_GDC_DTYPE = {
        'biospecimen': {
            'data_category': 'Biospecimen',
            'data_format': ['BCR XML', 'XLSX'],
        },
        'clinical': {
            'data_category': 'Clinical',
            'data_format': ['BCR XML', 'XLSX'],
        },
        'raw_phenotype': {
            'data_category': ['Biospecimen', 'Clinical'],
            'data_format': ['BCR XML', 'XLSX'],
        },
        'GDC_phenotype': {
            'data_category': ['Biospecimen', 'Clinical'],
            'data_format': ['BCR XML', 'XLSX'],
        },
    }
    # To resolve overlapping between raw data and API data, remove columns
    # according to the following lists.
    # Columns dropped from the GDC API's phenotype table (see ``transform``).
    _API_DROPS = [
        'id',
        'case_id',
        'state',
        'created_datetime',
        'updated_datetime',
        'demographic_id.demographic',
        'submitter_id.demographic',
        'state.demographic',
        'created_datetime.demographic',
        'updated_datetime.demographic',
        'diagnosis_id.diagnoses',
        'submitter_id.diagnoses',
        'state.diagnoses',
        'created_datetime.diagnoses',
        'updated_datetime.diagnoses',
        'treatment_id.treatments.diagnoses',
        'submitter_id.treatments.diagnoses',
        'state.treatments.diagnoses',
        'created_datetime.treatments.diagnoses',
        'updated_datetime.treatments.diagnoses',
        'exposure_id.exposures',
        'submitter_id.exposures',
        'state.exposures',
        'created_datetime.exposures',
        'updated_datetime.exposures',
        'pathology_report_uuid.samples',
        'state.project',
        'released.project',
        'sample_id.samples',
        'created_datetime.samples',
        'updated_datetime.samples',
        'tissue_source_site_id.tissue_source_site',
    ]
    # Columns dropped from the raw BCR XML/XLSX tables so that, where they
    # overlap with API data, the API data wins (see ``transform``).
    _RAW_DROPS = [
        'alcohol_history_documented',
        'bcr_patient_barcode',
        'bcr_patient_uuid',
        'bcr_sample_uuid',
        'composition',
        'current_weight',
        'days_to_birth',
        'days_to_collection',
        'days_to_death',
        'days_to_last_followup',
        'days_to_sample_procurement',
        'ethnicity',
        'freezing_method',
        'gender',
        'height',
        'icd_10',
        'icd_o_3_histology',
        'icd_o_3_site',
        'initial_weight',
        'intermediate_dimension',
        'is_ffpe',
        'longest_dimension',
        'oct_embedded',
        'pathologic_stage',
        'pathology_report_uuid',
        'preservation_method',
        'primary_diagnosis',
        'race',
        'sample_type',
        'sample_type_id',
        'shortest_dimension',
        'state',
        'time_between_clamping_and_freezing',
        'time_between_excision_and_freezing',
        'tissue_type',
        'tumor_descriptor',
        'tumor_tissue_site',
        'vital_status',
    ]

    @property
    def xena_dtype(self):
        """str: one of "biospecimen", "clinical", "raw_phenotype" or
        "GDC_phenotype".
        """
        return self.__xena_dtype

    @xena_dtype.setter
    def xena_dtype(self, xena_dtype):
        # Only dtypes with a known GDC query filter are accepted.
        if xena_dtype in self._XENA_GDC_DTYPE:
            self.__xena_dtype = xena_dtype
        else:
            raise ValueError("Unsupported data type: {}".format(xena_dtype))

    @property
    def gdc_release(self):
        """str: URL to the GDC data release note; queried lazily and cached
        on first access if not set explicitly.
        """
        try:
            return self.__gdc_release
        except AttributeError:
            # Build the release-note anchor from GDC's "status" endpoint,
            # e.g. "Data Release 32.0 ..." -> "data-release-320".
            data_release = gdc.search('status', typ='json')['data_release']
            anchor = (
                re.match(r'(Data Release [^\s]+)\s', data_release)
                .group(1)
                .replace(' ', '-')
                .replace('.', '')
                .lower()
            )
            self.__gdc_release = GDC_RELEASE_URL + '#' + anchor
            return self.__gdc_release

    @gdc_release.setter
    def gdc_release(self, url):
        self.__gdc_release = url

    # Set default query filter dict for GDC API if it hasn't been set yet.
    @property
    def gdc_filter(self):
        """dict: "in"-filter for the GDC files endpoint; derived from
        ``projects`` and ``xena_dtype`` when not set or empty.
        """
        try:
            assert self.__gdc_filter
            return self.__gdc_filter
        except (AttributeError, AssertionError):
            self.__gdc_filter = {
                'access': 'open',
                'cases.project.project_id': self.projects,
            }
            self.__gdc_filter.update(self._XENA_GDC_DTYPE[self.xena_dtype])
            return self.__gdc_filter

    @gdc_filter.setter
    def gdc_filter(self, filter_dict):
        self.__gdc_filter = filter_dict

    # Override only the getter of the inherited ``download_map`` property;
    # the setter defined on the base class still applies.
    @XenaDataset.download_map.getter
    def download_map(self):
        try:
            assert self._download_map
            return self._download_map
        except (AttributeError, AssertionError):
            fields = ['file_id', 'file_name', 'data_category']
            try:
                print('Searching for raw data ...', end='')
                file_df = gdc.search(
                    'files', in_filter=self.gdc_filter, fields=fields
                )
            except Exception:
                file_dict = {}
            else:
                # Filename pattern: "<data_category>.<file UUID>.<ext>"
                file_df.set_index('file_id', drop=False, inplace=True)
                file_dict = (
                    file_df['data_category'].astype(str)
                    + '.'
                    + file_df['file_id'].astype(str)
                    + '.'
                    + file_df['file_name'].apply(gdc.get_ext)
                ).to_dict()
            if not file_dict:
                # NOTE(review): the empty dict is returned without being
                # cached in self._download_map, so the query is retried on
                # next access.
                msg = '\rNo {} data found for project {}.'
                gdc_dtype = self._XENA_GDC_DTYPE[self.xena_dtype]
                print(
                    msg.format(
                        ' - '.join(sorted(gdc_dtype.values())),
                        str(self.projects),
                    )
                )
                return file_dict
            file_dict = {
                '{}/data/{}'.format(gdc.GDC_API_BASE, uuid): os.path.join(
                    self.raw_data_dir, name
                )
                for uuid, name in file_dict.items()
            }
            self._download_map = file_dict
            msg = '\r{} files found for {} data of {}.'
            print(msg.format(len(file_dict), self.xena_dtype, self.projects))
            return self._download_map

    @property
    def metadata_vars(self):
        """dict: variables used (by ** unpacking) when rendering
        ``metadata_template``; derived lazily when not set or empty.
        """
        try:
            assert self.__metadata_vars and isinstance(
                self.__metadata_vars, dict
            )
            return self.__metadata_vars
        except (AttributeError, AssertionError):
            # Dataset date = matrix file's modification time (UTC).
            matrix_date = time.strftime(
                "%m-%d-%Y", time.gmtime(os.path.getmtime(self.matrix))
            )
            projects = ','.join(self.projects)
            variables = {
                'project_id': projects,
                'date': matrix_date,
                'gdc_release': self.gdc_release,
            }
            # Known cohorts get a curated Xena cohort name; others fall back
            # to "GDC <projects>".
            if projects in GDC_XENA_COHORT:
                variables['xena_cohort'] = GDC_XENA_COHORT[projects]
            else:
                variables['xena_cohort'] = 'GDC ' + projects
            self.__metadata_vars = variables
            return self.__metadata_vars

    @metadata_vars.setter
    def metadata_vars(self, variables):
        self.__metadata_vars = variables

    def __init__(
        self,
        projects,
        xena_dtype=None,
        root_dir='.',
        raw_data_dir=None,
        matrix_dir=None,
    ):
        # NOTE(review): the base class __init__ is not invoked here, and the
        # ``raw_data_dir`` argument is accepted but never used — confirm
        # whether it should be forwarded (cf. GDCSurvivalset.__init__).
        self.projects = projects
        if xena_dtype is not None:
            self.xena_dtype = xena_dtype
        elif all([i.startswith('TCGA-') for i in self.projects]):
            # All-TCGA cohorts default to the merged "GDC_phenotype" set.
            self.xena_dtype = 'GDC_phenotype'
        elif all([i.startswith('TARGET-') for i in self.projects]):
            self.xena_dtype = 'clinical'
        else:
            warnings.warn(
                'Caution: fail to guess phenotype data type for project '
                '{}; use "raw_phenotype" as default.'.format(self.projects)
            )
            self.xena_dtype = 'raw_phenotype'
        self.root_dir = root_dir
        if matrix_dir is not None:
            self.matrix_dir = matrix_dir
        # Load the packaged Jinja2 template used by the ``metadata`` step.
        jinja2_env = jinja2.Environment(
            loader=jinja2.PackageLoader('xena_gdc_etl', 'resources')
        )
        self.metadata_template = jinja2_env.get_template(
            'template.phenotype.meta.json'
        )

    def transform(self):
        """Transform raw phenotype data into Xena matrix.

        Raw clinical data and/or biospecimen data will first be transformed
        separately. Then if needed (i.e. for TCGA projects) the clinical
        matrix and biospecimen matrix will be merged on "cases.submitter_id"
        and processed properly.

        Returns:
            self: allow method chaining.
        """
        message = 'Make Xena matrix for {} data of {}.'
        print(message.format(self.xena_dtype, self.projects))
        total = len(self.raw_data_list)
        count = 0
        bio_dfs = []
        clin_dfs = []
        # Step 1: parse every raw file as clinical first, then biospecimen.
        for path in self.raw_data_list:
            count = count + 1
            print('\rProcessing {}/{} file...'.format(count, total), end='')
            sys.stdout.flush()
            # `read_biospecimen` and `read_clinical` will check file format
            try:
                df = read_clinical(path)
                if not df.empty:
                    clin_dfs.append(df)
            except Exception:
                try:
                    df = read_biospecimen(path)
                    bio_dfs.append(df)
                except Exception:
                    raise TypeError('Fail to process file {}.'.format(path))
        print('\rAll {} files have been processed. '.format(total))
        # Step 2: stack per-file tables; normalize line breaks, blank cells
        # and drop empty columns. An empty input list falls back to an empty
        # DataFrame via the except branch.
        try:
            bio_matrix = (
                pd.concat(bio_dfs, axis=0)
                .replace(r'\r\n', ' ', regex=True)
                .replace(r'^\s*$', np.nan, regex=True)
                .dropna(axis=1, how='all')
                .rename(columns={
                    'bcr_sample_barcode': 'submitter_id.samples',
                    'bcr_patient_barcode': 'submitter_id',
                })
            )
        except Exception:
            bio_matrix = pd.DataFrame()
        try:
            clin_matrix = (
                pd.concat(clin_dfs, axis=0)
                .replace(r'\r\n', ' ', regex=True)
                .replace(r'^\s*$', np.nan, regex=True)
                .dropna(axis=1, how='all')
                .rename(columns={
                    'bcr_patient_barcode': 'submitter_id',
                })
            )
        except Exception:
            clin_matrix = pd.DataFrame()
        # Step 3: dtype-specific assembly.
        # NOTE(review): for xena_dtype == 'raw_phenotype' none of the
        # branches below assigns ``xena_matrix``, so the code after them
        # raises NameError — confirm intended handling for that dtype.
        if self.xena_dtype == 'clinical':
            try:
                # NOTE(review): 'bcr_patient_barcode' was renamed to
                # 'submitter_id' above, so this set_index typically fails and
                # the except branch keeps clin_matrix as-is — confirm.
                xena_matrix = clin_matrix.set_index('bcr_patient_barcode')
            except Exception:
                xena_matrix = clin_matrix
            # Expand the per-case clinical table to one row per sample.
            print('\rMapping clinical info to individual samples...', end='')
            cases = gdc.search(
                'cases',
                in_filter={'project.project_id': self.projects},
                fields=['submitter_id', 'samples.submitter_id'],
                typ='json',
            )
            cases_samples = [c for c in cases if 'samples' in c]
            # NOTE(review): pandas.io.json.json_normalize is deprecated
            # since pandas 1.0; pandas.json_normalize is the replacement.
            from pandas.io.json import json_normalize
            cases_samples_map = json_normalize(
                cases_samples,
                'samples',
                ['submitter_id'],
                meta_prefix='cases.',
            )
            if all([i.startswith('TCGA-') for i in self.projects]):
                cases_samples_map = cases_samples_map.rename(
                    columns={
                        'submitter_id': 'sample_id',
                        'cases.submitter_id': 'bcr_patient_barcode',
                    }
                )
                xena_matrix = pd.merge(
                    xena_matrix.reset_index(),
                    cases_samples_map,
                    how='inner',
                    on='bcr_patient_barcode',
                ).set_index('sample_id')
            elif all([i.startswith('TARGET-') for i in self.projects]):
                cases_samples_map = cases_samples_map.rename(
                    columns={
                        'submitter_id': 'sample_id',
                        'cases.submitter_id': 'TARGET USI',
                    }
                )
                xena_matrix = pd.merge(
                    xena_matrix.reset_index(),
                    cases_samples_map,
                    how='inner',
                    on='TARGET USI',
                ).set_index('sample_id')
            else:
                warnings.warn('Fail to get per sample based clinical matrix.')
        elif self.xena_dtype == 'biospecimen':
            try:
                # NOTE(review): 'bcr_sample_barcode' was renamed to
                # 'submitter_id.samples' above, so this set_index typically
                # fails and the except branch keeps bio_matrix as-is.
                xena_matrix = bio_matrix.set_index('bcr_sample_barcode')
            except Exception:
                xena_matrix = bio_matrix
        if self.xena_dtype == 'GDC_phenotype':
            # Query GDC API for GDC harmonized phenotype info
            api_clin = gdc.get_samples_clinical(self.projects)
            # Revert hierarchy order in column names
            api_clin = api_clin.rename(
                columns={
                    n: '.'.join(reversed(n.split('.')))
                    for n in api_clin.columns
                }
            )
            if all([i.startswith('TCGA-') for i in self.projects]):
                # Remove code 10, Blood Derived Normal, sample:
                # https://gdc.cancer.gov/resources-tcga-users/tcga-code-tables/sample-type-codes
                sample_mask = api_clin['submitter_id.samples'].map(
                    lambda s: s[-3:-1] not in ['10']
                )
                api_clin = api_clin[sample_mask].set_index(
                    'submitter_id.samples'
                )
                # Remove all empty columns
                api_clin = api_clin.dropna(axis=1, how='all')
                # For overlapping columns between raw data matrix and GDC'S
                # API data matrix, use API data.
                for c in self._API_DROPS:
                    try:
                        api_clin.drop(c, axis=1, inplace=True)
                    except Exception:
                        pass
                for c in self._RAW_DROPS:
                    try:
                        clin_matrix.drop(c, axis=1, inplace=True)
                    except Exception:
                        pass
                    try:
                        bio_matrix.drop(c, axis=1, inplace=True)
                    except Exception:
                        pass
                # Merge phenotype matrices from raw data and that from GDC's
                # API
                bio_columns = bio_matrix.columns.difference(
                    clin_matrix.columns
                ).insert(0, 'submitter_id')
                xena_matrix = (
                    pd.merge(
                        bio_matrix[bio_columns],
                        api_clin.reset_index(),
                        how='outer',
                        on=['submitter_id.samples', 'submitter_id'],
                    )
                    .replace(r'^\s*$', np.nan, regex=True)
                )
                xena_matrix = (
                    pd.merge(
                        clin_matrix,
                        xena_matrix,
                        how='outer',
                        on='submitter_id',
                    )
                    .replace(r'^\s*$', np.nan, regex=True)
                    .set_index('submitter_id.samples')
                    .fillna(bio_matrix.set_index('submitter_id.samples'))
                )
            elif all([i.startswith('TARGET-') for i in self.projects]):
                xena_matrix = api_clin.dropna(axis=1, how='all').set_index(
                    'submitter_id.samples'
                )
            else:
                raise ValueError(
                    'Getting "GDC_phenotype" for a cohort with mixed TCGA and '
                    'TARGET projects is not currently suppported.'
                )
        # Drop control samples (TCGA barcodes ending in "Z").
        print('Dropping TCGA-**-****-**Z samples ...')
        xena_matrix = xena_matrix[~xena_matrix.index.str.endswith('Z')]
        # Transformation done
        print('\rSaving matrix to {} ...'.format(self.matrix), end='')
        mkdir_p(self.matrix_dir)
        xena_matrix.to_csv(self.matrix, sep='\t', encoding='utf-8')
        print('\rXena matrix is saved at {}.'.format(self.matrix))
        return self
class GDCAPIPhenoset(XenaDataset):
    r"""GDCAPIPhenoset is derived from the ``XenaDataset`` class and represents
    for a Xena matrix whose data is phenotype data from the GDC API only.

    Attributes:
        projects (str or list): One (string) or a list of GDC's
            "cases.project.project_id". All corresponding projects will be
            included in this dataset.
        gdc_release (str): URL to the data release note for the dataset. It
            will be used by the ``metadata`` method when making the metadata
            for this dataset. It is highly recommended that this attribute is
            set explicitly by the user so that it is guaranteed to match the
            data (raw data) underlying this dataset. If it is not available,
            the most recent data release will be queried and used.
        metadata_vars (dict): A dict of variables which will be used (by \*\*
            unpacking) when rendering the ``metadata_template``. Defaults, if
            needed, can be derived from corresponding matrix and ``projects``
            and ``xena_dtype`` properties.
    """

    @property
    def gdc_release(self):
        """str: URL to the GDC data release note; queried lazily and cached
        on first access if not set explicitly.
        """
        try:
            return self.__gdc_release
        except AttributeError:
            # Build the release-note anchor from GDC's "status" endpoint,
            # e.g. "Data Release 32.0 ..." -> "data-release-320".
            data_release = gdc.search('status', typ='json')['data_release']
            anchor = (
                re.match(r'(Data Release [^\s]+)\s', data_release)
                .group(1)
                .replace(' ', '-')
                .replace('.', '')
                .lower()
            )
            self.__gdc_release = GDC_RELEASE_URL + '#' + anchor
            return self.__gdc_release

    @gdc_release.setter
    def gdc_release(self, url):
        self.__gdc_release = url

    @property
    def metadata_vars(self):
        """dict: variables used (by ** unpacking) when rendering
        ``metadata_template``; derived lazily when not set or empty.
        """
        try:
            assert self.__metadata_vars and isinstance(
                self.__metadata_vars, dict
            )
            return self.__metadata_vars
        except (AttributeError, AssertionError):
            # Dataset date = matrix file's modification time (UTC).
            matrix_date = time.strftime(
                "%m-%d-%Y", time.gmtime(os.path.getmtime(self.matrix))
            )
            projects = ','.join(self.projects)
            variables = {
                'project_id': projects,
                'date': matrix_date,
                'gdc_release': self.gdc_release,
            }
            # Pan-cancer set gets a fixed name; known cohorts get a curated
            # name; others fall back to "GDC <projects>".
            if projects == "GDC-PANCAN":
                variables['xena_cohort'] = "GDC Pan-Cancer (PANCAN)"
            elif projects in GDC_XENA_COHORT:
                variables['xena_cohort'] = GDC_XENA_COHORT[projects]
            else:
                variables['xena_cohort'] = 'GDC ' + projects
            variables["projects"] = projects
            try:
                variables.update(METADATA_VARIABLES[self.xena_dtype])
            except KeyError:
                pass
            self.__metadata_vars = variables
            return self.__metadata_vars

    @metadata_vars.setter
    def metadata_vars(self, variables):
        self.__metadata_vars = variables

    # Override only the getter of the inherited ``download_map`` property:
    # this dataset is built purely from the GDC API, so nothing is downloaded.
    @XenaDataset.download_map.getter
    def download_map(self):
        print("Xena_phenotype is selected. No files will be downloaded.")
        return {}

    def __get_samples_clinical(self, projects, fields, expand):
        """Get info for all samples of ``projects`` and clinical info for all
        cases of ``projects`` through GDC API.

        Args:
            projects (list or str): one (str) or a list of GDC "project_id"(s),
                whose info will be returned. If None, projects will not be
                filtered, i.e. info for all GDC projects will be returned.
                Defaults to None.
            fields (list or str): one (str) or a list of GDC "cases"
            expand (list or str): one (str) or a list of GDC "expand"

        Returns:
            pandas.core.frame.DataFrame: A DataFrame organized by samples,
                having info for all samples of ``projects``, as well as
                corresponding clinical info.
        """
        in_filter = {}
        if projects is not None:
            if isinstance(projects, list):
                in_filter = {'project.project_id': projects}
            else:
                in_filter = {'project.project_id': [projects]}
        res = gdc.search(
            'cases',
            in_filter=in_filter,
            fields=fields,
            expand=expand,
            typ='json',
            method='POST',
        )
        # Collect, across all cases, the columns flagged for removal.
        to_drops = set()
        for ele in res:
            to_drops |= set(gdc.get_to_drops(ele))
        print(
            "Dropping columns {} for {} projects".format(to_drops, projects)
        )
        # Flatten case records (minus the nested "samples") into one table,
        # and the per-case "samples" records into another keyed by case "id".
        reduced_no_samples_json = reduce_json_array(
            [{k: v for k, v in d.items() if k != 'samples'} for d in res]
        )
        # NOTE(review): pd.io.json.json_normalize is deprecated since
        # pandas 1.0; pandas.json_normalize is the replacement.
        cases_df = pd.io.json.json_normalize(reduced_no_samples_json)
        samples_df = pd.io.json.json_normalize(
            [r for r in res if 'samples' in r],
            'samples',
            'id',
            record_prefix='samples.',
        )
        # One row per sample, each carrying its case's clinical columns.
        merged_df = pd.merge(cases_df, samples_df, how='inner', on='id')
        merged_df.drop(list(to_drops), axis=1, inplace=True)
        return merged_df

    def __init__(
        self,
        projects,
        root_dir='.',
        matrix_dir=None,
    ):
        # NOTE(review): arguments are passed positionally; if the base
        # __init__ signature is (projects, xena_dtype, root_dir,
        # raw_data_dir, matrix_dir) — cf. GDCSurvivalset.__init__ — then
        # matrix_dir lands in the raw_data_dir slot. Confirm.
        super(GDCAPIPhenoset, self).__init__(
            projects, 'Xena_phenotype', root_dir, matrix_dir,
        )
        # Only projects with predefined fields/expands are supported.
        if any(
            [
                project not in CASES_FIELDS_EXPANDS.keys()
                for project in self.projects
            ]
        ):
            raise NotImplementedError(
                "'Xena_phenotype' for {} project is not implemented".format(
                    projects
                )
            )
        # Load the packaged Jinja2 template used by the ``metadata`` step.
        jinja2_env = jinja2.Environment(
            loader=jinja2.PackageLoader("xena_gdc_etl", "resources")
        )
        self.metadata_template = jinja2_env.get_template(
            "template.api_phenotype.meta.json"
        )

    def transform(self):
        # Build the phenotype matrix straight from the GDC API.
        # NOTE(review): dispatch is by exact list equality; for any other
        # ``projects`` value, xena_matrix is unbound and the code below
        # raises NameError (``__init__`` normally prevents this by rejecting
        # unsupported projects — confirm CASES_FIELDS_EXPANDS keys).
        if self.projects == ["CPTAC-3"]:
            xena_matrix = self.__get_samples_clinical(
                projects=["CPTAC-3"],
                fields=CASES_FIELDS_EXPANDS["CPTAC-3"]["fields"],
                expand=CASES_FIELDS_EXPANDS["CPTAC-3"]["expand"],
            )
            xena_matrix = xena_matrix.set_index("samples.submitter_id")
        elif self.projects == ["GDC-PANCAN"]:
            # Pan-cancer: query every known cohort project at once.
            xena_matrix = self.__get_samples_clinical(
                projects=list(GDC_XENA_COHORT.keys()),
                fields=CASES_FIELDS_EXPANDS["GDC-PANCAN"]["fields"],
                expand=CASES_FIELDS_EXPANDS["GDC-PANCAN"]["expand"],
            )
            xena_matrix = (
                xena_matrix
                .dropna(axis=1, how="all")
                .set_index("samples.submitter_id")
            )
        # Drop control samples (TCGA barcodes ending in "Z").
        print('Dropping TCGA-**-****-**Z samples ...')
        xena_matrix = xena_matrix[~xena_matrix.index.str.endswith('Z')]
        print('\rSaving matrix to {} ...'.format(self.matrix), end='')
        mkdir_p(self.matrix_dir)
        xena_matrix.to_csv(self.matrix, sep='\t', encoding='utf-8')
        print('\rXena matrix is saved at {}.'.format(self.matrix))
        return self
class GDCSurvivalset(XenaDataset):
    r"""GDCSurvivalset is derived from the ``XenaDataset`` class and
    represents a Xena matrix of GDC survival data for project(s) of interest.

    In general, survival data is retrieved from GDC API's "analysis/survival"
    endpoint. This class provides two default configurations, which can be
    checked and/or changed through ``gdc_release`` and ``metadata_vars``, for
    generating metadata for the transformed Xena matrix. The ``download`` and
    ``transform`` methods are overridden by methods specific for GDC survival
    data.

    Attributes:
        gdc_release (str): URL to the data release note for the dataset. It
            will be used by the ``metadata`` method when making the metadata
            for this dataset. It is highly recommended that this attribute is
            set explicitly by the user so that it is guaranteed to match the
            GDC data underlying this dataset. If it is not available, the most
            recent data release will be queried and used.
        metadata_vars (dict): A dict of variables which will be used (by \*\*
            unpacking) when rendering the ``metadata_template``. Defaults, if
            needed, can be derived from corresponding matrix and the
            ``projects`` property.
    """

    @property
    def gdc_release(self):
        # Lazy default: if no release URL was set explicitly, query the GDC
        # "status" endpoint once and cache an anchor link into the release
        # notes page (e.g. "<GDC_RELEASE_URL>#data-release-90").
        try:
            return self.__gdc_release
        except AttributeError:
            data_release = gdc.search('status', typ='json')['data_release']
            # "Data Release 9.0 ..." -> "data-release-90" (anchor format).
            anchor = (
                re.match(r'(Data Release [^\s]+)\s', data_release)
                .group(1)
                .replace(' ', '-')
                .replace('.', '')
                .lower()
            )
            self.__gdc_release = GDC_RELEASE_URL + '#' + anchor
            return self.__gdc_release

    @gdc_release.setter
    def gdc_release(self, url):
        # Pin the release-note URL explicitly (recommended; see class doc).
        self.__gdc_release = url

    @property
    def metadata_vars(self):
        # Cached dict of Jinja2 template variables; rebuilt whenever it is
        # missing or not a non-empty dict.
        try:
            assert self.__metadata_vars and isinstance(
                self.__metadata_vars, dict
            )
            return self.__metadata_vars
        except (AttributeError, AssertionError):
            # The matrix file's mtime (UTC) serves as the dataset date.
            matrix_date = time.strftime(
                "%m-%d-%Y", time.gmtime(os.path.getmtime(self.matrix))
            )
            projects = ','.join(self.projects)
            variables = {
                'project_id': projects,
                'date': matrix_date,
                'gdc_release': self.gdc_release,
            }
            # Prefer a canonical Xena cohort name when one is defined for
            # this exact project combination; otherwise "GDC <projects>".
            if projects in GDC_XENA_COHORT:
                variables['xena_cohort'] = GDC_XENA_COHORT[projects]
            else:
                variables['xena_cohort'] = 'GDC ' + projects
            self.__metadata_vars = variables
            return self.__metadata_vars

    @metadata_vars.setter
    def metadata_vars(self, variables):
        self.__metadata_vars = variables

    def __init__(
        self, projects, root_dir='.', raw_data_dir=None, matrix_dir=None
    ):
        # The Xena data type is fixed to 'survival' for this dataset class.
        super(GDCSurvivalset, self).__init__(
            projects, 'survival', root_dir, raw_data_dir, matrix_dir
        )
        # Metadata is rendered from the survival template packaged with
        # xena_gdc_etl's resources.
        jinja2_env = jinja2.Environment(
            loader=jinja2.PackageLoader('xena_gdc_etl', 'resources')
        )
        self.metadata_template = jinja2_env.get_template(
            'template.survival.meta.json'
        )

    def download(self):
        """Retrieve GDC API's survival data for project(s) in this dataset.

        The survival data is queried and retrieved through GDC API's
        "analysis/survival" endpoint for project(s) belonging to this dataset.
        JSON query results are converted to a pandas DataFrame and saved as a
        single tab-separated values ("<projects>.GDC_survival.tsv") file under
        ``raw_data_dir``.

        Returns:
            self: allow method chaining.
        """
        # The endpoint returns one result per query; 'donors' holds the
        # per-case survival records.
        survival = gdc.search(
            'analysis/survival',
            in_filter={'project.project_id': self.projects},
            typ='json',
        )['results'][0]['donors']
        mkdir_p(self.raw_data_dir)
        path = os.path.join(
            self.raw_data_dir,
            '{}.GDC_survival.tsv'.format(','.join(self.projects)),
        )
        pd.DataFrame(survival).set_index('id').to_csv(path, sep='\t')
        self.raw_data_list = [path]
        print(
            'Raw {} data for {} is ready.'.format(
                self.projects, self.xena_dtype
            )
        )
        return self

    def transform(self):
        """Transform GDC survival data according to Xena survival data spec

        Only 1 GDC raw survival data (i.e. ``raw_data_list[0]``) will be read
        and used by this transformation. Xena survival data has 4 columns,
        which are "sample", "OS", "OS.time" and "_PATIENT". "OS"
        corresponds to the "censored" column in GDC survival data; "OS.time"
        corresponds to the "time" column in GDC survival data;
        "_PATIENT" corresponds to the "submitter_id" column in GDC survival
        data which is the case(patient)'s submitter ID; "sample" contains
        "samples.submitter_id" for corresponding case(patient).

        Returns:
            self: allow method chaining.
        """
        raw_df = pd.read_csv(self.raw_data_list[0], sep="\t")
        # Transform GDC data according to Xena survival data spec
        survival_df = raw_df.drop(
            ['project_id', 'survivalEstimate'], axis=1
        ).rename(
            columns={
                'censored': 'OS',
                'time': 'OS.time',
                'submitter_id': '_PATIENT',
            }
        )
        # "censored" True means no observed event, so the OS event indicator
        # is its negation (1 = event observed, 0 = censored).
        survival_df['OS'] = (~survival_df['OS']).map(int)
        # Get samples to case map
        case_samples = gdc.search(
            'cases',
            in_filter={'project.project_id': self.projects},
            fields='submitter_sample_ids',
            typ='json',
        )
        case_samples = [c for c in case_samples if 'submitter_sample_ids' in c]
        # NOTE(review): ``pd.io.json.json_normalize`` is deprecated in newer
        # pandas in favor of ``pd.json_normalize``.
        samples_df = pd.io.json.json_normalize(
            case_samples, 'submitter_sample_ids', 'id'
        ).rename(columns={0: 'sample'})
        # Keep samples unless barcode chars [-3:-1] equal '10' -- presumably
        # the TCGA sample-type code for blood-derived normal; confirm.
        sample_mask = samples_df['sample'].map(
            lambda s: s[-3:-1] not in ['10']
        )
        samples_df = samples_df[sample_mask]
        # Make sample indexed survival matrix
        df = (
            pd.merge(survival_df, samples_df, how='inner', on='id')
            .drop('id', axis=1)
            .set_index('sample')
        )
        print('Dropping TCGA-**-****-**Z samples ...')
        df = df[~df.index.str.endswith('Z')]
        mkdir_p(os.path.dirname(self.matrix))
        df.to_csv(self.matrix, sep='\t')
        print('\rXena matrix is saved at {}.'.format(self.matrix))
        return self
def main():
    """Ad-hoc smoke test: build one GDC omic dataset end to end."""
    print('A python module of Xena specific importing pipeline for GDC data.')
    started = time.time()
    dataset = GDCOmicset(
        projects='TCGA-BRCA',
        root_dir=r'/mnt/e/GitHub/xena-GDC-ETL/gitignore/test',
        xena_dtype='methylation450',
    )
    dataset.download().transform().metadata()
    print(time.time() - started)


if __name__ == '__main__':
    main()
|
import torch.nn as nn
import torchvision.models as models
from .helper import init, make_standard_block
class VGG(nn.Module):
    """First-stage feature extractor built on an ImageNet-pretrained VGG-19.

    The first 23 (plain) or 33 (batch-norm) layers of the torchvision VGG-19
    ``features`` module (512-channel output) are followed by two freshly
    initialized conv blocks reducing the channels 512 -> 256 -> 128.
    """

    def __init__(self, use_bn=True):  # Original implementation doesn't use BN
        super(VGG, self).__init__()
        # Bug fix: the two branches were swapped -- ``use_bn=True`` used to
        # load the plain ``vgg19`` and ``use_bn=False`` loaded ``vgg19_bn``.
        if use_bn:
            vgg = models.vgg19_bn(pretrained=True)
            # BN model: each conv is followed by a BatchNorm layer, so the
            # equivalent truncation point sits at index 33.
            layers_to_use = list(list(vgg.children())[0].children())[:33]
        else:
            vgg = models.vgg19(pretrained=True)
            layers_to_use = list(list(vgg.children())[0].children())[:23]
        self.vgg = nn.Sequential(*layers_to_use)
        self.feature_extractor = nn.Sequential(make_standard_block(512, 256, 3),
                                               make_standard_block(256, 128, 3))
        init(self.feature_extractor)

    def forward(self, x):
        """Run the truncated VGG backbone, then the 512->256->128 head."""
        x = self.vgg(x)
        x = self.feature_extractor(x)
        return x
|
from abc import abstractmethod
from copy import deepcopy
from hrl.salient_event.SalientEventClass import SalientEvent
import numpy as np
from gym import Wrapper
class GoalConditionedMDPWrapper(Wrapper):
    """
    Abstract gym wrapper modelling a goal-conditioned MDP.

    The user must supply a start state, a goal state, and a goal tolerance
    defining a ball around the goal state. All methods in this class are
    expected to operate on batches of states.
    """

    def __init__(self, env, start_state, goal_state, goal_tolerance=0.6):
        super().__init__(env)
        self.env = env
        self.start_state = start_state
        self.goal_state = goal_state
        self.salient_positions = [goal_state]
        self.goal_tolerance = np.asarray(goal_tolerance)
        # set initial states
        self.cur_state = deepcopy(self.reset())
        self.cur_done = False

    def get_start_state_salient_event(self):
        """Salient event anchored at the start state (index 0)."""
        return SalientEvent(self.start_state, event_idx=0)

    def get_original_target_events(self):
        """Return one ``SalientEvent`` per entry of ``salient_positions``."""
        return [
            SalientEvent(position, event_idx=index + 1)
            for index, position in enumerate(self.salient_positions)
        ]

    @abstractmethod
    def sparse_gc_reward_func(self, states, goals):
        """Subclasses must provide the sparse goal-conditioned reward."""

    @abstractmethod
    def dense_gc_reward_func(self, states, goals):
        """Subclasses must provide the dense goal-conditioned reward."""

    def reset(self):
        """Reset the wrapped env and refresh the cached current state."""
        self.init_state = self.env.reset()
        self.cur_state = deepcopy(self.init_state)
        self.cur_done = False
        return self.init_state

    def step(self, action):
        """
        overwrite the step function for gc MDP.
        """
        observation, reward, done, info = self.env.step(action)
        self.cur_state = observation
        self.cur_done = done
        return observation, reward, done, info

    @abstractmethod
    def is_start_region(self, states):
        """Boolean array: which of the batched states lie in the start region."""

    @abstractmethod
    def is_goal_region(self, states):
        """Boolean array: which of the batched states lie in the goal region."""

    @abstractmethod
    def extract_features_for_initiation_classifier(self, states):
        """
        Map a batch of states (shape ``N x D``) to the ``N x K`` feature
        slice used to learn the initiation-set classifier (e.g. K=2 for
        navigation). Subclasses must override.
        """
|
from typing import Tuple
import pytest
from hypothesis import given
from dendroid.hints import (Item,
Key)
from tests.utils import (Map,
map_value_to_key)
from . import strategies
@given(strategies.empty_maps_with_keys)
def test_base_case(map_with_key: Tuple[Map, Key]) -> None:
    """``prev`` on an empty map raises ``KeyError`` for any key."""
    mapping, missing_key = map_with_key
    with pytest.raises(KeyError):
        mapping.prev(missing_key)
@given(strategies.non_empty_maps_with_their_items)
def test_step(map_with_value: Tuple[Map, Item]) -> None:
    """``prev`` of an existing key is strictly smaller, unless at the minimum."""
    mapping, item = map_with_value
    key, value = item
    at_minimum = value == mapping.min()
    assert at_minimum or map_value_to_key(mapping, mapping.prev(key)) < key
@given(strategies.non_empty_maps_with_external_keys)
def test_external_value(map_with_key: Tuple[Map, Key]) -> None:
    """``prev`` with a key absent from the map raises ``KeyError``."""
    mapping, external_key = map_with_key
    with pytest.raises(KeyError):
        mapping.prev(external_key)
@given(strategies.non_empty_maps)
def test_minimum_key(map_: Map) -> None:
    """The minimum key of a non-empty map has no predecessor."""
    with pytest.raises(KeyError):
        map_.prev(min(map_))
|
# Copyright (c) 2021 NVIDIA Corporation. All rights reserved.
# This work is licensed under the NVIDIA Source Code License - Non-commercial.
# Full text can be found in LICENSE.md
import os
import glob
import argparse
import cv2
import tqdm
from utils import *
import subprocess
import sys
module_path = os.path.abspath(os.path.join('..'))
if module_path not in sys.path:
sys.path.append(module_path)
from objectron.schema import annotation_data_pb2 as annotation_protocol
# Objectron categories to process by default. All but "bike" are commented
# out so a single category is handled per run; pass --c on the command line
# to override this list.
categories = [
    "bike",
    # "book",
    # "bottle",
    # "camera",
    # "cereal_box",
    # "chair",
    # "cup",
    # "laptop",
    # "shoe"
]
def grab_frame(video_file, sequence, frame_ids):
    """Grab image frames from ``video_file`` through an ffmpeg raw-video pipe.

    Args:
        video_file: path to the .MOV file to decode.
        sequence: parsed Objectron annotation protobuf; only the camera
            resolution of the first frame annotation is read.
        frame_ids: frame indices to extract (used to build an ffmpeg
            ``select`` filter).

    Returns:
        (frames, warning_flag): ``frames`` is a uint8 numpy array of shape
        (num_frames, width, height, 3) and ``warning_flag`` is 0 on success,
        1 when fewer bytes than expected were read. Returns (None, 1) on any
        unexpected error.
    """
    try:
        frames = []
        # Numpy array width (img height) <- data.camera.image_resolution_width
        width = sequence.frame_annotations[0].camera.image_resolution_width
        height = sequence.frame_annotations[0].camera.image_resolution_height
        frame_size = len(frame_ids) * width * height * 3
        # Build an ffmpeg "select" filter that matches exactly the wanted
        # frame numbers: select='eq(n\,0)+eq(n\,5)+...'
        frame_filter = 'select=\''
        for idx, frame_id in enumerate(frame_ids):
            if idx == 0:
                frame_filter = frame_filter + f'eq(n\,{frame_id})'
            else:
                frame_filter = frame_filter + f'+eq(n\,{frame_id})'
        frame_filter = frame_filter + '\''
        command = [
            'ffmpeg', '-i', video_file, '-f', 'image2pipe', '-vf', frame_filter,
            '-pix_fmt', 'rgb24', '-vcodec', 'rawvideo', '-vsync', 'vfr', '-',
            '-loglevel', 'panic', '-hide_banner'
        ]
        pipe = subprocess.Popen(command, stdout=subprocess.PIPE)
        current_frame = np.frombuffer(
            pipe.stdout.read(frame_size), dtype='uint8')
        if current_frame.size == frame_size:
            # NOTE(review): reshapes to (n, width, height, 3); presumably the
            # video is stored rotated -- confirm against downstream consumers.
            current_frame = current_frame.reshape(
                int(current_frame.size / width / height / 3), width, height, 3)
            pipe.stdout.flush()
            if frames == []:
                frames = current_frame
            else:
                frames = frames + current_frame
            warning_flag = 0
        else:
            # Short read: the video contained fewer frames than requested.
            warning_flag = 1
    except Exception:
        # Was a bare ``except:``; narrowed so KeyboardInterrupt/SystemExit
        # are no longer swallowed.
        return None, 1
    return frames, warning_flag
def preprocess(annotation_file, category, opt):
    """Extract frames and per-frame .json labels for one Objectron sequence.

    Reads the annotation protobuf, pulls the selected frames out of the
    matching .MOV via ``grab_frame``, writes resized PNGs plus NDDS-style
    .json files under ``{opt.outf}/{category}/{prefix}/``. Any failure is
    appended to ``bug_list.txt`` instead of raising.
    """
    # Read from the sequence info
    try:
        with open(annotation_file, 'rb') as pb:
            sequence = annotation_protocol.Sequence()
            sequence.ParseFromString(pb.read())
    except Exception:
        # Was a bare ``except:``; narrowed so Ctrl-C still interrupts.
        # Corrupt/missing .pbdata: log the corresponding video and move on.
        with open('bug_list.txt', 'a+') as fp:
            video_filename = annotation_file.replace('pbdata', 'MOV')
            fp.write(video_filename)
            fp.write('\n')
        return
    # Keep every ``opt.frame_rate``-th annotated frame.
    frame_id_list = list(range(0, len(sequence.frame_annotations), opt.frame_rate))
    # Extract all the frames from the video
    video_filename = annotation_file.replace('pbdata', 'MOV')
    frame, warning_flag = grab_frame(video_filename, sequence, frame_id_list)
    if warning_flag == 1:
        with open('bug_list.txt', 'a+') as fp:
            fp.write(video_filename)
            fp.write('\n')
    else:
        prefix = annotation_file[annotation_file.rfind('/')+1:annotation_file.rfind('.')]
        if os.path.isdir(f'{opt.outf}/{category}/{prefix}'):
            print(f'folder {opt.outf}/{category}/{prefix} exists')
        else:
            os.mkdir(f'{opt.outf}/{category}/{prefix}')
            print(f'created folder {opt.outf}/{category}/{prefix}')
        for i, frame_id in enumerate(frame_id_list):
            # Save all the extracted images
            im_bgr = cv2.cvtColor(frame[i], cv2.COLOR_RGB2BGR)
            # resize to img width, img height
            im_bgr = cv2.resize(
                im_bgr,
                (int(im_bgr.shape[1]/opt.resolution_ratio),
                 int(im_bgr.shape[0]/opt.resolution_ratio)))
            cv2.imwrite(
                f"{opt.outf}/{category}/{prefix}/{str(frame_id).zfill(5)}.png",
                im_bgr)
            # Export .json file
            warning_flag = export_to_ndds_file(
                frame,
                f"{opt.outf}/{category}/{prefix}/{str(frame_id).zfill(5)}.json",
                sequence=sequence,
                frame_id=frame_id,
                opt=opt,
                video_filename=video_filename
            )
            if warning_flag == 1:
                with open('bug_list.txt', 'a+') as fp:
                    fp.write(video_filename)
                    fp.write('\n')
    return
if __name__ == "__main__":
    # User defined parameters
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--outf',
        default = 'outf_all',
        help = "output filename inside output/"
    )
    parser.add_argument(
        '--debug',
        action = 'store_true',
        default = False,
        help = "Debug mode or not"
    )
    parser.add_argument(
        '--resolution_ratio',
        default = 2.4,
        help = "ratio change from the original resolution (by default 1920*1440-> 800*600)"
    )
    parser.add_argument(
        '--frame_rate',
        type = int,
        default = 1,
        help = "skip intermediate frames"
    )
    parser.add_argument(
        '--skip',
        action = 'store_true',
        default = False,
        help = "skip the files which have been generated"
    )
    parser.add_argument(
        '--test_flag',
        action = 'store_true',
        default = False,
        help = "generate data for test"
    )
    parser.add_argument(
        '--c',
        nargs = '+',
        default = categories,
        help = "categories to be generated"
    )
    opt = parser.parse_args()
    # Todo: Hack some parameters here, should be commented if not using Pycharm but .sh instead
    # opt.debug = True
    # opt.test_flag = True
    # NOTE(review): these two lines override whatever was passed on the
    # command line for --skip and --resolution_ratio.
    opt.skip = True
    opt.resolution_ratio = 2.4
    # Ensure the top-level output directory exists.
    if os.path.isdir(f'{opt.outf}'):
        print(f'folder {opt.outf}/ exists')
    else:
        os.mkdir(f'{opt.outf}')
        print(f'created folder {opt.outf}/')
    # Target file
    if opt.debug is True:
        # Debug mode: process one hard-coded sequence into {outf}/debug.
        annotation_file = 'test/chair_batch-13_32.pbdata'
        if os.path.isdir(f'{opt.outf}/debug'):
            print(f'folder {opt.outf}/debug exists')
        else:
            os.mkdir(f'{opt.outf}/debug')
            print(f'created folder {opt.outf}/debug')
        if opt.skip == True:
            # Skip sequences whose .json output already exists.
            prefix=annotation_file[annotation_file.rfind('/')+1:annotation_file.rfind('.')]
            if glob.glob(f"{opt.outf}/debug/{prefix}/*.json"):
                print('Skip it')
            else:
                preprocess(annotation_file,'debug',opt)
        else:
            preprocess(annotation_file,'debug',opt)
    else:
        # Normal mode: process every listed sequence of every category.
        for c in opt.c:
            print(c)
            # Output folder suffix encodes the train/test split.
            if opt.test_flag == False:
                suffix="train"
            else:
                suffix="test"
            if os.path.isdir(f'{opt.outf}/{c}_{suffix}'):
                print(f'folder {opt.outf}/{c}_{suffix} exists')
            else:
                os.mkdir(f'{opt.outf}/{c}_{suffix}')
                print(f'created folder {opt.outf}/{c}_{suffix}')
            # Sequence names for this category/split, one per line.
            with open(f"index" + f"/{c}_annotations_{suffix}",'r') as fopen:
                target_list = fopen.read().splitlines()
            # Read bug list
            if os.path.exists(f"label/{c}/bug_list.txt"):
                with open(f"label/{c}/bug_list.txt",'r') as fopen:
                    opt.bug_list = fopen.read().splitlines()
            else:
                opt.bug_list = []
            for target in tqdm.tqdm(target_list):
                print(target)
                # e.g. "bike/batch-1/3" -> "data/bike/bike_batch-1_3.pbdata"
                annotation_file = f"data/{c}/"+target.replace('/','_')+'.pbdata'
                prefix = annotation_file[annotation_file.rfind('/')+1:annotation_file.rfind('.')]
                if opt.skip == True:
                    # if glob.glob(f"{opt.outf}/{c}_{suffix}/{prefix}.png"):
                    if glob.glob(f"{opt.outf}/{c}_{suffix}/{prefix}/*.json"):
                        continue
                preprocess(annotation_file,f"{c}_{suffix}",opt)
    print('Done')
|
# Copied from chromium build/.
#
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Helper functions useful when writing scripts that integrate with GN.
The main functions are ToGNString and FromGNString which convert between
serialized GN variables and Python variables.
To use in a random python file in the build:
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__),
os.pardir, os.pardir, "build"))
import gn_helpers
Where the sequence of parameters to join is the relative path from your source
file to the build directory."""
import sys
class GNException(Exception):
  """Raised for malformed GN input or values that cannot be expressed in GN."""
  pass
def ToGNString(value, allow_dicts = True):
  """Returns a stringified GN equivalent of the Python value.

  allow_dicts indicates if this function will allow converting dictionaries
  to GN scopes. This is only possible at the top level, you can't nest a
  GN scope in a list, so this should be set to False for recursive calls.

  Raises:
    GNException: for strings containing newlines, non-string dict keys,
        nested dicts, or unsupported types.
  """
  if sys.version_info.major < 3:
    basestring_compat = basestring
  else:
    basestring_compat = str

  if isinstance(value, basestring_compat):
    if value.find('\n') >= 0:
      raise GNException("Trying to print a string with a newline in it.")
    return '"' + \
        value.replace('\\', '\\\\').replace('"', '\\"').replace('$', '\\$') + \
        '"'

  # Bug fix: the old unconditional ``isinstance(value, unicode)`` branch
  # raised NameError on Python 3 (``unicode`` is undefined) for every
  # non-string value, and was unreachable on Python 2 because ``basestring``
  # already covers ``unicode``; it is therefore removed.

  if isinstance(value, bool):
    if value:
      return "true"
    return "false"

  if isinstance(value, list):
    return '[ %s ]' % ', '.join(ToGNString(v) for v in value)

  if isinstance(value, dict):
    if not allow_dicts:
      raise GNException("Attempting to recursively print a dictionary.")
    result = ""
    for key in sorted(value):
      # Bug fix: use the 2/3-compatible alias; a plain ``basestring`` check
      # raised NameError on Python 3.
      if not isinstance(key, basestring_compat):
        raise GNException("Dictionary key is not a string.")
      result += "%s = %s\n" % (key, ToGNString(value[key], False))
    return result

  if isinstance(value, int):
    return str(value)

  raise GNException("Unsupported type when printing to GN.")
def FromGNString(input_string):
  """Converts the input string from a GN serialized value to Python values.

  For details on supported types see GNValueParser.Parse() below.

  Typical use: a GN script interpolates a variable into a command line
  (e.g. args = [ "--values=$something" ]); the resulting quoted/escaped
  text (such as '[ "file1", "file2" ]') is parsed back into the matching
  Python value here.

  Note that the Python 'ast' module will not handle GN escaping or GN
  booleans, so use this function instead. Bare, unquoted strings (as
  produced by interpolating a plain string variable) are NOT valid input --
  only quoted strings, numbers, booleans and lists of them, exactly as GN
  itself would print them.
  """
  return GNValueParser(input_string).Parse()
def FromGNArgs(input_string):
  """Converts a string with a bunch of gn arg assignments into a Python dict.

  Given a whitespace-separated list of

    <ident> = (integer | string | boolean | <list of the former>)

  gn assignments, this returns a Python dict, i.e.:

    FromGNArgs("foo=true\nbar=1\n") -> { 'foo': True, 'bar': 1 }.

  Only simple types and lists are supported; variables, structs, calls and
  other, more complicated things are not. This routine is meant to handle
  only the simple sorts of values that arise in parsing --args.
  """
  return GNValueParser(input_string).ParseArgs()
def UnescapeGNString(value):
  """Given a string with GN escaping, returns the unescaped string.

  GN escapes only '$', '"' and '\\'; any other backslash is kept literally.
  (Do not feed this output of a Python parser like 'ast' -- Python
  unescaping differs from GN's and the result would be wrong.)
  """
  pieces = []
  i = 0
  length = len(value)
  while i < length:
    ch = value[i]
    if ch == '\\' and i + 1 < length:
      follower = value[i + 1]
      if follower in ('$', '"', '\\'):
        # A recognized GN escape: emit the escaped character, skip both.
        pieces.append(follower)
        i += 2
        continue
      # Unrecognized escape: the backslash is literal.
      pieces.append('\\')
      i += 1
      continue
    if ch != '\\':
      pieces.append(ch)
    # A trailing lone backslash falls through here and is dropped, matching
    # the original implementation.
    i += 1
  return ''.join(pieces)
def _IsDigitOrMinus(char):
return char in "-0123456789"
class GNValueParser(object):
  """Duplicates GN parsing of values and converts to Python types.

  Normally you would use the wrapper function FromGNValue() below.

  If you expect input as a specific type, you can also call one of the Parse*
  functions directly. All functions throw GNException on invalid input.
  """

  def __init__(self, string):
    # The full input text and a cursor index into it.
    self.input = string
    self.cur = 0

  def IsDone(self):
    # True once the cursor has consumed the whole input.
    return self.cur == len(self.input)

  def ConsumeWhitespace(self):
    # Advance the cursor past spaces, tabs and newlines.
    while not self.IsDone() and self.input[self.cur] in ' \t\n':
      self.cur += 1

  def Parse(self):
    """Converts a string representing a printed GN value to the Python type.

    See additional usage notes on FromGNString above.

    - GN booleans ('true', 'false') will be converted to Python booleans.

    - GN numbers ('123') will be converted to Python numbers.

    - GN strings (double-quoted as in '"asdf"') will be converted to Python
      strings with GN escaping rules. GN string interpolation (embedded
      variables preceeded by $) are not supported and will be returned as
      literals.

    - GN lists ('[1, "asdf", 3]') will be converted to Python lists.

    - GN scopes ('{ ... }') are not supported.
    """
    result = self._ParseAllowTrailing()
    self.ConsumeWhitespace()
    # Exactly one value is allowed; anything left over is an error.
    if not self.IsDone():
      raise GNException("Trailing input after parsing:\n " +
                        self.input[self.cur:])
    return result

  def ParseArgs(self):
    """Converts a whitespace-separated list of ident=literals to a dict.

    See additional usage notes on FromGNArgs, above.
    """
    d = {}
    self.ConsumeWhitespace()
    # Repeatedly parse "<ident> = <value>" pairs until input is exhausted.
    while not self.IsDone():
      ident = self._ParseIdent()
      self.ConsumeWhitespace()
      if self.input[self.cur] != '=':
        raise GNException("Unexpected token: " + self.input[self.cur:])
      self.cur += 1  # Skip over '='.
      self.ConsumeWhitespace()
      val = self._ParseAllowTrailing()
      self.ConsumeWhitespace()
      # Later assignments to the same ident silently overwrite earlier ones.
      d[ident] = val
    return d

  def _ParseAllowTrailing(self):
    """Internal version of Parse that doesn't check for trailing stuff."""
    self.ConsumeWhitespace()
    if self.IsDone():
      raise GNException("Expected input to parse.")
    # Dispatch on the first character of the next token.
    next_char = self.input[self.cur]
    if next_char == '[':
      return self.ParseList()
    elif _IsDigitOrMinus(next_char):
      return self.ParseNumber()
    elif next_char == '"':
      return self.ParseString()
    elif self._ConstantFollows('true'):
      return True
    elif self._ConstantFollows('false'):
      return False
    else:
      raise GNException("Unexpected token: " + self.input[self.cur:])

  def _ParseIdent(self):
    # Identifiers: [A-Za-z_][A-Za-z0-9_]* -- first char must not be a digit.
    ident = ''
    next_char = self.input[self.cur]
    if not next_char.isalpha() and not next_char=='_':
      raise GNException("Expected an identifier: " + self.input[self.cur:])
    ident += next_char
    self.cur += 1
    # NOTE(review): this indexes self.input[self.cur] without a bounds
    # check, so an identifier ending exactly at end-of-input raises
    # IndexError rather than GNException -- presumably callers always have
    # a trailing '=' or whitespace; confirm.
    next_char = self.input[self.cur]
    while next_char.isalpha() or next_char.isdigit() or next_char=='_':
      ident += next_char
      self.cur += 1
      next_char = self.input[self.cur]
    return ident

  def ParseNumber(self):
    # Integers only: an optional leading '-' followed by decimal digits.
    self.ConsumeWhitespace()
    if self.IsDone():
      raise GNException('Expected number but got nothing.')
    begin = self.cur
    # The first character can include a negative sign.
    if not self.IsDone() and _IsDigitOrMinus(self.input[self.cur]):
      self.cur += 1
    while not self.IsDone() and self.input[self.cur].isdigit():
      self.cur += 1
    number_string = self.input[begin:self.cur]
    # A bare '-' (or an empty match) is not a valid number.
    if not len(number_string) or number_string == '-':
      raise GNException("Not a valid number.")
    return int(number_string)

  def ParseString(self):
    self.ConsumeWhitespace()
    if self.IsDone():
      raise GNException('Expected string but got nothing.')
    if self.input[self.cur] != '"':
      raise GNException('Expected string beginning in a " but got:\n ' +
                        self.input[self.cur:])
    self.cur += 1  # Skip over quote.
    begin = self.cur
    # Scan to the closing quote; a backslash escapes the following
    # character so an escaped '"' does not terminate the string.
    while not self.IsDone() and self.input[self.cur] != '"':
      if self.input[self.cur] == '\\':
        self.cur += 1  # Skip over the backslash.
        if self.IsDone():
          raise GNException("String ends in a backslash in:\n " +
                            self.input)
      self.cur += 1
    if self.IsDone():
      raise GNException('Unterminated string:\n ' + self.input[begin:])
    end = self.cur
    self.cur += 1  # Consume trailing ".
    # The raw contents still carry GN escapes; undo them here.
    return UnescapeGNString(self.input[begin:end])

  def ParseList(self):
    self.ConsumeWhitespace()
    if self.IsDone():
      raise GNException('Expected list but got nothing.')
    # Skip over opening '['.
    if self.input[self.cur] != '[':
      raise GNException("Expected [ for list but got:\n " +
                        self.input[self.cur:])
    self.cur += 1
    self.ConsumeWhitespace()
    if self.IsDone():
      raise GNException("Unterminated list:\n " + self.input)
    list_result = []
    # Start True so the first element needs no preceding comma; a trailing
    # comma before ']' is also accepted.
    previous_had_trailing_comma = True
    while not self.IsDone():
      if self.input[self.cur] == ']':
        self.cur += 1  # Skip over ']'.
        return list_result
      if not previous_had_trailing_comma:
        raise GNException("List items not separated by comma.")
      list_result += [ self._ParseAllowTrailing() ]
      self.ConsumeWhitespace()
      if self.IsDone():
        break
      # Consume comma if there is one.
      previous_had_trailing_comma = self.input[self.cur] == ','
      if previous_had_trailing_comma:
        # Consume comma.
        self.cur += 1
        self.ConsumeWhitespace()
    raise GNException("Unterminated list:\n " + self.input)

  def _ConstantFollows(self, constant):
    """Returns true if the given constant follows immediately at the current
    location in the input. If it does, the text is consumed and the function
    returns true. Otherwise, returns false and the current position is
    unchanged."""
    end = self.cur + len(constant)
    if end > len(self.input):
      return False  # Not enough room.
    if self.input[self.cur:end] == constant:
      self.cur = end
      return True
    return False
|
"""Python wrappers around TensorFlow ops.
This file is MACHINE GENERATED! Do not edit.
"""
import collections as _collections
import six as _six
from tensorflow.python import pywrap_tensorflow as _pywrap_tensorflow
from tensorflow.python.eager import context as _context
from tensorflow.python.eager import core as _core
from tensorflow.python.eager import execute as _execute
from tensorflow.python.framework import dtypes as _dtypes
from tensorflow.python.framework import errors as _errors
from tensorflow.python.framework import tensor_shape as _tensor_shape
from tensorflow.core.framework import op_def_pb2 as _op_def_pb2
# Needed to trigger the call to _set_call_cpp_shape_fn.
from tensorflow.python.framework import common_shapes as _common_shapes
from tensorflow.python.framework import op_def_registry as _op_def_registry
from tensorflow.python.framework import ops as _ops
from tensorflow.python.framework import op_def_library as _op_def_library
from tensorflow.python.util.tf_export import tf_export
@tf_export('masked_matmul')
def masked_matmul(a, b, mask_indices, transpose_a, transpose_b, name=None):
  r"""Computes the product a * b, but only for indices (i, j) in mask_indices. The

  result is stored in prod_values, a rank 1 tensor, such that for all i,
  prod_values[i] = (a * b)[mask_indices[i, 0], mask_indices[i, 1]].
  Note that the shapes of the input matrices a, b should be compatible (after
  transposing as specified by the arguments transpose_a and transpose_b).

  Input arguments:

  Args:
    a: A `Tensor` of type `float32`. A rank 2 tensor of shape [m, n].
    b: A `Tensor` of type `float32`.
      A rank 2 tensor of shape [s, t]. The inner dimensions of a and b should match
      after transposition.
    mask_indices: A `Tensor` of type `int64`.
      A rank 2 tensor, of shape [nnz, 2] where nnz is the number of
      non-zero elements in the output. The indices are not assumed to be in
      lexicographic, or any particular order.
      For all i, mask_indices[i, :] should represent a valid index of the product
      matrix (a * b) (after transposition). That is:
      mask_indices[i, 0] should be in [0, m) if !transpose_a, and in [0, n)
      otherwise.
      mask_indices[i, 1] should be in [0, t) if !transpose_b, and in [0, s)
      otherwise.
    transpose_a: A `Tensor` of type `bool`.
      A boolean, specifies whether to transpose the matrix a.
    transpose_b: A `Tensor` of type `bool`.
      A boolean, specifies whether to transpose the matrix b.

    Output arguments:
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `float32`.
    A rank 1 tensor of shape [nnz], representing the values of the
    non-zero elements in the product, such that for all i,
    prod_values[i] = (a * b)[mask_indices[i, 0], mask_indices[i, 1]].
  """
  # Machine-generated dispatcher: graph mode builds an op via the op-def
  # library; eager mode tries the C fast path and falls back on failure.
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: create the "MaskedMatmul" node and record gradients.
    _, _, _op = _op_def_lib._apply_op_helper(
        "MaskedMatmul", a=a, b=b, mask_indices=mask_indices,
        transpose_a=transpose_a, transpose_b=transpose_b, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = None
    _execute.record_gradient(
        "MaskedMatmul", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result
  else:
    try:
      # Eager fast path implemented in C.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
          _ctx._context_handle, _ctx._eager_context.device_name, "MaskedMatmul",
          name, _ctx._post_execution_callbacks, a, b, mask_indices, transpose_a,
          transpose_b)
      return _result
    except _core._FallbackException:
      # Fast path unsupported for these inputs: use the Python fallback.
      return masked_matmul_eager_fallback(
          a, b, mask_indices, transpose_a, transpose_b, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Re-raise C-level status errors as Python exceptions, with op name.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
def masked_matmul_eager_fallback(a, b, mask_indices, transpose_a, transpose_b, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function masked_matmul
  """
  _ctx = ctx if ctx else _context.context()
  # Coerce every input to the dtype declared in the op definition.
  a = _ops.convert_to_tensor(a, _dtypes.float32)
  b = _ops.convert_to_tensor(b, _dtypes.float32)
  mask_indices = _ops.convert_to_tensor(mask_indices, _dtypes.int64)
  transpose_a = _ops.convert_to_tensor(transpose_a, _dtypes.bool)
  transpose_b = _ops.convert_to_tensor(transpose_b, _dtypes.bool)
  _inputs_flat = [a, b, mask_indices, transpose_a, transpose_b]
  _attrs = None
  # Execute the op eagerly; 1 is the declared number of outputs.
  _result = _execute.execute(b"MaskedMatmul", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "MaskedMatmul", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
# Output names and typed result container for the WALSComputePartialLhsAndRhs
# op (returned as a namedtuple instead of a bare list of tensors).
_wals_compute_partial_lhs_and_rhs_outputs = ["partial_lhs", "partial_rhs"]
_WALSComputePartialLhsAndRhsOutput = _collections.namedtuple(
    "WALSComputePartialLhsAndRhs", _wals_compute_partial_lhs_and_rhs_outputs)
@tf_export('wals_compute_partial_lhs_and_rhs')
def wals_compute_partial_lhs_and_rhs(factors, factor_weights, unobserved_weights, input_weights, input_indices, input_values, input_block_size, input_is_transpose, name=None):
  r"""Computes the partial left-hand side and right-hand side of WALS update.

  Args:
    factors: A `Tensor` of type `float32`. Matrix of size m * k.
    factor_weights: A `Tensor` of type `float32`.
      Vector of size m. Corresponds to column weights
    unobserved_weights: A `Tensor` of type `float32`.
      Scalar. Weight for unobserved input entries.
    input_weights: A `Tensor` of type `float32`.
      Vector of size n. Corresponds to row weights.
    input_indices: A `Tensor` of type `int64`.
      Indices for the input SparseTensor.
    input_values: A `Tensor` of type `float32`.
      Values for the input SparseTensor.
    input_block_size: A `Tensor` of type `int64`.
      Scalar. Number of rows spanned by input.
    input_is_transpose: A `Tensor` of type `bool`.
      If true, logically transposes the input for processing.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (partial_lhs, partial_rhs).

    partial_lhs: A `Tensor` of type `float32`. 3-D tensor with size input_block_size x k x k.
    partial_rhs: A `Tensor` of type `float32`. Matrix with size input_block_size x k.
  """
  # Machine-generated dispatcher: graph mode builds an op via the op-def
  # library; eager mode tries the C fast path and falls back on failure.
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: create the node and record gradients.
    _, _, _op = _op_def_lib._apply_op_helper(
        "WALSComputePartialLhsAndRhs", factors=factors,
        factor_weights=factor_weights, unobserved_weights=unobserved_weights,
        input_weights=input_weights, input_indices=input_indices,
        input_values=input_values, input_block_size=input_block_size,
        input_is_transpose=input_is_transpose, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = None
    _execute.record_gradient(
        "WALSComputePartialLhsAndRhs", _inputs_flat, _attrs, _result, name)
    # Wrap the raw output list in the named result tuple.
    _result = _WALSComputePartialLhsAndRhsOutput._make(_result)
    return _result
  else:
    try:
      # Eager fast path implemented in C.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
          _ctx._context_handle, _ctx._eager_context.device_name,
          "WALSComputePartialLhsAndRhs", name, _ctx._post_execution_callbacks,
          factors, factor_weights, unobserved_weights, input_weights,
          input_indices, input_values, input_block_size, input_is_transpose)
      _result = _WALSComputePartialLhsAndRhsOutput._make(_result)
      return _result
    except _core._FallbackException:
      # Fast path unsupported for these inputs: use the Python fallback.
      return wals_compute_partial_lhs_and_rhs_eager_fallback(
          factors, factor_weights, unobserved_weights, input_weights,
          input_indices, input_values, input_block_size, input_is_transpose,
          name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Re-raise C-level status errors as Python exceptions, with op name.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
def wals_compute_partial_lhs_and_rhs_eager_fallback(factors, factor_weights, unobserved_weights, input_weights, input_indices, input_values, input_block_size, input_is_transpose, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function wals_compute_partial_lhs_and_rhs
  """
  # Machine-generated fallback used when the eager fast path raises
  # _FallbackException: explicitly convert every argument to a tensor of the
  # op's declared dtype, then run the op through the generic execute path.
  _ctx = ctx if ctx else _context.context()
  _ops.convert_to_tensor(factors, _dtypes.float32)
  factors = _ops.convert_to_tensor(factors, _dtypes.float32)
  factor_weights = _ops.convert_to_tensor(factor_weights, _dtypes.float32)
  unobserved_weights = _ops.convert_to_tensor(unobserved_weights, _dtypes.float32)
  input_weights = _ops.convert_to_tensor(input_weights, _dtypes.float32)
  input_indices = _ops.convert_to_tensor(input_indices, _dtypes.int64)
  input_values = _ops.convert_to_tensor(input_values, _dtypes.float32)
  input_block_size = _ops.convert_to_tensor(input_block_size, _dtypes.int64)
  input_is_transpose = _ops.convert_to_tensor(input_is_transpose, _dtypes.bool)
  _inputs_flat = [factors, factor_weights, unobserved_weights, input_weights, input_indices, input_values, input_block_size, input_is_transpose]
  _attrs = None
  # 2 == number of op outputs (partial_lhs, partial_rhs).
  _result = _execute.execute(b"WALSComputePartialLhsAndRhs", 2,
                             inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,
                             name=name)
  _execute.record_gradient(
      "WALSComputePartialLhsAndRhs", _inputs_flat, _attrs, _result, name)
  # Wrap the raw output list in the generated namedtuple.
  _result = _WALSComputePartialLhsAndRhsOutput._make(_result)
  return _result
def _InitOpDefLibrary(op_list_proto_bytes):
  """Parse a serialized OpList proto, register it, and return an OpDefLibrary."""
  parsed_ops = _op_def_pb2.OpList()
  parsed_ops.ParseFromString(op_list_proto_bytes)
  _op_def_registry.register_op_list(parsed_ops)
  library = _op_def_library.OpDefLibrary()
  library.add_op_list(parsed_ops)
  return library
# op {
# name: "MaskedMatmul"
# input_arg {
# name: "a"
# type: DT_FLOAT
# }
# input_arg {
# name: "b"
# type: DT_FLOAT
# }
# input_arg {
# name: "mask_indices"
# type: DT_INT64
# }
# input_arg {
# name: "transpose_a"
# type: DT_BOOL
# }
# input_arg {
# name: "transpose_b"
# type: DT_BOOL
# }
# output_arg {
# name: "prod_values"
# type: DT_FLOAT
# }
# }
# op {
# name: "WALSComputePartialLhsAndRhs"
# input_arg {
# name: "factors"
# type: DT_FLOAT
# }
# input_arg {
# name: "factor_weights"
# type: DT_FLOAT
# }
# input_arg {
# name: "unobserved_weights"
# type: DT_FLOAT
# }
# input_arg {
# name: "input_weights"
# type: DT_FLOAT
# }
# input_arg {
# name: "input_indices"
# type: DT_INT64
# }
# input_arg {
# name: "input_values"
# type: DT_FLOAT
# }
# input_arg {
# name: "input_block_size"
# type: DT_INT64
# }
# input_arg {
# name: "input_is_transpose"
# type: DT_BOOL
# }
# output_arg {
# name: "partial_lhs"
# type: DT_FLOAT
# }
# output_arg {
# name: "partial_rhs"
# type: DT_FLOAT
# }
# }
# Module-level OpDefLibrary built from the serialized OpList proto above
# (MaskedMatmul and WALSComputePartialLhsAndRhs); used by the graph-mode
# apply-op helpers in this generated module.
_op_def_lib = _InitOpDefLibrary(b"\na\n\014MaskedMatmul\022\005\n\001a\030\001\022\005\n\001b\030\001\022\020\n\014mask_indices\030\t\022\017\n\013transpose_a\030\n\022\017\n\013transpose_b\030\n\032\017\n\013prod_values\030\001\n\336\001\n\033WALSComputePartialLhsAndRhs\022\013\n\007factors\030\001\022\022\n\016factor_weights\030\001\022\026\n\022unobserved_weights\030\001\022\021\n\rinput_weights\030\001\022\021\n\rinput_indices\030\t\022\020\n\014input_values\030\001\022\024\n\020input_block_size\030\t\022\026\n\022input_is_transpose\030\n\032\017\n\013partial_lhs\030\001\032\017\n\013partial_rhs\030\001")
|
import time
import sys
from signalrcore.hub_connection_builder import HubConnectionBuilder
def input_with_default(input_text, default_value):
    """Prompt the user; return default_value when the reply is empty or whitespace."""
    reply = input(input_text.format(default_value))
    if reply is None or not reply.strip():
        return default_value
    return reply
# Ask for the SignalR hub URL (defaults to a local dev server) and connect.
server_url = input_with_default('Enter your server url(default: {0}): ', "ws://localhost:57957/streamHub")
hub_connection = HubConnectionBuilder().with_url(server_url).build()
hub_connection.start()
time.sleep(10)  # crude fixed wait for the websocket handshake before streaming
def bye(error, x):
    """Terminal callback for the stream: report the outcome, then stop the
    hub connection and exit the process.

    Args:
        error: True when invoked from the stream's error callback.
        x: error payload (when error) or the completion value.
    """
    if error:
        print("error {0}".format(x))
    else:
        print("complete! ")
    global hub_connection  # module-level connection created above
    hub_connection.stop()
    sys.exit(0)
# Invoke the server-side "Counter" streaming method with args [10, 500] and
# wire up observer callbacks; bye() stops the connection and exits the
# process on completion or error.
hub_connection.stream(
    "Counter",
    [10, 500]).subscribe({
        "next": lambda x: print("next callback: ", x),
        "complete": lambda x: bye(False, x),
        "error": lambda x: bye(True, x)
    })
|
from django.urls import path
from . import views
# URL routes for the restaurant/reservation app.
urlpatterns = [
    path('',views.index,name="index"),
    path('list',views.res_list,name="list"),
    path('search',views.res_search,name="search"),
    path('restaurants/<int:id>',views.res_detail,name="detail"),
    path('create_reservation/<int:id>',views.create_reservation,name="reservation"),
    # NOTE(review): the next two routes share name="history", so
    # reverse('history') resolves to only the last one registered — confirm
    # which target is intended.
    path('my_reservations/active',views.my_reservations_active,name="history"),
    path('my_reservations/history',views.my_reservations_history,name="history"),
    # NOTE(review): name="reservations" is duplicated on the next two routes
    # as well; reverse('reservations') hits only the last one.
    path('reservations/active',views.get_reservations_active,name="reservations"),
    path('reservations/completed',views.get_reservations_completed,name="reservations"),
    path('reservation_accept/<int:id>',views.reservation_accept,name="accept"),
    path('reservation_reject/<int:id>',views.reservation_reject,name="reject"),
    # NOTE(review): "reservarition" looks like a typo for "reservation";
    # left unchanged because templates may reverse() these names.
    path('reservation_delete/<int:id>',views.reservation_delete,name="reservarition_delete"),
    path('reservation_edit/<int:id>',views.reservation_edit,name="reservarition_edit"),
    path('edit_details/<int:id>',views.edit_restaurant_details,name="edit_detail"),
    path('edit_menu/<int:id>',views.edit_restaurant_menu,name="edit_menu"),
    path('edit_profile/<int:id>',views.edit_restaurant_profile,name="edit_profile"),
    path('edit_about/<int:id>',views.edit_restaurant_about,name="edit_about"),
    path('add_relatedimg/<int:id>',views.add_related_image,name="add_related"),
    path('delete_restaurant/<int:id>',views.delete_restaurant,name="delete"),
    path('create_review/<int:id>',views.create_review,name="review"),
]
from django.db.models import Count, Max, F, Value
from codice import settings
from files.models import File
from repos.models import Branch, Repository
# Percentage-of-max-changes cutoff above which a file counts as a hotspot.
hotspot_threshold = settings.CODICE_HOT_SPOTS_THRESHOLD
def max_changes(repo: Repository, branch: Branch):
    """Return the highest per-file change count in repo/branch plus 1.0 (at least 1.0)."""
    aggregated = (
        File.objects.filter(repository=repo, branch=branch, is_code=True)
        .values('id')
        .aggregate(max_change=Max('changes'))
    )
    highest = aggregated['max_change'] or 0.0
    return highest + 1.0
def get_hotspots(repo: Repository, branch: Branch):
    """Code files whose change count, as a percent of the branch maximum,
    exceeds hotspot_threshold; ordered most-changed first."""
    ceiling = max_changes(repo, branch)
    code_files = File.objects.filter(repository=repo, branch=branch, is_code=True)
    return (
        code_files.values('id', 'filename')
        .annotate(percent=(F('changes') * Value(100.0)) / Value(ceiling))
        .filter(percent__gt=hotspot_threshold)
        .order_by('-changes')
    )
def count_hotspots(repo: Repository, branch: Branch):
    """Number of hotspot files in repo/branch (see get_hotspots)."""
    hotspots = get_hotspots(repo, branch)
    return hotspots.count()
|
from cupyx.scipy.ndimage.filters import correlate # NOQA
from cupyx.scipy.ndimage.filters import convolve # NOQA
from cupyx.scipy.ndimage.interpolation import affine_transform # NOQA
from cupyx.scipy.ndimage.interpolation import map_coordinates # NOQA
from cupyx.scipy.ndimage.interpolation import rotate # NOQA
from cupyx.scipy.ndimage.interpolation import shift # NOQA
from cupyx.scipy.ndimage.interpolation import zoom # NOQA
|
# -*- coding: utf-8 -*-
import os,sqlite3
# Recreate a throwaway SQLite database next to this script.
db_file = os.path.join(os.path.dirname(__file__),'test.db')
if os.path.isfile(db_file):
    os.remove(db_file)
conn = sqlite3.connect(db_file)
cursor = conn.cursor()
# Sample rows; only used by the commented-out executemany below.
users = [('A-001', 'Adam', 95),
         ('A-002', 'Bart', 62),
         ('A-003', 'Lisa', 78)]
cursor.execute('create table user(id varchar(20) primary key, name varchar(20), score int)')
# NOTE(review): these inserts are never committed, so they are only visible
# on this connection — sufficient for the single query below.
cursor.execute(r"insert into user values('A-001', 'Adam', 95)")
cursor.execute(r"insert into user values ('A-002', 'Bart', 62)")
cursor.execute(r"insert into user values ('A-003', 'Lisa', 78)")
# cursor.executemany(r"insert into user values(?,?,?)",users)
def get_score_in(low, high):
    """Return names of users whose score lies in [low, high], ascending by score.

    Note: closes the module-level cursor and connection on the way out, so
    this script can call it only once per process.
    """
    try:
        cursor.execute(
            r"select name from user where score>=? and score<=? order by score",
            (low, high))
        matched = [row[0] for row in cursor.fetchall()]
        print('get_score_in(%s,%s) == %s, get_score_in(%s,%s)' % (low, high, matched, low, high))
    finally:
        cursor.close()
        conn.close()
    return matched
# Earlier example calls are commented out because get_score_in closes the
# shared connection and can only run once per process.
# get_score_in(80, 95)
# get_score_in(60, 80)
get_score_in(60, 100)
import requests
import re
class GenderResponse:
    """Wrapper for API responses that classify a personal name into a gender.

    Attributes:
        ID (str): request ID
        first_name (str): classified first name
        last_name (str): classified last name
        likely_gender (str): inferred gender ('male' or 'female')
        gender_scale (int): gender scale (-1 or 1)
        score (float): precision score, 0 to 50
        probability_calibrated (float): calibrated probability the inference is correct
    """

    # Class-level defaults, overwritten per instance in __init__.
    ID = ""
    first_name = ""
    last_name = ""
    likely_gender = ""
    gender_scale = ""
    score = 0.0
    probability_calibrated = 0.0

    def __init__(self, api_response: dict):
        """Populate attributes from the JSON dict of a NamsorAPI response."""
        self.ID = api_response['id']
        self.first_name = api_response['firstName']
        self.last_name = api_response['lastName']
        self.likely_gender = api_response['likelyGender']
        self.gender_scale = int(api_response['genderScale'])
        self.score = float(api_response['score'])
        self.probability_calibrated = float(api_response['probabilityCalibrated'])
# Initializing the GenderResponse attributes with values of corresponding attribute names or keys of the API responsein json format
class GenderFullResponse:
    """Wrapper for POST responses that classify a full name into a gender.

    Attributes:
        ID (str): request ID
        name (str): the full name that was classified
        likely_gender (str): inferred gender ('male' or 'female')
        gender_scale (int): gender scale (-1 or 1)
        score (float): precision score, 0 to 50
    """

    # Class-level defaults, overwritten per instance in __init__.
    ID = ""
    name = ""
    likely_gender = ""
    gender_scale = ""
    score = 0.0

    def __init__(self, api_response: dict):
        """Populate attributes from the JSON dict of a NamsorAPI response."""
        self.ID = api_response['id']
        self.name = api_response['name']
        self.likely_gender = api_response['likelyGender']
        self.gender_scale = int(api_response['genderScale'])
        self.score = float(api_response['score'])
class OriginResponse:
    """Wrapper for responses that infer the likely origin of a personal name.

    Attributes:
        ID (str): request ID
        first_name (str): classified first name
        last_name (str): classified last name
        score (float): precision score, 0 to 50
        country_origin (str): most likely country of origin
        country_origin_alt (str): alternative country of origin
        region_origin (str): region of origin
        top_region_origin (str): top-level region of origin
        sub_region_origin (str): sub-region of origin

    Note: likely_gender is declared as a class default but is never populated
    from the API response.
    """

    # Class-level defaults, overwritten per instance in __init__.
    ID = ""
    first_name = ""
    last_name = ""
    likely_gender = ""
    score = 0.0
    # Maybe use the country codes class here?
    country_origin = ""
    country_origin_alt = ""
    region_origin = ""
    top_region_origin = ""
    sub_region_origin = ""

    def __init__(self, api_response: dict):
        """Populate attributes from the JSON dict of a NamsorAPI response."""
        self.ID = api_response['id']
        self.first_name = api_response['firstName']
        self.last_name = api_response['lastName']
        self.country_origin = api_response['countryOrigin']
        self.country_origin_alt = api_response['countryOriginAlt']
        self.score = float(api_response['score'])
        self.region_origin = api_response['regionOrigin']
        self.top_region_origin = api_response['topRegionOrigin']
        self.sub_region_origin = api_response['subRegionOrigin']
class CountryResponse:
    """Wrapper for responses that infer the likely country of a personal name.

    Attributes set per instance: ID, name, score (float), country,
    country_alt, region, top_region, sub_region.
    """

    def __init__(self, api_response: dict):
        """Populate attributes from the JSON dict of a NamsorAPI response.

        Args:
            api_response (dict): JSON dict of a NamsorAPI GET/POST response.
        """
        # attribute name -> response key
        field_map = {
            'ID': 'id',
            'name': 'name',
            'country': 'country',
            'country_alt': 'countryAlt',
            'region': 'region',
            'top_region': 'topRegion',
            'sub_region': 'subRegion',
        }
        for attr, key in field_map.items():
            setattr(self, attr, api_response[key])
        self.score = float(api_response['score'])
class RaceEthnicityResponse:
    """Wrapper for responses that infer the ethnicity of a personal name.

    Attributes:
        ID (str): request ID
        first_name (str): classified first name
        last_name (str): classified last name
        score: precision score, 0 to 50 (stored exactly as returned by the API)
        race_ethnicity (str): most likely ethnicity
        race_ethnicity_alt (str): alternative ethnicity
    """

    # Class-level defaults, overwritten per instance in __init__.
    ID = ""
    first_name = ""
    last_name = ""
    race_ethnicity_alt = "W_NL"
    race_ethnicity = "W_NL"
    score = 0

    def __init__(self, api_response: dict):
        """Populate attributes from the JSON dict of a NamsorAPI response."""
        self.ID = api_response['id']
        self.first_name = api_response['firstName']
        self.last_name = api_response['lastName']
        self.race_ethnicity = api_response['raceEthnicity']
        self.race_ethnicity_alt = api_response['raceEthnicityAlt']
        # NOTE: unlike the other wrappers, score is not coerced to float here.
        self.score = api_response['score']
class DiasporaResponse:
    """Wrapper for the diaspora response object.

    Attributes:
        ID (str): request ID
        first_name (str): classified first name
        last_name (str): classified last name
        score (float): precision score, 0 to 50
        ethnicity (str): most likely ethnicity
        ethnicity_alt (str): alternative ethnicity
        lifted (str): 'lifted' flag from the response (semantics per Namsor API)
        country (str): ISO2 code of the likely country
    """

    # Class-level defaults, overwritten per instance in __init__.
    ID = ""
    first_name = ""
    last_name = ""
    score = 0.0
    ethnicity_alt = ""
    ethnicity = ""
    lifted = ""
    country = ""

    def __init__(self, api_response: dict):
        """Populate attributes from the JSON dict of a NamsorAPI response."""
        self.ID = api_response['id']
        self.first_name = api_response['firstName']
        self.last_name = api_response['lastName']
        self.score = float(api_response['score'])
        self.ethnicity_alt = api_response['ethnicityAlt']
        self.ethnicity = api_response['ethnicity']
        self.lifted = api_response['lifted']
        self.country = api_response['countryIso2']
class ParseNameResponse:
    """Wrapper for the response object of the parseName routes.

    Attributes:
        ID (str): request ID
        name (str): the full name that was parsed
        name_parser_type (NameParserTypeWrapper): first/last-name word counts
        name_parser_type_alt (NameParserTypeWrapper): alternative word counts
        first_last_name (FirstLastNameWrapper): most likely first/last split
        score: precision score, 0 to 50
    """

    # Class-level defaults, overwritten per instance in __init__.
    ID = ""
    name = ""
    name_parser_type = ""
    name_parser_type_alt = ""
    first_last_name = ""
    score = ""

    def __init__(self, api_response: dict):
        """Populate attributes from the JSON dict of a NamsorAPI response."""
        self.ID = api_response['id']
        self.name = api_response['name']
        self.name_parser_type = NameParserTypeWrapper(api_response['nameParserType'])
        self.name_parser_type_alt = NameParserTypeWrapper(api_response['nameParserTypeAlt'])
        self.first_last_name = FirstLastNameWrapper(api_response['firstLastName'])
        self.score = api_response['score']
class NameParserTypeWrapper:
    """Wrapper for the 'nameParserType' value in parseName response objects.

    The raw value is a string such as "FN1LN2" or "LN2FN1" encoding how many
    words of the full name were classified as first names and last names.

    Attributes:
        first_name_count (int): number of words classified as first names.
        last_name_count (int): number of words classified as last names.
    """

    _raw_string = ""
    first_name_count = 0
    last_name_count = 0

    def __init__(self, raw_string: str):
        """Parse raw_string ('FN<i>LN<j>' or 'LN<j>FN<i>') into the two counts.

        Args:
            raw_string (str): raw value of the 'nameParserType' key. May be
                None or the literal string "null", in which case both counts
                remain 0.
        """
        self.raw_string = raw_string
        # Idiom fix: compare to None with 'is not'; also removed a leftover
        # debug print(self) that fired on every construction.
        if raw_string is not None and raw_string != "null":
            if raw_string.startswith("LN"):
                match = re.fullmatch(r"LN([0-9]+)FN([0-9]+)", raw_string)
                # Bug fix: in the LN-first form the FIRST group is the
                # last-name count, so the assignments are swapped relative
                # to the FN-first form (the original assigned the LN count
                # to first_name_count).
                self.last_name_count = int(match.group(1))
                self.first_name_count = int(match.group(2))
            else:
                match = re.fullmatch(r"FN([0-9]+)LN([0-9]+)", raw_string)
                self.first_name_count = int(match.group(1))
                self.last_name_count = int(match.group(2))

    def __repr__(self):
        return self.raw_string

    def __eq__(self, other):
        return self.first_name_count == other.first_name_count \
            and self.last_name_count == other.last_name_count \
            and self.raw_string == str(other)
class FirstLastNameWrapper:
    """Wrapper for the 'firstLastName' value in parseName response objects.

    Attributes:
        ID (str): request ID
        first_name (str): portion of the full name classified as the first name
        last_name (str): portion of the full name classified as the last name
    """

    # Class-level defaults, overwritten per instance in __init__.
    ID = ""
    first_name = ""
    last_name = ""

    def __init__(self, raw_dict: dict):
        """Populate attributes from the raw 'firstLastName' JSON object."""
        self.ID = raw_dict['id']
        self.first_name = raw_dict['firstName']
        self.last_name = raw_dict['lastName']

    def __repr__(self):
        return f'First Name: {self.first_name} | Last Name: {self.last_name}'

    def __eq__(self, other):
        same_first = self.first_name == other.first_name
        same_last = self.last_name == other.last_name
        return same_first and same_last and self.ID == other.ID
|
import collections
import sys
import pip
import json
import changelogs
import urllib.request
from packaging import version
from urllib.error import HTTPError, URLError
from distutils.version import LooseVersion
# Lower-case substrings that mark a changelog entry as security-relevant
# (matched by _string_contains_security_keywords).
SECURITY_NOTICE_KEYWORDS = [
    'security', 'vulnerability', 'cve', 'xss', 'sql injection',
]
# Column key -> header text for the table printed by _display_table.
DISPLAY_TABLE_LABELS = {
    'status': 'Status',
    'package': 'Package',
    'installed': 'Installed',
    'latest': 'Latest',
    'versions': 'Available Updates',
    'notices': 'Security Notices',
}
class PackageVersion(LooseVersion):
    """LooseVersion that compares via packaging.version (PEP 440) when possible.

    Falls back to LooseVersion's own comparison for version strings that
    packaging.version cannot parse.
    """

    def _cmp(self, other):
        """Return -1/0/1 comparing self.vstring with other (string or version)."""
        try:
            mine = version.parse(self.vstring)
            theirs = version.parse(str(other))
            if mine < theirs:
                return -1
            elif mine > theirs:
                return 1
            else:
                return 0
        # Bug fix: was a bare 'except:', which also swallowed SystemExit and
        # KeyboardInterrupt; only parse/compare failures should fall back.
        except Exception:
            return super(PackageVersion, self)._cmp(other)
def get_pkg_info(package):
    """Fetch and decode the PyPI JSON metadata for *package*.

    Args:
        package: project name on PyPI.
    Returns:
        dict parsed from the /pypi/<package>/json endpoint.
    Raises:
        urllib.error.HTTPError / URLError on network failure.
    """
    url = 'https://pypi.python.org/pypi/%s/json' % package
    # Bug fix: http.client.HTTPResponse has no readall() method (that call
    # raised AttributeError); use read(). The context manager also closes
    # the response deterministically.
    with urllib.request.urlopen(url) as f:
        return json.loads(f.read().decode('utf-8'))
def get_versions(package):
    """Return (version_string, release_info) pairs from a PyPI JSON dict,
    newest first (ordered by PackageVersion)."""
    release_items = package['releases'].items()
    return sorted(release_items,
                  key=lambda item: PackageVersion(item[0]),
                  reverse=True)
def get_version_range(package, installed_version):
    """Return the (version, release_info) pairs strictly newer than installed_version."""
    floor = LooseVersion(installed_version)
    newer = []
    for ver, info in get_versions(package):
        if PackageVersion(ver) > floor:
            newer.append((ver, info))
    return newer
def get_latest_version(package):
    """Return the newest release's version string, or None when there are no releases."""
    ordered = get_versions(package)
    if not ordered:
        return None
    newest_pair = ordered[0]
    return newest_pair[0]
def get_version_diff(package, version_range):
    """Pair each version in version_range that is newer than the installed
    package with its changelog text; empty list when nothing matches."""
    logs = get_changelogs(package.project_name)
    installed = PackageVersion(package.version)
    diff = []
    for candidate, _info in version_range:
        for release_version, changelog in logs:
            if release_version == candidate and installed < PackageVersion(candidate):
                diff.append((candidate, changelog))
    return diff
def get_pkg_security_releases(version_diff):
    """Return the (version, changelog) pairs whose changelog mentions a
    security keyword, or None when there are none."""
    flagged = [(ver, log) for ver, log in version_diff
               if _string_contains_security_keywords(log)]
    return flagged if flagged else None
def get_changelogs(package_name):
    """Return (version, changelog) pairs for package_name from the changelogs
    package, newest version first."""
    logs = changelogs.get(package_name, vendor='pypi')
    ordered_versions = sorted(logs.keys(), key=PackageVersion, reverse=True)
    return [(ver, logs[ver]) for ver in ordered_versions]
def get_updates(package, changelog_scan=True):
    """Build the update-report dict for one installed distribution.

    Args:
        package: a pkg_resources distribution (has project_name, version).
        changelog_scan: when True, also fetch changelogs and flag releases
            whose changelog mentions a security keyword.
    Returns:
        dict with 'package', 'installed', 'latest' and, when updates exist,
        'versions', 'changelogs', 'notices'; 'error' on network failure.
    """
    ret = {
        'package': package.project_name,
        'installed': package.version,
        'latest': None,
    }
    try:
        pkg = get_pkg_info(package.project_name)
        latest = get_latest_version(pkg)
        if latest:
            ret['latest'] = latest
            # Bug fix: compare parsed versions, not raw strings — the
            # lexicographic '10.0' > '9.0' is False, which hid updates.
            if PackageVersion(latest) > PackageVersion(package.version):
                version_range = get_version_range(pkg, package.version)
                # (The original rebuilt this same string once per loop
                # iteration; set it once, only when updates exist.)
                if version_range:
                    ret['versions'] = ', '.join(
                        ver for ver, _info in version_range)
                if changelog_scan:
                    ret['changelogs'] = {}
                    version_diff = get_version_diff(package, version_range)
                    for diff_version, diff_changelog in version_diff:
                        # Keep non-empty changelog lines (ignoring '-' rules).
                        ret['changelogs'][diff_version] = [
                            line.strip()
                            for line in diff_changelog.strip().split("\n")
                            if len(line.replace('-', '').strip()) > 0]
                    sec_releases = get_pkg_security_releases(version_diff)
                    if sec_releases is not None:
                        ret['notices'] = ', '.join(
                            '<%s' % sec_version
                            for sec_version, _log in sec_releases)
    except (HTTPError, URLError) as e:
        ret['error'] = str(e)
    return ret
def show_updates(changelog_scan=True, all_packages=False, json_out=False, filter_packages=[]):
    """Collect update information for installed distributions and print it.

    Args:
        changelog_scan: also fetch changelogs and flag security notices.
        all_packages: include up-to-date packages in the table output.
        json_out: dump the raw results as JSON instead of a table.
        filter_packages: restrict the scan to these project names.
            NOTE(review): mutable default argument; harmless because it is
            never mutated here, but a None sentinel would be safer.
    """
    # NOTE(review): pip.get_installed_distributions() was removed from pip's
    # importable API in pip 10 — this module requires an old pinned pip;
    # confirm against the project's requirements.
    packages = sorted(pip.get_installed_distributions(),
                      key=lambda pkg: pkg.project_name.lower())
    packages_total = len(packages)
    if filter_packages:
        packages = filter(
            lambda pkg: pkg.project_name in filter_packages, packages)
        # Progress is counted against the requested names, not the matches.
        packages_total = len(filter_packages)
    packages_progress = 0
    updates = []
    if not json_out:
        _display_progress(packages_total)
    for p in packages:
        updates.append(get_updates(p, changelog_scan=changelog_scan))
        packages_progress += 1
        if not json_out:
            _display_progress(packages_total, packages_progress)
    if json_out:
        sys.stdout.write(json.dumps(updates, indent=4))
    else:
        _display_table(updates, show_notices=changelog_scan, show_all_packages=all_packages)
def _string_contains_security_keywords(string):
    """True when *string* contains any security keyword (case-insensitive)."""
    haystack = string.lower()
    return any(keyword in haystack for keyword in SECURITY_NOTICE_KEYWORDS)
def _get_column_lengths(rows, labels):
lens = {k: len(labels[k]) for k in labels.keys()}
for r in range(0, len(rows)):
for k, v in rows[r].items():
if k in labels.keys():
l = len(str(v))
if l > lens[k]:
lens[k] = l
return lens
def _display_table(rows, show_notices=False, show_all_packages=False):
    """Pretty-print the update results as an aligned ASCII table on stdout.

    Args:
        rows: list of dicts produced by get_updates(). Mutated in place —
            missing 'versions'/'notices'/'latest' keys are filled with
            defaults before printing.
        show_notices: append the 'Security Notices' column.
        show_all_packages: also print rows with no available updates.
    """
    lens = _get_column_lengths(rows, DISPLAY_TABLE_LABELS)
    columns = ['package', 'installed', 'latest', 'versions', ]
    if show_notices:
        columns.append('notices')
    # One left-aligned "{:<width}" field per column, joined with ' | '.
    row_format = " | ".join(["{:<%s}" % lens[column]
                             for column in columns]) + "\n"
    labels = row_format.format(
        *(DISPLAY_TABLE_LABELS[column] for column in columns))
    sys.stdout.write("-" * len(labels) + "\n")
    sys.stdout.write(labels)
    sys.stdout.write("-" * len(labels) + "\n")
    for row in rows:
        # Normalize optional keys so the format() call never raises KeyError.
        row['versions'] = row['versions'] if 'versions' in row else ''
        row['notices'] = row['notices'] if 'notices' in row else ''
        row['latest'] = row['latest'] if 'latest' in row and row['latest'] is not None else 'unknown'
        if show_all_packages or len(row['versions']) > 0:
            sys.stdout.write(row_format.format(
                *(row[column] for column in columns)))
    sys.stdout.write('\n')
def _display_progress(total, i=0):
percent = ("{0:.1f}").format(100 * (i / float(total)))
sys.stdout.write('\rFetching package information... %s%%\r' % percent)
if i < total:
sys.stdout.write('\rFetching package information... %s%%' % percent)
else:
sys.stdout.write('\r%s\r' % (' ' * 100))
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.