Dataset schema:

| Column | Type | Values |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 3 to 1.03M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 3 to 972 |
| max_stars_repo_name | string | length 6 to 130 |
| max_stars_repo_head_hexsha | string | length 40 to 78 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 | 1 to 191k, nullable |
| max_stars_repo_stars_event_min_datetime | string | length 24, nullable |
| max_stars_repo_stars_event_max_datetime | string | length 24, nullable |
| max_issues_repo_path | string | length 3 to 972 |
| max_issues_repo_name | string | length 6 to 130 |
| max_issues_repo_head_hexsha | string | length 40 to 78 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 | 1 to 116k, nullable |
| max_issues_repo_issues_event_min_datetime | string | length 24, nullable |
| max_issues_repo_issues_event_max_datetime | string | length 24, nullable |
| max_forks_repo_path | string | length 3 to 972 |
| max_forks_repo_name | string | length 6 to 130 |
| max_forks_repo_head_hexsha | string | length 40 to 78 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 | 1 to 105k, nullable |
| max_forks_repo_forks_event_min_datetime | string | length 24, nullable |
| max_forks_repo_forks_event_max_datetime | string | length 24, nullable |
| content | string | length 3 to 1.03M |
| avg_line_length | float64 | 1.13 to 941k |
| max_line_length | int64 | 2 to 941k |
| alphanum_fraction | float64 | 0 to 1 |
Row 1 (model/work.py from tdilauro/circulation-core):

- hexsha: 43e5ffb17205f0520730d2a9a25d19f485f42613
- size: 75,146
- ext: py
- lang: Python
- max_stars_repo_path: model/work.py
- max_stars_repo_name: tdilauro/circulation-core
- max_stars_repo_head_hexsha: 8086ca8cbedd5f4b2a0c44df97889d078ff79aac
- max_stars_repo_licenses: ["Apache-2.0"]
- max_stars_count: 1
- max_stars_repo_stars_event_min_datetime: 2021-11-16T00:58:43.000Z
- max_stars_repo_stars_event_max_datetime: 2021-11-16T00:58:43.000Z
- max_issues_repo_path: model/work.py
- max_issues_repo_name: tdilauro/circulation-core
- max_issues_repo_head_hexsha: 8086ca8cbedd5f4b2a0c44df97889d078ff79aac
- max_issues_repo_licenses: ["Apache-2.0"]
- max_issues_count: 44
- max_issues_repo_issues_event_min_datetime: 2022-01-20T01:31:32.000Z
- max_issues_repo_issues_event_max_datetime: 2022-03-31T01:50:41.000Z
- max_forks_repo_path: model/work.py
- max_forks_repo_name: tdilauro/circulation-core
- max_forks_repo_head_hexsha: 8086ca8cbedd5f4b2a0c44df97889d078ff79aac
- max_forks_repo_licenses: ["Apache-2.0"]
- max_forks_count: 1
- max_forks_repo_forks_event_min_datetime: 2021-05-12T19:11:52.000Z
- max_forks_repo_forks_event_max_datetime: 2021-05-12T19:11:52.000Z
- content:
# encoding: utf-8
# WorkGenre, Work
import logging
from collections import Counter
from sqlalchemy import (
Boolean,
Column,
DateTime,
Enum,
Float,
ForeignKey,
Integer,
Numeric,
String,
Unicode,
)
from sqlalchemy.dialects.postgresql import INT4RANGE
from sqlalchemy.ext.associationproxy import association_proxy
from sqlalchemy.orm import contains_eager, relationship
from sqlalchemy.orm.session import Session
from sqlalchemy.sql.expression import and_, case, join, literal_column, or_, select
from sqlalchemy.sql.functions import func
from ..classifier import Classifier, WorkClassifier
from ..config import CannotLoadConfiguration
from ..util import LanguageCodes
from ..util.datetime_helpers import utc_now
from . import (
Base,
PresentationCalculationPolicy,
flush,
get_one_or_create,
numericrange_to_string,
numericrange_to_tuple,
tuple_to_numericrange,
)
from .constants import DataSourceConstants
from .contributor import Contribution, Contributor
from .coverage import CoverageRecord, WorkCoverageRecord
from .datasource import DataSource
from .edition import Edition
from .identifier import Identifier
from .measurement import Measurement
class WorkGenre(Base):
"""An assignment of a genre to a work."""
__tablename__ = "workgenres"
id = Column(Integer, primary_key=True)
genre_id = Column(Integer, ForeignKey("genres.id"), index=True)
work_id = Column(Integer, ForeignKey("works.id"), index=True)
affinity = Column(Float, index=True, default=0)
@classmethod
def from_genre(cls, genre):
wg = WorkGenre()
wg.genre = genre
return wg
def __repr__(self):
return "%s (%d%%)" % (self.genre.name, self.affinity * 100)
class Work(Base):
APPEALS_URI = "http://librarysimplified.org/terms/appeals/"
CHARACTER_APPEAL = "Character"
LANGUAGE_APPEAL = "Language"
SETTING_APPEAL = "Setting"
STORY_APPEAL = "Story"
UNKNOWN_APPEAL = "Unknown"
NOT_APPLICABLE_APPEAL = "Not Applicable"
NO_APPEAL = "None"
CURRENTLY_AVAILABLE = "currently_available"
ALL = "all"
# If no quality data is available for a work, it will be assigned
# a default quality based on where we got it.
#
# The assumption is that a librarian would not have ordered a book
# if it didn't meet a minimum level of quality.
#
# For data sources where librarians tend to order big packages of
# books instead of selecting individual titles, the default
# quality is lower. For data sources where there is no curation at
# all, the default quality is zero.
#
# If there is absolutely no way to get quality data for a curated
# data source, each work is assigned the minimum level of quality
# necessary to show up in featured feeds.
default_quality_by_data_source = {
DataSourceConstants.GUTENBERG: 0,
DataSourceConstants.OVERDRIVE: 0.4,
DataSourceConstants.BIBLIOTHECA: 0.65,
DataSourceConstants.AXIS_360: 0.65,
DataSourceConstants.STANDARD_EBOOKS: 0.8,
DataSourceConstants.UNGLUE_IT: 0.4,
DataSourceConstants.PLYMPTON: 0.5,
}
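    # For example, a Work whose only LicensePool comes from Standard Ebooks
    # and which has no quality measurements of its own falls back to the 0.8
    # default above, while a Gutenberg-only Work defaults to 0.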
__tablename__ = "works"
id = Column(Integer, primary_key=True)
# One Work may have copies scattered across many LicensePools.
license_pools = relationship("LicensePool", backref="work", lazy="joined")
    # A Work takes its presentation metadata from a single Edition.
    # That Edition is itself a composite built from Editions derived from the
    # provider, the metadata wrangler, the admin interface, and so on.
presentation_edition_id = Column(Integer, ForeignKey("editions.id"), index=True)
# One Work may have many associated WorkCoverageRecords.
coverage_records = relationship(
"WorkCoverageRecord", backref="work", cascade="all, delete-orphan"
)
# One Work may be associated with many CustomListEntries.
# However, a CustomListEntry may lose its Work without
# ceasing to exist.
custom_list_entries = relationship("CustomListEntry", backref="work")
# One Work may have multiple CachedFeeds, and if a CachedFeed
# loses its Work, it ceases to exist.
cached_feeds = relationship(
"CachedFeed", backref="work", cascade="all, delete-orphan"
)
# One Work may participate in many WorkGenre assignments.
genres = association_proxy("work_genres", "genre", creator=WorkGenre.from_genre)
work_genres = relationship(
"WorkGenre", backref="work", cascade="all, delete-orphan"
)
audience = Column(Unicode, index=True)
target_age = Column(INT4RANGE, index=True)
fiction = Column(Boolean, index=True)
summary_id = Column(
Integer,
ForeignKey("resources.id", use_alter=True, name="fk_works_summary_id"),
index=True,
)
# This gives us a convenient place to store a cleaned-up version of
# the content of the summary Resource.
summary_text = Column(Unicode)
# The overall suitability of this work for unsolicited
# presentation to a patron. This is a calculated value taking both
# rating and popularity into account.
quality = Column(Numeric(4, 3), index=True)
# The overall rating given to this work.
rating = Column(Float, index=True)
# The overall current popularity of this work.
popularity = Column(Float, index=True)
appeal_type = Enum(
CHARACTER_APPEAL,
LANGUAGE_APPEAL,
SETTING_APPEAL,
STORY_APPEAL,
NOT_APPLICABLE_APPEAL,
NO_APPEAL,
UNKNOWN_APPEAL,
name="appeal",
)
primary_appeal = Column(appeal_type, default=None, index=True)
secondary_appeal = Column(appeal_type, default=None, index=True)
appeal_character = Column(Float, default=None, index=True)
appeal_language = Column(Float, default=None, index=True)
appeal_setting = Column(Float, default=None, index=True)
appeal_story = Column(Float, default=None, index=True)
# The last time the availability or metadata changed for this Work.
last_update_time = Column(DateTime(timezone=True), index=True)
# This is set to True once all metadata and availability
# information has been obtained for this Work. Until this is True,
# the work will not show up in feeds.
presentation_ready = Column(Boolean, default=False, index=True)
# This is the last time we tried to make this work presentation ready.
presentation_ready_attempt = Column(
DateTime(timezone=True), default=None, index=True
)
    # This is the error that occurred while trying to make this Work
# presentation ready. Until this is cleared, no further attempt
# will be made to make the Work presentation ready.
presentation_ready_exception = Column(Unicode, default=None, index=True)
# A precalculated OPDS entry containing all metadata about this
# work that would be relevant to display to a library patron.
simple_opds_entry = Column(Unicode, default=None)
# A precalculated OPDS entry containing all metadata about this
# work that would be relevant to display in a machine-to-machine
# integration context.
verbose_opds_entry = Column(Unicode, default=None)
# A precalculated MARC record containing metadata about this
# work that would be relevant to display in a library's public
# catalog.
marc_record = Column(String, default=None)
# These fields are potentially large and can be deferred if you
# don't need all the data in a Work.
LARGE_FIELDS = [
"simple_opds_entry",
"verbose_opds_entry",
"marc_record",
"summary_text",
]
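    # A minimal sketch (not taken from this codebase) of how a caller might
    # avoid loading these columns, assuming standard SQLAlchemy ORM usage:
    #
    #     from sqlalchemy.orm import defer
    #
    #     light_works = _db.query(Work).options(
    #         *[defer(field) for field in Work.LARGE_FIELDS]
    #     )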
@property
def title(self):
if self.presentation_edition:
return self.presentation_edition.title
return None
@property
def sort_title(self):
if not self.presentation_edition:
return None
return self.presentation_edition.sort_title or self.presentation_edition.title
@property
def subtitle(self):
if not self.presentation_edition:
return None
return self.presentation_edition.subtitle
@property
def series(self):
if not self.presentation_edition:
return None
return self.presentation_edition.series
@property
def series_position(self):
if not self.presentation_edition:
return None
return self.presentation_edition.series_position
@property
def author(self):
if self.presentation_edition:
return self.presentation_edition.author
return None
@property
def sort_author(self):
if not self.presentation_edition:
return None
return self.presentation_edition.sort_author or self.presentation_edition.author
@property
def language(self):
if self.presentation_edition:
return self.presentation_edition.language
return None
@property
def language_code(self):
if not self.presentation_edition:
return None
return self.presentation_edition.language_code
@property
def publisher(self):
if not self.presentation_edition:
return None
return self.presentation_edition.publisher
@property
def imprint(self):
if not self.presentation_edition:
return None
return self.presentation_edition.imprint
@property
def cover_full_url(self):
if not self.presentation_edition:
return None
return self.presentation_edition.cover_full_url
@property
def cover_thumbnail_url(self):
if not self.presentation_edition:
return None
return self.presentation_edition.cover_thumbnail_url
@property
def target_age_string(self):
return numericrange_to_string(self.target_age)
@property
def has_open_access_license(self):
return any(x.open_access for x in self.license_pools)
@property
def complaints(self):
        complaints = []
        for pool in self.license_pools:
            complaints.extend(pool.complaints)
        return complaints
def __repr__(self):
return '<Work #%s "%s" (by %s) %s lang=%s (%s lp)>' % (
self.id,
self.title,
self.author,
", ".join([g.name for g in self.genres]),
self.language,
len(self.license_pools),
)
@classmethod
def missing_coverage_from(
cls, _db, operation=None, count_as_covered=None, count_as_missing_before=None
):
"""Find Works which have no WorkCoverageRecord for the given
`operation`.
"""
clause = and_(
Work.id == WorkCoverageRecord.work_id,
WorkCoverageRecord.operation == operation,
)
q = (
_db.query(Work)
.outerjoin(WorkCoverageRecord, clause)
.order_by(Work.id, WorkCoverageRecord.id)
)
missing = WorkCoverageRecord.not_covered(
count_as_covered, count_as_missing_before
)
q2 = q.filter(missing)
return q2
@classmethod
def for_unchecked_subjects(cls, _db):
        """Find all Works whose LicensePools have an Identifier that
        is classified under an unchecked Subject.

        This is a good indicator that the Work needs to be
        reclassified.
        """
        from .classification import Classification, Subject
        from .licensing import LicensePool
qu = (
_db.query(Work)
.join(Work.license_pools)
.join(LicensePool.identifier)
.join(Identifier.classifications)
.join(Classification.subject)
)
return qu.filter(Subject.checked == False).order_by(Subject.id)
@classmethod
def _potential_open_access_works_for_permanent_work_id(
cls, _db, pwid, medium, language
):
"""Find all Works that might be suitable for use as the
canonical open-access Work for the given `pwid`, `medium`,
and `language`.
:return: A 2-tuple (pools, counts_by_work). `pools` is a set
            containing all affected LicensePools; `counts_by_work` is a
Counter tallying the number of affected LicensePools
associated with a given work.
"""
from .licensing import LicensePool
qu = (
_db.query(LicensePool)
.join(LicensePool.presentation_edition)
.filter(LicensePool.open_access == True)
.filter(Edition.permanent_work_id == pwid)
.filter(Edition.medium == medium)
.filter(Edition.language == language)
)
pools = set(qu.all())
# Build the Counter of Works that are eligible to represent
# this pwid/medium/language combination.
affected_licensepools_for_work = Counter()
for lp in pools:
work = lp.work
if not lp.work:
continue
if affected_licensepools_for_work[lp.work]:
# We already got this information earlier in the loop.
continue
pe = work.presentation_edition
if pe and (
pe.language != language
or pe.medium != medium
or pe.permanent_work_id != pwid
):
# This Work's presentation edition doesn't match
# this LicensePool's presentation edition.
# It would be better to create a brand new Work and
# remove this LicensePool from its current Work.
continue
affected_licensepools_for_work[lp.work] = len(
[x for x in pools if x.work == lp.work]
)
return pools, affected_licensepools_for_work
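    # For example, if three open-access LicensePools match the given
    # pwid/medium/language and two of them already belong to work_a while the
    # third belongs to work_b, this returns
    # ({lp1, lp2, lp3}, Counter({work_a: 2, work_b: 1})).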
@classmethod
def open_access_for_permanent_work_id(cls, _db, pwid, medium, language):
"""Find or create the Work encompassing all open-access LicensePools
whose presentation Editions have the given permanent work ID,
the given medium, and the given language.
This may result in the consolidation or splitting of Works, if
a book's permanent work ID has changed without
calculate_work() being called, or if the data is in an
inconsistent state for any other reason.
"""
is_new = False
(
licensepools,
licensepools_for_work,
) = cls._potential_open_access_works_for_permanent_work_id(
_db, pwid, medium, language
)
if not licensepools:
# There is no work for this PWID/medium/language combination
# because no LicensePools offer it.
return None, is_new
work = None
if len(licensepools_for_work) == 0:
# None of these LicensePools have a Work. Create a new one.
work = Work()
is_new = True
else:
# Pick the Work with the most LicensePools.
work, count = licensepools_for_work.most_common(1)[0]
# In the simple case, there will only be the one Work.
if len(licensepools_for_work) > 1:
# But in this case, for whatever reason (probably bad
# data caused by a bug) there's more than one
# Work. Merge the other Works into the one we chose
# earlier. (This is why we chose the work with the
# most LicensePools--it minimizes the disruption
# here.)
# First, make sure this Work is the exclusive
# open-access work for its permanent work ID.
# Otherwise the merge may fail.
work.make_exclusive_open_access_for_permanent_work_id(
pwid, medium, language
)
for needs_merge in list(licensepools_for_work.keys()):
if needs_merge != work:
# Make sure that Work we're about to merge has
# nothing but LicensePools whose permanent
# work ID matches the permanent work ID of the
# Work we're about to merge into.
needs_merge.make_exclusive_open_access_for_permanent_work_id(
pwid, medium, language
)
needs_merge.merge_into(work)
# At this point we have one, and only one, Work for this
# permanent work ID. Assign it to every LicensePool whose
# presentation Edition has that permanent work ID/medium/language
# combination.
for lp in licensepools:
lp.work = work
return work, is_new
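    # Hedged usage sketch -- the medium constant below is assumed to be
    # Edition.BOOK_MEDIUM; substitute whatever value the caller actually has:
    #
    #     work, is_new = Work.open_access_for_permanent_work_id(
    #         _db, pwid, Edition.BOOK_MEDIUM, "eng"
    #     )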
def make_exclusive_open_access_for_permanent_work_id(self, pwid, medium, language):
"""Ensure that every open-access LicensePool associated with this Work
has the given PWID and medium. Any non-open-access
LicensePool, and any LicensePool with a different PWID or a
different medium, is kicked out and assigned to a different
Work. LicensePools with no presentation edition or no PWID
are kicked out.
In most cases this Work will be the _only_ work for this PWID,
but inside open_access_for_permanent_work_id this is called as
a preparatory step for merging two Works, and after the call
(but before the merge) there may be two Works for a given PWID.
"""
_db = Session.object_session(self)
for pool in list(self.license_pools):
other_work = is_new = None
if not pool.open_access:
# This needs to have its own Work--we don't mix
# open-access and commercial versions of the same book.
pool.work = None
if pool.presentation_edition:
pool.presentation_edition.work = None
other_work, is_new = pool.calculate_work()
elif not pool.presentation_edition:
# A LicensePool with no presentation edition
# cannot have an associated Work.
logging.warning(
"LicensePool %r has no presentation edition, setting .work to None.",
pool,
)
pool.work = None
else:
e = pool.presentation_edition
this_pwid = e.permanent_work_id
if not this_pwid:
# A LicensePool with no permanent work ID
# cannot have an associated Work.
logging.warning(
"Presentation edition for LicensePool %r has no PWID, setting .work to None.",
pool,
)
e.work = None
pool.work = None
continue
if this_pwid != pwid or e.medium != medium or e.language != language:
# This LicensePool should not belong to this Work.
# Make sure it gets its own Work, creating a new one
# if necessary.
pool.work = None
pool.presentation_edition.work = None
other_work, is_new = Work.open_access_for_permanent_work_id(
_db, this_pwid, e.medium, e.language
)
if other_work and is_new:
other_work.calculate_presentation()
@property
def pwids(self):
"""Return the set of permanent work IDs associated with this Work.
There should only be one permanent work ID associated with a
given work, but if there is more than one, this will find all
of them.
"""
pwids = set()
for pool in self.license_pools:
if (
pool.presentation_edition
and pool.presentation_edition.permanent_work_id
):
pwids.add(pool.presentation_edition.permanent_work_id)
return pwids
def merge_into(self, other_work):
"""Merge this Work into another Work and delete it."""
# Neither the source nor the destination work may have any
# non-open-access LicensePools.
for w in self, other_work:
for pool in w.license_pools:
if not pool.open_access:
raise ValueError(
"Refusing to merge %r into %r because it would put an open-access LicensePool into the same work as a non-open-access LicensePool."
% (self, other_work)
)
my_pwids = self.pwids
other_pwids = other_work.pwids
if not my_pwids == other_pwids:
raise ValueError(
"Refusing to merge %r into %r because permanent work IDs don't match: %s vs. %s"
% (
self,
other_work,
",".join(sorted(my_pwids)),
",".join(sorted(other_pwids)),
)
)
# Every LicensePool associated with this work becomes
# associated instead with the other work.
for pool in self.license_pools:
other_work.license_pools.append(pool)
# All WorkGenres and WorkCoverageRecords for this Work are
# deleted. (WorkGenres are deleted via cascade.)
_db = Session.object_session(self)
for cr in self.coverage_records:
_db.delete(cr)
_db.delete(self)
other_work.calculate_presentation()
def set_summary(self, resource):
self.summary = resource
# TODO: clean up the content
if resource and resource.representation:
self.summary_text = resource.representation.unicode_content
else:
self.summary_text = ""
WorkCoverageRecord.add_for(self, operation=WorkCoverageRecord.SUMMARY_OPERATION)
@classmethod
def with_genre(cls, _db, genre):
"""Find all Works classified under the given genre."""
from .classification import Genre
if isinstance(genre, (bytes, str)):
genre, ignore = Genre.lookup(_db, genre)
return _db.query(Work).join(WorkGenre).filter(WorkGenre.genre == genre)
@classmethod
    def with_no_genres(cls, q):
"""Modify a query so it finds only Works that are not classified under
any genre."""
q = q.outerjoin(Work.work_genres)
q = q.options(contains_eager(Work.work_genres))
q = q.filter(WorkGenre.genre == None)
return q
@classmethod
def from_identifiers(cls, _db, identifiers, base_query=None, policy=None):
"""Returns all of the works that have one or more license_pools
associated with either an identifier in the given list or an
identifier considered equivalent to one of those listed.
:param policy: A PresentationCalculationPolicy, used to
determine how far to go when looking for equivalent
Identifiers. By default, this method will be very strict
about equivalencies.
"""
from .licensing import LicensePool
identifier_ids = [identifier.id for identifier in identifiers]
if not identifier_ids:
return None
if not base_query:
# A raw base query that makes no accommodations for works that are
# suppressed or otherwise undeliverable.
base_query = (
_db.query(Work).join(Work.license_pools).join(LicensePool.identifier)
)
if policy is None:
policy = PresentationCalculationPolicy(
equivalent_identifier_levels=1, equivalent_identifier_threshold=0.999
)
identifier_ids_subquery = (
Identifier.recursively_equivalent_identifier_ids_query(
Identifier.id, policy=policy
)
)
identifier_ids_subquery = identifier_ids_subquery.where(
Identifier.id.in_(identifier_ids)
)
query = base_query.filter(Identifier.id.in_(identifier_ids_subquery))
return query
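    # Usage sketch, assuming `identifier` is an Identifier attached to one of
    # the target Work's LicensePools (or equivalent to one that is):
    #
    #     works = Work.from_identifiers(_db, [identifier]).all()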
@classmethod
def reject_covers(cls, _db, works_or_identifiers, search_index_client=None):
"""Suppresses the currently visible covers of a number of Works"""
from .licensing import LicensePool
from .resource import Hyperlink, Resource
works = list(set(works_or_identifiers))
if not isinstance(works[0], cls):
# This assumes that everything in the provided list is the
# same class: either Work or Identifier.
works = cls.from_identifiers(_db, works_or_identifiers).all()
work_ids = [w.id for w in works]
if len(works) == 1:
logging.info("Suppressing cover for %r", works[0])
else:
logging.info("Supressing covers for %i Works", len(works))
cover_urls = list()
for work in works:
# Create a list of the URLs of the works' active cover images.
edition = work.presentation_edition
if edition:
if edition.cover_full_url:
cover_urls.append(edition.cover_full_url)
if edition.cover_thumbnail_url:
cover_urls.append(edition.cover_thumbnail_url)
if not cover_urls:
# All of the target Works have already had their
# covers suppressed. Nothing to see here.
return
covers = (
_db.query(Resource)
.join(Hyperlink.identifier)
.join(Identifier.licensed_through)
.filter(Resource.url.in_(cover_urls), LicensePool.work_id.in_(work_ids))
)
editions = list()
for cover in covers:
# Record a downvote that will dismiss the Resource.
cover.reject()
if len(cover.cover_editions) > 1:
editions += cover.cover_editions
flush(_db)
editions = list(set(editions))
if editions:
# More Editions and Works have been impacted by this cover
# suppression.
works += [ed.work for ed in editions if ed.work]
editions = [ed for ed in editions if not ed.work]
# Remove the cover from the Work and its Edition and reset
# cached OPDS entries.
policy = PresentationCalculationPolicy.reset_cover()
for work in works:
work.calculate_presentation(
policy=policy, search_index_client=search_index_client
)
for edition in editions:
edition.calculate_presentation(policy=policy)
_db.commit()
def reject_cover(self, search_index_client=None):
"""Suppresses the current cover of the Work"""
_db = Session.object_session(self)
        self.reject_covers(_db, [self], search_index_client=search_index_client)
def all_editions(self, policy=None):
"""All Editions identified by an Identifier equivalent to
the identifiers of this Work's license pools.
:param policy: A PresentationCalculationPolicy, used to
determine how far to go when looking for equivalent
Identifiers.
"""
from .licensing import LicensePool
_db = Session.object_session(self)
identifier_ids_subquery = (
Identifier.recursively_equivalent_identifier_ids_query(
LicensePool.identifier_id, policy=policy
)
)
identifier_ids_subquery = identifier_ids_subquery.where(
LicensePool.work_id == self.id
)
q = _db.query(Edition).filter(
Edition.primary_identifier_id.in_(identifier_ids_subquery)
)
return q
@property
def _direct_identifier_ids(self):
"""Return all Identifier IDs associated with one of this
Work's LicensePools.
"""
return [lp.identifier.id for lp in self.license_pools if lp.identifier]
def all_identifier_ids(self, policy=None):
"""Return all Identifier IDs associated with this Work.
:param policy: A `PresentationCalculationPolicy`.
:return: A set containing all Identifier IDs associated
with this Work (as per the rules set down in `policy`).
"""
_db = Session.object_session(self)
# Get a dict that maps identifier ids to lists of their equivalents.
equivalent_lists = Identifier.recursively_equivalent_identifier_ids(
_db, self._direct_identifier_ids, policy=policy
)
all_identifier_ids = set()
for equivs in list(equivalent_lists.values()):
all_identifier_ids.update(equivs)
return all_identifier_ids
@property
def language_code(self):
"""A single 2-letter language code for display purposes."""
if not self.language:
return None
language = self.language
if language in LanguageCodes.three_to_two:
language = LanguageCodes.three_to_two[language]
return language
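    # For example, a Work whose presentation language is "eng" is reported as
    # "en" here; a language already expressed as a 2-letter code passes
    # through unchanged.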
def age_appropriate_for_patron(self, patron):
"""Is this Work age-appropriate for the given Patron?
:param patron: A Patron.
:return: A boolean
"""
if patron is None:
return True
return patron.work_is_age_appropriate(self.audience, self.target_age)
def set_presentation_edition(self, new_presentation_edition):
"""Sets presentation edition and lets owned pools and editions know.
Raises exception if edition to set to is None.
"""
        # only bother if something changed, or if we were explicitly told to
        # set it (useful for setting to None)
if not new_presentation_edition:
error_message = (
"Trying to set presentation_edition to None on Work [%s]" % self.id
)
raise ValueError(error_message)
self.presentation_edition = new_presentation_edition
# if the edition is the presentation edition for any license
# pools, let them know they have a Work.
for pool in self.presentation_edition.is_presentation_for:
pool.work = self
def calculate_presentation_edition(self, policy=None):
"""Which of this Work's Editions should be used as the default?
First, every LicensePool associated with this work must have
its presentation edition set.
Then, we go through the pools, see which has the best presentation edition,
and make it our presentation edition.
"""
changed = False
policy = policy or PresentationCalculationPolicy()
if not policy.choose_edition:
return changed
# For each owned edition, see if its LicensePool was superceded or suppressed
# if yes, the edition is unlikely to be the best.
# An open access pool may be "superceded", if there's a better-quality
# open-access pool available.
self.mark_licensepools_as_superceded()
edition_metadata_changed = False
old_presentation_edition = self.presentation_edition
new_presentation_edition = None
for pool in self.license_pools:
# a superceded pool's composite edition is not good enough
# Note: making the assumption here that we won't have a situation
# where we marked all of the work's pools as superceded or suppressed.
if pool.superceded or pool.suppressed:
continue
            # make sure the pool has the most up-to-date idea of its presentation edition,
# and then ask what it is.
pool_edition_changed = pool.set_presentation_edition()
edition_metadata_changed = edition_metadata_changed or pool_edition_changed
potential_presentation_edition = pool.presentation_edition
# We currently have no real way to choose between
# competing presentation editions. But it doesn't matter much
# because in the current system there should never be more
# than one non-superceded license pool per Work.
#
# So basically we pick the first available edition and
# make it the presentation edition.
if not new_presentation_edition or (
potential_presentation_edition is old_presentation_edition
and old_presentation_edition
):
# We would prefer not to change the Work's presentation
# edition unnecessarily, so if the current presentation
# edition is still an option, choose it.
new_presentation_edition = potential_presentation_edition
if (
self.presentation_edition != new_presentation_edition
) and new_presentation_edition != None:
# did we find a pool whose presentation edition was better than the work's?
self.set_presentation_edition(new_presentation_edition)
# tell everyone else we tried to set work's presentation edition
WorkCoverageRecord.add_for(
self, operation=WorkCoverageRecord.CHOOSE_EDITION_OPERATION
)
changed = (
edition_metadata_changed
or old_presentation_edition != self.presentation_edition
)
return changed
def _get_default_audience(self):
"""Return the default audience.
:return: Default audience
:rtype: Optional[str]
"""
for license_pool in self.license_pools:
if license_pool.collection.default_audience:
return license_pool.collection.default_audience
return None
def calculate_presentation(
self,
policy=None,
search_index_client=None,
exclude_search=False,
default_fiction=None,
default_audience=None,
):
"""Make a Work ready to show to patrons.
Call calculate_presentation_edition() to find the best-quality presentation edition
that could represent this work.
Then determine the following information, global to the work:
* Subject-matter classifications for the work.
* Whether or not the work is fiction.
* The intended audience for the work.
* The best available summary for the work.
* The overall popularity of the work.
"""
if not default_audience:
default_audience = self._get_default_audience()
# Gather information up front so we can see if anything
# actually changed.
changed = False
edition_changed = False
classification_changed = False
policy = policy or PresentationCalculationPolicy()
edition_changed = self.calculate_presentation_edition(policy)
if not self.presentation_edition:
# Without a presentation edition, we can't calculate presentation
# for the work.
return
if policy.choose_cover or policy.set_edition_metadata:
cover_changed = self.presentation_edition.calculate_presentation(policy)
edition_changed = edition_changed or cover_changed
summary = self.summary
summary_text = self.summary_text
quality = self.quality
# If we find a cover or description that comes direct from a
# license source, it may short-circuit the process of finding
# a good cover or description.
licensed_data_sources = set()
for pool in self.license_pools:
# Descriptions from Gutenberg are useless, so we
# specifically exclude it from being a privileged data
# source.
if pool.data_source.name != DataSourceConstants.GUTENBERG:
licensed_data_sources.add(pool.data_source)
if policy.classify or policy.choose_summary or policy.calculate_quality:
# Find all related IDs that might have associated descriptions,
# classifications, or measurements.
_db = Session.object_session(self)
direct_identifier_ids = self._direct_identifier_ids
all_identifier_ids = self.all_identifier_ids(policy=policy)
else:
# Don't bother.
direct_identifier_ids = all_identifier_ids = []
if policy.classify:
classification_changed = self.assign_genres(
all_identifier_ids,
default_fiction=default_fiction,
default_audience=default_audience,
)
WorkCoverageRecord.add_for(
self, operation=WorkCoverageRecord.CLASSIFY_OPERATION
)
if policy.choose_summary:
self._choose_summary(
direct_identifier_ids, all_identifier_ids, licensed_data_sources
)
if policy.calculate_quality:
            # In the absence of other data, we will make a rough
# judgement as to the quality of a book based on the
# license source. Commercial data sources have higher
# default quality, because it's presumed that a librarian
# put some work into deciding which books to buy.
default_quality = None
for source in licensed_data_sources:
q = self.default_quality_by_data_source.get(source.name, None)
if q is None:
continue
if default_quality is None or q > default_quality:
default_quality = q
if not default_quality:
                # if we still haven't found any quality measurement,
                # at least make the default an integer zero, not None.
default_quality = 0
self.calculate_quality(all_identifier_ids, default_quality)
if self.summary_text:
if isinstance(self.summary_text, str):
new_summary_text = self.summary_text
else:
new_summary_text = self.summary_text.decode("utf8")
else:
new_summary_text = self.summary_text
changed = (
edition_changed
or classification_changed
or summary != self.summary
or summary_text != new_summary_text
or float(quality) != float(self.quality)
)
if changed:
# last_update_time tracks the last time the data actually
# changed, not the last time we checked whether or not to
# change it.
self.last_update_time = utc_now()
if changed or policy.regenerate_opds_entries:
self.calculate_opds_entries()
if changed or policy.regenerate_marc_record:
self.calculate_marc_record()
if (changed or policy.update_search_index) and not exclude_search:
self.external_index_needs_updating()
# Now that everything's calculated, print it out.
if policy.verbose:
if changed:
changed = "changed"
representation = self.detailed_representation
else:
# TODO: maybe change changed to a boolean, and return it as method result
changed = "unchanged"
representation = repr(self)
logging.info("Presentation %s for work: %s", changed, representation)
# We want works to be presentation-ready as soon as possible,
# unless they are missing crucial information like language or
# title.
self.set_presentation_ready_based_on_content()
def _choose_summary(
self, direct_identifier_ids, all_identifier_ids, licensed_data_sources
):
"""Helper method for choosing a summary as part of presentation
calculation.
Summaries closer to a LicensePool, or from a more trusted source
will be preferred.
:param direct_identifier_ids: All IDs of Identifiers of LicensePools
directly associated with this Work. Summaries associated with
these IDs will be preferred. In the real world, this will happen
almost all the time.
:param all_identifier_ids: All IDs of Identifiers of
LicensePools associated (directly or indirectly) with this
Work. Summaries associated with these IDs will be
used only if none are found from direct_identifier_ids.
:param licensed_data_sources: A list of DataSources that should be
given priority -- either because they provided the books or because
they are trusted sources such as library staff.
"""
_db = Session.object_session(self)
staff_data_source = DataSource.lookup(_db, DataSourceConstants.LIBRARY_STAFF)
data_sources = [staff_data_source, licensed_data_sources]
summary = None
for id_set in (direct_identifier_ids, all_identifier_ids):
summary, summaries = Identifier.evaluate_summary_quality(
_db, id_set, data_sources
)
if summary:
# We found a summary.
break
self.set_summary(summary)
@property
def detailed_representation(self):
"""A description of this work more detailed than repr()"""
l = ["%s (by %s)" % (self.title, self.author)]
l.append(" language=%s" % self.language)
l.append(" quality=%s" % self.quality)
if self.presentation_edition and self.presentation_edition.primary_identifier:
primary_identifier = self.presentation_edition.primary_identifier
else:
primary_identifier = None
l.append(" primary id=%s" % primary_identifier)
if self.fiction:
fiction = "Fiction"
elif self.fiction == False:
fiction = "Nonfiction"
else:
fiction = "???"
if self.target_age and (self.target_age.upper or self.target_age.lower):
target_age = " age=" + self.target_age_string
else:
target_age = ""
l.append(
" %(fiction)s a=%(audience)s%(target_age)r"
% (dict(fiction=fiction, audience=self.audience, target_age=target_age))
)
l.append(" " + ", ".join(repr(wg) for wg in self.work_genres))
if self.cover_full_url:
l.append(" Full cover: %s" % self.cover_full_url)
else:
l.append(" No full cover.")
if self.cover_thumbnail_url:
l.append(" Cover thumbnail: %s" % self.cover_thumbnail_url)
else:
l.append(" No thumbnail cover.")
downloads = []
expect_downloads = False
for pool in self.license_pools:
if pool.superceded:
continue
if pool.open_access:
expect_downloads = True
for lpdm in pool.delivery_mechanisms:
if lpdm.resource and lpdm.resource.final_url:
downloads.append(lpdm.resource)
if downloads:
l.append(" Open-access downloads:")
for r in downloads:
l.append(" " + r.final_url)
elif expect_downloads:
l.append(" Expected open-access downloads but found none.")
def _ensure(s):
if not s:
return ""
elif isinstance(s, str):
return s
else:
return s.decode("utf8", "replace")
if self.summary and self.summary.representation:
snippet = _ensure(self.summary.representation.content)[:100]
d = " Description (%.2f) %s" % (self.summary.quality, snippet)
l.append(d)
l = [_ensure(s) for s in l]
return "\n".join(l)
def calculate_opds_entries(self, verbose=True):
from ..opds import AcquisitionFeed, Annotator, VerboseAnnotator
_db = Session.object_session(self)
simple = AcquisitionFeed.single_entry(_db, self, Annotator, force_create=True)
if verbose is True:
verbose = AcquisitionFeed.single_entry(
_db, self, VerboseAnnotator, force_create=True
)
WorkCoverageRecord.add_for(
self, operation=WorkCoverageRecord.GENERATE_OPDS_OPERATION
)
def calculate_marc_record(self):
from ..marc import Annotator, MARCExporter
_db = Session.object_session(self)
record = MARCExporter.create_record(
self, annotator=Annotator, force_create=True
)
WorkCoverageRecord.add_for(
self, operation=WorkCoverageRecord.GENERATE_MARC_OPERATION
)
def active_license_pool(self):
# The active license pool is the one that *would* be
# associated with a loan, were a loan to be issued right
# now.
active_license_pool = None
for p in self.license_pools:
if p.superceded:
continue
edition = p.presentation_edition
if p.open_access:
if p.best_open_access_link:
active_license_pool = p
# We have an unlimited source for this book.
# There's no need to keep looking.
break
elif p.unlimited_access or p.self_hosted:
active_license_pool = p
elif edition and edition.title and p.licenses_owned > 0:
active_license_pool = p
return active_license_pool
def _reset_coverage(self, operation):
"""Put this work's WorkCoverageRecord for the given `operation`
into the REGISTERED state.
This is useful for erasing the record of work that was done,
so that automated scripts know the work needs to be done
again.
:return: A WorkCoverageRecord.
"""
_db = Session.object_session(self)
record, is_new = WorkCoverageRecord.add_for(
self, operation=operation, status=CoverageRecord.REGISTERED
)
return record
def external_index_needs_updating(self):
"""Mark this work as needing to have its search document reindexed.
This is a more efficient alternative to reindexing immediately,
since these WorkCoverageRecords are handled in large batches.
"""
return self._reset_coverage(WorkCoverageRecord.UPDATE_SEARCH_INDEX_OPERATION)
def update_external_index(self, client, add_coverage_record=True):
"""Create a WorkCoverageRecord so that this work's
entry in the search index can be modified or deleted.
This method is deprecated -- call
external_index_needs_updating() instead.
"""
self.external_index_needs_updating()
def needs_full_presentation_recalculation(self):
"""Mark this work as needing to have its presentation completely
recalculated.
This shifts the time spent recalculating presentation to a
script dedicated to this purpose, rather than a script that
interacts with APIs. It's also more efficient, since a work
might be flagged multiple times before we actually get around
to recalculating the presentation.
"""
return self._reset_coverage(WorkCoverageRecord.CLASSIFY_OPERATION)
def needs_new_presentation_edition(self):
"""Mark this work as needing to have its presentation edition
regenerated. This is significantly less work than
calling needs_full_presentation_recalculation, but it will
not update a Work's quality score, summary, or genre classification.
"""
return self._reset_coverage(WorkCoverageRecord.CHOOSE_EDITION_OPERATION)
def set_presentation_ready(
self, as_of=None, search_index_client=None, exclude_search=False
):
"""Set this work as presentation-ready, no matter what.
This assumes that we know the work has the minimal information
necessary to be found with typical queries and that patrons
will be able to understand what work we're talking about.
In most cases you should call set_presentation_ready_based_on_content
instead, which runs those checks.
"""
as_of = as_of or utc_now()
self.presentation_ready = True
self.presentation_ready_exception = None
self.presentation_ready_attempt = as_of
if not exclude_search:
self.external_index_needs_updating()
def set_presentation_ready_based_on_content(self, search_index_client=None):
"""Set this work as presentation ready, if it appears to
be ready based on its data.
Presentation ready means the book is ready to be shown to
patrons and (pending availability) checked out. It doesn't
necessarily mean the presentation is complete.
The absolute minimum data necessary is a title, a language,
and a medium. We don't need a cover or an author -- we can
fill in that info later if it exists.
TODO: search_index_client is redundant here.
"""
if (
not self.presentation_edition
or not self.license_pools
or not self.title
or not self.language
or not self.presentation_edition.medium
):
self.presentation_ready = False
# The next time the search index WorkCoverageRecords are
# processed, this work will be removed from the search
# index.
self.external_index_needs_updating()
logging.warning("Work is not presentation ready: %r", self)
else:
self.set_presentation_ready(search_index_client=search_index_client)
def calculate_quality(self, identifier_ids, default_quality=0):
_db = Session.object_session(self)
# Relevant Measurements are direct measurements of popularity
        # and quality, plus any quantity that might be mappable to the 0..1
# range -- ratings, and measurements with an associated percentile
# score.
quantities = set(
[Measurement.POPULARITY, Measurement.QUALITY, Measurement.RATING]
)
quantities = quantities.union(list(Measurement.PERCENTILE_SCALES.keys()))
measurements = (
_db.query(Measurement)
.filter(Measurement.identifier_id.in_(identifier_ids))
.filter(Measurement.is_most_recent == True)
.filter(Measurement.quantity_measured.in_(quantities))
.all()
)
self.quality = Measurement.overall_quality(
measurements, default_value=default_quality
)
WorkCoverageRecord.add_for(self, operation=WorkCoverageRecord.QUALITY_OPERATION)
def assign_genres(
self,
identifier_ids,
default_fiction=False,
default_audience=Classifier.AUDIENCE_ADULT,
):
"""Set classification information for this work based on the
subquery to get equivalent identifiers.
:return: A boolean explaining whether or not any data actually
changed.
"""
classifier = WorkClassifier(self)
old_fiction = self.fiction
old_audience = self.audience
old_target_age = self.target_age
_db = Session.object_session(self)
classifications = Identifier.classifications_for_identifier_ids(
_db, identifier_ids
)
for classification in classifications:
classifier.add(classification)
(genre_weights, self.fiction, self.audience, target_age) = classifier.classify(
default_fiction=default_fiction, default_audience=default_audience
)
self.target_age = tuple_to_numericrange(target_age)
workgenres, workgenres_changed = self.assign_genres_from_weights(genre_weights)
classification_changed = (
workgenres_changed
or old_fiction != self.fiction
or old_audience != self.audience
or numericrange_to_tuple(old_target_age) != target_age
)
return classification_changed
def assign_genres_from_weights(self, genre_weights):
# Assign WorkGenre objects to the remainder.
from .classification import Genre
changed = False
_db = Session.object_session(self)
total_genre_weight = float(sum(genre_weights.values()))
workgenres = []
current_workgenres = _db.query(WorkGenre).filter(WorkGenre.work == self)
by_genre = dict()
for wg in current_workgenres:
by_genre[wg.genre] = wg
for g, score in list(genre_weights.items()):
affinity = score / total_genre_weight
if not isinstance(g, Genre):
g, ignore = Genre.lookup(_db, g.name)
if g in by_genre:
wg = by_genre[g]
is_new = False
del by_genre[g]
else:
wg, is_new = get_one_or_create(_db, WorkGenre, work=self, genre=g)
if is_new or round(wg.affinity, 2) != round(affinity, 2):
changed = True
wg.affinity = affinity
workgenres.append(wg)
# Any WorkGenre objects left over represent genres the Work
# was once classified under, but is no longer. Delete them.
for wg in list(by_genre.values()):
_db.delete(wg)
changed = True
# ensure that work_genres is up to date without having to read from database again
self.work_genres = workgenres
return workgenres, changed
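    # Worked example: genre_weights of {Fantasy: 300, Romance: 100} yields
    # affinities of 0.75 and 0.25; any existing WorkGenre rows for genres that
    # no longer appear in the weights are deleted.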
def assign_appeals(self, character, language, setting, story, cutoff=0.20):
"""Assign the given appeals to the corresponding database fields,
as well as calculating the primary and secondary appeal.
"""
self.appeal_character = character
self.appeal_language = language
self.appeal_setting = setting
self.appeal_story = story
c = Counter()
c[self.CHARACTER_APPEAL] = character
c[self.LANGUAGE_APPEAL] = language
c[self.SETTING_APPEAL] = setting
c[self.STORY_APPEAL] = story
primary, secondary = c.most_common(2)
if primary[1] > cutoff:
self.primary_appeal = primary[0]
else:
self.primary_appeal = self.UNKNOWN_APPEAL
if secondary[1] > cutoff:
self.secondary_appeal = secondary[0]
else:
self.secondary_appeal = self.NO_APPEAL
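    # Worked example: with character=0.5, language=0.1, setting=0.3,
    # story=0.1 and the default cutoff of 0.20, the primary appeal becomes
    # Character and the secondary appeal becomes Setting.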
# This can be used in func.to_char to convert a SQL datetime into a string
# that Elasticsearch can parse as a date.
ELASTICSEARCH_TIME_FORMAT = 'YYYY-MM-DD"T"HH24:MI:SS"."MS'
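    # For example, func.to_char(Work.last_update_time, ELASTICSEARCH_TIME_FORMAT)
    # renders a timestamp as a string like "2022-03-31T01:50:41.000", which
    # Elasticsearch can parse as a date.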
@classmethod
def to_search_documents(cls, works, policy=None):
"""Generate search documents for these Works.
This is done by constructing an extremely complicated
SQL query. The code is ugly, but it's about 100 times
faster than using python to create documents for
each work individually. When working on the search
index, it's very important for this to be fast.
:param policy: A PresentationCalculationPolicy to use when
deciding how deep to go to find Identifiers equivalent to
these works.
"""
if not works:
return []
_db = Session.object_session(works[0])
# If this is a batch of search documents, postgres needs extra working
# memory to process the query quickly.
if len(works) > 50:
_db.execute("set work_mem='200MB'")
# This query gets relevant columns from Work and Edition for the Works we're
# interested in. The work_id, edition_id, and identifier_id columns are used
# by other subqueries to filter, and the remaining columns are used directly
# to create the json document.
works_alias = (
select(
[
Work.id.label("work_id"),
Edition.id.label("edition_id"),
Edition.primary_identifier_id.label("identifier_id"),
Edition.title,
Edition.subtitle,
Edition.series,
Edition.series_position,
Edition.language,
Edition.sort_title,
Edition.author,
Edition.sort_author,
Edition.medium,
Edition.publisher,
Edition.imprint,
Edition.permanent_work_id,
Work.fiction,
Work.audience,
Work.summary_text,
Work.quality,
Work.rating,
Work.popularity,
Work.presentation_ready,
Work.presentation_edition_id,
func.extract(
"EPOCH",
Work.last_update_time,
).label("last_update_time"),
],
Work.id.in_((w.id for w in works)),
)
.select_from(
join(Work, Edition, Work.presentation_edition_id == Edition.id)
)
.alias("works_alias")
)
work_id_column = literal_column(
works_alias.name + "." + works_alias.c.work_id.name
)
work_presentation_edition_id_column = literal_column(
works_alias.name + "." + works_alias.c.presentation_edition_id.name
)
work_quality_column = literal_column(
works_alias.name + "." + works_alias.c.quality.name
)
def query_to_json(query):
"""Convert the results of a query to a JSON object."""
return select([func.row_to_json(literal_column(query.name))]).select_from(
query
)
def query_to_json_array(query):
"""Convert the results of a query into a JSON array."""
return select(
[
func.array_to_json(
func.array_agg(func.row_to_json(literal_column(query.name)))
)
]
).select_from(query)
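        # Roughly speaking, these two helpers wrap a subquery `subq` in
        # SELECT row_to_json(subq) FROM subq (a single JSON object) or
        # SELECT array_to_json(array_agg(row_to_json(subq))) FROM subq
        # (a JSON array of objects), which is how the nested subdocuments
        # below end up embedded in the one big result row.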
# This subquery gets Collection IDs for collections
# that own more than zero licenses for this book.
from .classification import Genre, Subject
from .customlist import CustomListEntry
from .licensing import LicensePool
# We need information about LicensePools for a few reasons:
#
# * We always want to filter out Works that are not available
# in any of the collections associated with a given library
# -- either because no licenses are owned, because the
# LicensePools are suppressed, or (TODO) because there are no
# delivery mechanisms.
# * A patron may want to sort a list of books by availability
# date.
# * A patron may want to show only books currently available,
# or only open-access books.
#
# Whenever LicensePool.open_access is changed, or
# licenses_available moves to zero or away from zero, the
# LicensePool signals that its Work needs reindexing.
#
# The work quality field is stored in the main document, but
# it's also stored here, so that we can apply a nested filter
# that combines quality with other fields found only in the subdocument.
def explicit_bool(label, t):
# Ensure we always generate True/False instead of
# True/None. Elasticsearch can't filter on null values.
return case([(t, True)], else_=False).label(label)
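        # For example, explicit_bool("available", <condition>) compiles to
        # roughly CASE WHEN <condition> THEN true ELSE false END AS available,
        # so the search document always carries a real boolean.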
licensepools = (
select(
[
LicensePool.id.label("licensepool_id"),
LicensePool.data_source_id.label("data_source_id"),
LicensePool.collection_id.label("collection_id"),
LicensePool.open_access.label("open_access"),
LicensePool.suppressed,
explicit_bool(
"available",
or_(
LicensePool.unlimited_access,
LicensePool.self_hosted,
LicensePool.licenses_available > 0,
),
),
explicit_bool(
"licensed",
or_(
LicensePool.unlimited_access,
LicensePool.self_hosted,
LicensePool.licenses_owned > 0,
),
),
work_quality_column,
Edition.medium,
func.extract(
"EPOCH",
LicensePool.availability_time,
).label("availability_time"),
]
)
.where(
and_(
LicensePool.work_id == work_id_column,
work_presentation_edition_id_column == Edition.id,
or_(
LicensePool.open_access,
LicensePool.unlimited_access,
LicensePool.self_hosted,
LicensePool.licenses_owned > 0,
),
)
)
.alias("licensepools_subquery")
)
licensepools_json = query_to_json_array(licensepools)
# This subquery gets CustomList IDs for all lists
# that contain the work.
#
# We also keep track of whether the work is featured on each
# list. This is used when determining which works should be
# featured for a lane based on CustomLists.
#
# And we keep track of the first time the work appears on the list.
# This is used when generating a crawlable feed for the customlist,
# which is ordered by a work's first appearance on the list.
customlists = (
select(
[
CustomListEntry.list_id.label("list_id"),
CustomListEntry.featured.label("featured"),
func.extract(
"EPOCH",
CustomListEntry.first_appearance,
).label("first_appearance"),
]
)
.where(CustomListEntry.work_id == work_id_column)
.alias("listentries_subquery")
)
customlists_json = query_to_json_array(customlists)
# This subquery gets Contributors, filtered on edition_id.
contributors = (
select(
[
Contributor.sort_name,
Contributor.display_name,
Contributor.family_name,
Contributor.lc,
Contributor.viaf,
Contribution.role,
]
)
.where(
Contribution.edition_id
== literal_column(
works_alias.name + "." + works_alias.c.edition_id.name
)
)
.select_from(
join(
Contributor,
Contribution,
Contributor.id == Contribution.contributor_id,
)
)
.alias("contributors_subquery")
)
contributors_json = query_to_json_array(contributors)
# Use a subquery to get recursively equivalent Identifiers
# for the Edition's primary_identifier_id.
#
# NOTE: we don't reliably reindex works when this information
# changes, but it's not critical that this information be
# totally up to date -- we only use it for subject searches
# and recommendations. The index is completely rebuilt once a
# day, and that's good enough.
equivalent_identifiers = Identifier.recursively_equivalent_identifier_ids_query(
literal_column(works_alias.name + "." + works_alias.c.identifier_id.name),
policy=policy,
).alias("equivalent_identifiers_subquery")
identifiers = (
select(
[
Identifier.identifier.label("identifier"),
Identifier.type.label("type"),
]
)
.where(Identifier.id.in_(equivalent_identifiers))
.alias("identifier_subquery")
)
identifiers_json = query_to_json_array(identifiers)
# Map our constants for Subject type to their URIs.
scheme_column = case(
[
(Subject.type == key, literal_column("'%s'" % val))
for key, val in list(Subject.uri_lookup.items())
]
)
# If the Subject has a name, use that, otherwise use the Subject's identifier.
# Also, 3M's classifications have slashes, e.g. "FICTION/Adventure". Make sure
# we get separated words for search.
term_column = func.replace(
case([(Subject.name != None, Subject.name)], else_=Subject.identifier),
"/",
" ",
)
# Normalize by dividing each weight by the sum of the weights for that Identifier's Classifications.
from .classification import Classification
weight_column = (
func.sum(Classification.weight)
/ func.sum(func.sum(Classification.weight)).over()
)
# The subquery for Subjects, with those three columns. The labels will become keys in json objects.
subjects = (
select(
[
scheme_column.label("scheme"),
term_column.label("term"),
weight_column.label("weight"),
],
# Only include Subjects with terms that are useful for search.
and_(Subject.type.in_(Subject.TYPES_FOR_SEARCH), term_column != None),
)
.group_by(scheme_column, term_column)
.where(Classification.identifier_id.in_(equivalent_identifiers))
.select_from(
join(Classification, Subject, Classification.subject_id == Subject.id)
)
.alias("subjects_subquery")
)
subjects_json = query_to_json_array(subjects)
# Subquery for genres.
genres = (
select(
# All Genres have the same scheme - the simplified genre URI.
[
literal_column("'%s'" % Subject.SIMPLIFIED_GENRE).label("scheme"),
Genre.name,
Genre.id.label("term"),
WorkGenre.affinity.label("weight"),
]
)
.where(
WorkGenre.work_id
== literal_column(works_alias.name + "." + works_alias.c.work_id.name)
)
.select_from(join(WorkGenre, Genre, WorkGenre.genre_id == Genre.id))
.alias("genres_subquery")
)
genres_json = query_to_json_array(genres)
target_age = cls.target_age_query(
literal_column(works_alias.name + "." + works_alias.c.work_id.name)
).alias("target_age_subquery")
target_age_json = query_to_json(target_age)
# Now, create a query that brings together everything we need for the final
# search document.
search_data = (
select(
[
works_alias.c.work_id.label("_id"),
works_alias.c.work_id.label("work_id"),
works_alias.c.title,
works_alias.c.sort_title,
works_alias.c.subtitle,
works_alias.c.series,
works_alias.c.series_position,
works_alias.c.language,
works_alias.c.author,
works_alias.c.sort_author,
works_alias.c.medium,
works_alias.c.publisher,
works_alias.c.imprint,
works_alias.c.permanent_work_id,
works_alias.c.presentation_ready,
works_alias.c.last_update_time,
# Convert true/false to "Fiction"/"Nonfiction".
case(
[(works_alias.c.fiction == True, literal_column("'Fiction'"))],
else_=literal_column("'Nonfiction'"),
).label("fiction"),
# Replace "Young Adult" with "YoungAdult" and "Adults Only" with "AdultsOnly".
func.replace(works_alias.c.audience, " ", "").label("audience"),
works_alias.c.summary_text.label("summary"),
works_alias.c.quality,
works_alias.c.rating,
works_alias.c.popularity,
# Here are all the subqueries.
licensepools_json.label("licensepools"),
customlists_json.label("customlists"),
contributors_json.label("contributors"),
identifiers_json.label("identifiers"),
subjects_json.label("classifications"),
genres_json.label("genres"),
target_age_json.label("target_age"),
]
)
.select_from(works_alias)
.alias("search_data_subquery")
)
# Finally, convert everything to json.
search_json = query_to_json(search_data)
result = _db.execute(search_json)
if result:
return [r[0] for r in result]
@classmethod
    def target_age_query(cls, foreign_work_id_field):
# If the upper limit of the target age is inclusive, we leave
# it alone. Otherwise, we subtract one to make it inclusive.
upper_field = func.upper(Work.target_age)
upper = case(
[(func.upper_inc(Work.target_age), upper_field)], else_=upper_field - 1
).label("upper")
# If the lower limit of the target age is inclusive, we leave
# it alone. Otherwise, we add one to make it inclusive.
lower_field = func.lower(Work.target_age)
lower = case(
[(func.lower_inc(Work.target_age), lower_field)], else_=lower_field + 1
).label("lower")
# Subquery for target age. This has to be a subquery so it can
# become a nested object in the final json.
target_age = select([upper, lower]).where(Work.id == foreign_work_id_field)
return target_age
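    # For example, a target_age stored as the half-open range [9, 12)
    # (lower bound inclusive, upper bound exclusive) comes back from this
    # subquery as lower=9, upper=11.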
def to_search_document(self):
"""Generate a search document for this Work."""
return Work.to_search_documents([self])[0]
def mark_licensepools_as_superceded(self):
"""Make sure that all but the single best open-access LicensePool for
        this Work are superceded. Non-open-access LicensePools should
        never be superceded, and this method marks them as
        un-superceded.
"""
champion_open_access_license_pool = None
for pool in self.license_pools:
if not pool.open_access:
pool.superceded = False
continue
if pool.better_open_access_pool_than(champion_open_access_license_pool):
if champion_open_access_license_pool:
champion_open_access_license_pool.superceded = True
champion_open_access_license_pool = pool
pool.superceded = False
else:
pool.superceded = True
@classmethod
def restrict_to_custom_lists_from_data_source(
cls, _db, base_query, data_source, on_list_as_of=None
):
"""Annotate a query that joins Work against Edition to match only
Works that are on a custom list from the given data source."""
condition = CustomList.data_source == data_source
return cls._restrict_to_customlist_subquery_condition(
_db, base_query, condition, on_list_as_of
)
@classmethod
def restrict_to_custom_lists(
cls, _db, base_query, custom_lists, on_list_as_of=None
):
"""Annotate a query that joins Work against Edition to match only
Works that are on one of the given custom lists."""
condition = CustomList.id.in_([x.id for x in custom_lists])
return cls._restrict_to_customlist_subquery_condition(
_db, base_query, condition, on_list_as_of
)
@classmethod
def _restrict_to_customlist_subquery_condition(
cls, _db, base_query, condition, on_list_as_of=None
):
"""Annotate a query that joins Work against Edition to match only
Works that are on a custom list from the given data source."""
# Find works that are on a list that meets the given condition.
qu = base_query.join(LicensePool.custom_list_entries).join(
CustomListEntry.customlist
)
if on_list_as_of:
qu = qu.filter(CustomListEntry.most_recent_appearance >= on_list_as_of)
qu = qu.filter(condition)
return qu
def classifications_with_genre(self):
from .classification import Classification, Subject
_db = Session.object_session(self)
identifier = self.presentation_edition.primary_identifier
return (
_db.query(Classification)
.join(Subject)
.filter(Classification.identifier_id == identifier.id)
.filter(Subject.genre_id != None)
.order_by(Classification.weight.desc())
)
def top_genre(self):
from .classification import Genre
_db = Session.object_session(self)
genre = (
_db.query(Genre)
.join(WorkGenre)
.filter(WorkGenre.work_id == self.id)
.order_by(WorkGenre.affinity.desc())
.first()
)
return genre.name if genre else None
def delete(self, search_index=None):
"""Delete the work from both the DB and search index."""
_db = Session.object_session(self)
if search_index is None:
try:
from ..external_search import ExternalSearchIndex
search_index = ExternalSearchIndex(_db)
            except CannotLoadConfiguration:
                # No search index is configured. That's fine -- just skip
                # removing the work from the search index.
                pass
if search_index is not None:
search_index.remove_work(self)
_db.delete(self)
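    # Hedged usage sketch (the lookup below is hypothetical, not part of this
    # module): callers that already hold a search index client can pass it in and
    # skip the ExternalSearchIndex construction inside delete().
    #
    #     work = _db.query(Work).get(work_id)         # hypothetical lookup
    #     work.delete(search_index=my_search_client)  # removes the DB row and the
    #                                                 # corresponding search document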
avg_line_length: 38.935751 | max_line_length: 155 | alphanum_fraction: 0.611623
hexsha: 8563627b579399104a3328cb159b18e999af80e9 | size: 17,854 | ext: py | lang: Python
repo_path: tensorflow/python/keras/engine/training_utils_v1_test.py | repo_name: AdamHillier/tensorflow | repo_head_hexsha: 6780ebf4858a56fd0745f03fa5a61b249559f3cd | licenses: ["Apache-2.0"]
max_stars_count: 4 (2016-07-14T15:15:05.000Z .. 2017-03-02T15:17:22.000Z) | max_issues_count: 1 (2021-03-23T03:25:15.000Z .. 2021-03-23T03:25:15.000Z) | max_forks_count: 5 (2016-11-07T21:17:45.000Z .. 2020-05-31T00:16:59.000Z)
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for training utility functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import multiprocessing.pool
import time
from absl.testing import parameterized
import numpy as np
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import readers
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_util
from tensorflow.python.keras import backend
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import testing_utils
from tensorflow.python.keras.engine import keras_tensor
from tensorflow.python.keras.engine import training_utils_v1
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.ops.ragged import ragged_tensor_value
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
class ModelInputsTest(test.TestCase):
def test_single_thing(self):
a = np.ones(10)
model_inputs = training_utils_v1.ModelInputs(a)
self.assertEqual(['input_1'], model_inputs.get_input_names())
vals = model_inputs.get_symbolic_inputs()
self.assertTrue(tensor_util.is_tf_type(vals))
vals = model_inputs.get_symbolic_inputs(return_single_as_list=True)
self.assertEqual(1, len(vals))
self.assertTrue(tensor_util.is_tf_type(vals[0]))
self.assertEqual(backend.floatx(), vals[0].dtype)
def test_single_thing_eager(self):
if not context.executing_eagerly():
self.skipTest('Run in eager mode only.')
a = np.ones(10, dtype=np.int32)
model_inputs = training_utils_v1.ModelInputs(a)
self.assertEqual(['input_1'], model_inputs.get_input_names())
val = model_inputs.get_symbolic_inputs()
self.assertIsInstance(val, keras_tensor.KerasTensor)
vals = model_inputs.get_symbolic_inputs(return_single_as_list=True)
self.assertEqual(1, len(vals))
self.assertIsInstance(vals[0], keras_tensor.KerasTensor)
self.assertEqual(dtypes.int32, vals[0].dtype)
def test_list(self):
a = [np.ones(10), np.ones(20)]
model_inputs = training_utils_v1.ModelInputs(a)
self.assertEqual(['input_1', 'input_2'], model_inputs.get_input_names())
vals = model_inputs.get_symbolic_inputs()
self.assertTrue(tensor_util.is_tf_type(vals[0]))
self.assertTrue(tensor_util.is_tf_type(vals[1]))
def test_list_eager(self):
if not context.executing_eagerly():
self.skipTest('Run in eager mode only.')
a = [np.ones(10), np.ones(20)]
model_inputs = training_utils_v1.ModelInputs(a)
self.assertEqual(['input_1', 'input_2'], model_inputs.get_input_names())
vals = model_inputs.get_symbolic_inputs()
self.assertIsInstance(vals[0], keras_tensor.KerasTensor)
self.assertIsInstance(vals[1], keras_tensor.KerasTensor)
def test_dict(self):
a = {'b': np.ones(10), 'a': np.ones(20)}
model_inputs = training_utils_v1.ModelInputs(a)
self.assertEqual(['a', 'b'], model_inputs.get_input_names())
vals = model_inputs.get_symbolic_inputs()
self.assertTrue(tensor_util.is_tf_type(vals['a']))
self.assertTrue(tensor_util.is_tf_type(vals['b']))
def test_dict_eager(self):
if not context.executing_eagerly():
self.skipTest('Run in eager mode only.')
a = {'b': np.ones(10), 'a': np.ones(20)}
model_inputs = training_utils_v1.ModelInputs(a)
self.assertEqual(['a', 'b'], model_inputs.get_input_names())
vals = model_inputs.get_symbolic_inputs()
self.assertIsInstance(vals['a'], keras_tensor.KerasTensor)
self.assertIsInstance(vals['b'], keras_tensor.KerasTensor)
class DatasetUtilsTest(test.TestCase, parameterized.TestCase):
@parameterized.named_parameters(
# pylint: disable=g-long-lambda
('Batch', lambda: dataset_ops.Dataset.range(5).batch(2)),
('Cache', lambda: dataset_ops.Dataset.range(5).cache()),
('Concatenate', lambda: dataset_ops.Dataset.range(5).concatenate(
dataset_ops.Dataset.range(5))),
('FlatMap', lambda: dataset_ops.Dataset.range(5).flat_map(
lambda _: dataset_ops.Dataset.from_tensors(0))),
('FlatMap_Shuffle', lambda: dataset_ops.Dataset.range(5).flat_map(
lambda _: dataset_ops.Dataset.from_tensors(0).shuffle(1)), True),
('Filter', lambda: dataset_ops.Dataset.range(5).filter(lambda _: True)),
('FixedLengthRecordDatasetV2',
lambda: readers.FixedLengthRecordDatasetV2([], 42)),
('FromTensors', lambda: dataset_ops.Dataset.from_tensors(0)),
('FromTensorSlices',
lambda: dataset_ops.Dataset.from_tensor_slices([0, 0, 0])),
('Interleave', lambda: dataset_ops.Dataset.range(5).interleave(
lambda _: dataset_ops.Dataset.from_tensors(0), cycle_length=1)),
('Interleave_Shuffle', lambda: dataset_ops.Dataset.range(5).interleave(
lambda _: dataset_ops.Dataset.from_tensors(0).shuffle(1),
cycle_length=1), True),
('Map', lambda: dataset_ops.Dataset.range(5).map(lambda x: x)),
('Options',
lambda: dataset_ops.Dataset.range(5).with_options(dataset_ops.Options())
),
('PaddedBatch', lambda: dataset_ops.Dataset.range(5).padded_batch(2, [])),
('ParallelInterleave', lambda: dataset_ops.Dataset.range(5).interleave(
lambda _: dataset_ops.Dataset.from_tensors(0),
cycle_length=1,
num_parallel_calls=1)),
('ParallelMap', lambda: dataset_ops.Dataset.range(5).map(
lambda x: x, num_parallel_calls=1)),
('Prefetch', lambda: dataset_ops.Dataset.range(5).prefetch(1)),
('Range', lambda: dataset_ops.Dataset.range(0)),
('Repeat', lambda: dataset_ops.Dataset.range(0).repeat(0)),
('Shuffle', lambda: dataset_ops.Dataset.range(5).shuffle(1), True),
('Skip', lambda: dataset_ops.Dataset.range(5).skip(2)),
('Take', lambda: dataset_ops.Dataset.range(5).take(2)),
('TextLineDataset', lambda: readers.TextLineDatasetV2([])),
('TFRecordDataset', lambda: readers.TFRecordDatasetV2([])),
('Window', lambda: dataset_ops.Dataset.range(5).window(2)),
('Zip', lambda: dataset_ops.Dataset.zip(dataset_ops.Dataset.range(5))),
# pylint: enable=g-long-lambda
)
def test_verify_dataset_shuffled(self, dataset_fn, expect_shuffled=False):
dataset = dataset_fn()
if not expect_shuffled:
with test.mock.patch.object(logging, 'warning') as mock_log:
shuffled = training_utils_v1.verify_dataset_shuffled(dataset)
self.assertRegex(
str(mock_log.call_args), 'input dataset `x` is not shuffled.')
self.assertFalse(shuffled)
else:
self.assertTrue(training_utils_v1.verify_dataset_shuffled(dataset))
class StandardizeWeightsTest(keras_parameterized.TestCase):
def test_sample_weights(self):
y = np.array([0, 1, 0, 0, 2])
sample_weights = np.array([0.5, 1., 1., 0., 2.])
weights = training_utils_v1.standardize_weights(y, sample_weights)
self.assertAllClose(weights, sample_weights)
def test_class_weights(self):
y = np.array([0, 1, 0, 0, 2])
class_weights = {0: 0.5, 1: 1., 2: 1.5}
weights = training_utils_v1.standardize_weights(
y, class_weight=class_weights)
self.assertAllClose(weights, np.array([0.5, 1., 0.5, 0.5, 1.5]))
def test_sample_weights_and_class_weights(self):
y = np.array([0, 1, 0, 0, 2])
sample_weights = np.array([0.5, 1., 1., 0., 2.])
class_weights = {0: 0.5, 1: 1., 2: 1.5}
weights = training_utils_v1.standardize_weights(y, sample_weights,
class_weights)
expected = sample_weights * np.array([0.5, 1., 0.5, 0.5, 1.5])
self.assertAllClose(weights, expected)
def test_dataset_with_class_weight(self):
model = testing_utils.get_small_functional_mlp(1, 4, input_dim=3)
model.compile('rmsprop', 'mse')
inputs = np.zeros((10, 3), np.float32)
targets = np.zeros((10, 4), np.float32)
dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
dataset = dataset.repeat(100)
dataset = dataset.batch(10)
class_weight_np = np.array([0.25, 0.25, 0.25, 0.25])
class_weight = dict(enumerate(class_weight_np))
model.fit(
dataset,
epochs=1,
steps_per_epoch=2,
verbose=1,
class_weight=class_weight)
class MonitoredPool(multiprocessing.pool.ThreadPool):
def __init__(self, *args, **kwargs):
self._apply_counter = 0
self._func_wrapper = None
super(MonitoredPool, self).__init__(*args, **kwargs)
def apply_async(self, func, *args, **kwargs):
self._apply_counter += 1
if self._func_wrapper:
func = self._func_wrapper(func) # pylint: disable=not-callable
return super(MonitoredPool, self).apply_async(func, *args, **kwargs)
def add_sleep(f):
@functools.wraps(f)
def wrapped(*args, **kwargs):
time.sleep(1.)
return f(*args, **kwargs)
return wrapped
def cause_error(f):
@functools.wraps(f)
def wrapped(batch_element, batch_start, batch_end, is_finished): # pylint: disable=unused-argument
# Induce a TypeError during assignment.
return f(None, None, None, is_finished)
return wrapped
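# Explanatory note (not part of the original test file): AggregationTest below
# swaps training_utils_v1._COPY_POOL for a MonitoredPool so it can count
# apply_async() calls, and installs add_sleep / cause_error as _func_wrapper to
# force SliceAggregator's async-copy timeout and error-reraise paths.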
_TEST_DATA = np.array((
(3, 1, 3, 1, 2, 0, 3, 3, 1, 2),
(0, 1, 2, 1, 3, 0, 0, 1, 3, 0),
(3, 2, 1, 1, 1, 1, 1, 3, 2, 3),
(2, 2, 0, 1, 0, 3, 3, 2, 1, 1),
(3, 0, 3, 3, 3, 2, 1, 0, 0, 1),
(1, 0, 3, 3, 3, 2, 1, 2, 3, 1),))
class AggregationTest(keras_parameterized.TestCase):
def setUp(self):
super(AggregationTest, self).setUp()
self._old_pool = training_utils_v1._COPY_POOL
self._old_threshold = (
training_utils_v1.SliceAggregator._BINARY_SIZE_THRESHOLD)
self._old_timeout = training_utils_v1.SliceAggregator._MAX_COPY_SECONDS
training_utils_v1._COPY_POOL = MonitoredPool(
training_utils_v1._COPY_THREADS)
def tearDown(self):
super(AggregationTest, self).tearDown()
training_utils_v1._COPY_POOL = self._old_pool
training_utils_v1.SliceAggregator._BINARY_SIZE_THRESHOLD = (
self._old_threshold)
training_utils_v1.SliceAggregator._MAX_COPY_SECONDS = self._old_timeout
def _run_with_steps(self):
aggregator = training_utils_v1.OutputsAggregator(use_steps=True)
for i, batch in enumerate(np.array_split(_TEST_DATA, 4)):
if i == 0:
aggregator.create(batch)
aggregator.aggregate(batch)
assert len(aggregator.results) == 1
assert isinstance(aggregator.results[0], training_utils_v1.ConcatAggregator)
aggregator.finalize()
return aggregator.results
def _run_without_steps(self):
aggregator = training_utils_v1.OutputsAggregator(
use_steps=False, num_samples=6)
batch_start = 0
for i, batch in enumerate(np.array_split(_TEST_DATA, 4)):
if i == 0:
aggregator.create(batch)
batch_end = batch_start + batch.shape[0]
aggregator.aggregate(batch, batch_start, batch_end)
batch_start = batch_end
assert len(aggregator.results) == 1
assert isinstance(aggregator.results[0], training_utils_v1.SliceAggregator)
aggregator.finalize()
return aggregator.results
def test_with_steps(self):
self.assertAllEqual(self._run_with_steps(), _TEST_DATA)
def test_without_steps(self):
self.assertAllEqual(self._run_without_steps(), _TEST_DATA)
def test_nested_aggregation(self):
aggregator = training_utils_v1.OutputsAggregator(
use_steps=False, num_samples=6)
batches = np.array_split(_TEST_DATA, 4)
batch_start = 0
for i, batch in enumerate(zip(batches, batches)):
if i == 0:
aggregator.create(batch)
batch_end = batch_start + batch[0].shape[0]
aggregator.aggregate(batch, batch_start, batch_end)
batch_start = batch_end
assert len(aggregator.results) == 2
aggregator.finalize()
self.assertAllEqual(aggregator.results, (_TEST_DATA, _TEST_DATA))
def test_concat_single_batch(self):
aggregator = training_utils_v1.OutputsAggregator(use_steps=True)
data = _TEST_DATA.copy()
aggregator.create(data)
assert len(aggregator.results) == 1
assert isinstance(aggregator.results[0], training_utils_v1.ConcatAggregator)
aggregator.aggregate(data)
aggregator.finalize()
assert aggregator.results is data # No copy.
def test_slice_single_batch(self):
aggregator = training_utils_v1.OutputsAggregator(
use_steps=False, num_samples=6)
data = _TEST_DATA.copy()
aggregator.create(data)
assert len(aggregator.results) == 1
assert isinstance(aggregator.results[0], training_utils_v1.SliceAggregator)
aggregator.aggregate(data, 0, 6)
aggregator.finalize()
assert aggregator.results is data # No copy.
def test_async_copy(self):
training_utils_v1.SliceAggregator._BINARY_SIZE_THRESHOLD = 15
self.assertAllEqual(self._run_without_steps(), _TEST_DATA)
# Two of the four batches will have 20 elements and two will have 10.
self.assertEqual(training_utils_v1._COPY_POOL._apply_counter, 2)
def test_async_copy_timeout(self):
training_utils_v1.SliceAggregator._BINARY_SIZE_THRESHOLD = 15
training_utils_v1.SliceAggregator._MAX_COPY_SECONDS = 0.1
training_utils_v1._COPY_POOL._func_wrapper = add_sleep
with self.assertRaisesRegex(ValueError, 'Timed out waiting for copy'):
self._run_without_steps()
def test_async_copy_reraise(self):
training_utils_v1.SliceAggregator._BINARY_SIZE_THRESHOLD = 15
training_utils_v1.SliceAggregator._MAX_COPY_SECONDS = 1.
training_utils_v1._COPY_POOL._func_wrapper = cause_error
with self.assertRaisesRegex(TypeError, 'NoneType'):
self._run_without_steps()
class CompositeTensorTestUtils(keras_parameterized.TestCase):
def test_is_composite(self):
# Validate that all composite tensor and value types return true.
self.assertTrue(
training_utils_v1.is_composite_or_composite_value(
sparse_tensor.SparseTensor([[0, 0]], [1], [1, 1])))
self.assertTrue(
training_utils_v1.is_composite_or_composite_value(
sparse_tensor.SparseTensorValue([[0, 0]], [1], [1, 1])))
self.assertTrue(
training_utils_v1.is_composite_or_composite_value(
ragged_tensor.RaggedTensor.from_row_splits(
np.array([0, 1, 2]), np.array([0, 1, 3], dtype=np.int64))))
self.assertTrue(
training_utils_v1.is_composite_or_composite_value(
ragged_tensor_value.RaggedTensorValue(
np.array([0, 1, 2]), np.array([0, 1, 3], dtype=np.int64))))
# Test that numpy arrays and tensors return false.
self.assertFalse(
training_utils_v1.is_composite_or_composite_value(np.ndarray([0, 1])))
self.assertFalse(
training_utils_v1.is_composite_or_composite_value(
ops.convert_to_tensor_v2_with_dispatch([3, 1])))
def test_sparse_concatenation(self):
tensor_1 = sparse_tensor.SparseTensor([[0, 0]], [1], [1, 1])
tensor_2 = sparse_tensor.SparseTensor([[0, 0]], [2], [1, 1])
concatenated_tensor = training_utils_v1._append_composite_tensor(
tensor_1, tensor_2)
evaluated_tensor = self.evaluate(concatenated_tensor)
self.assertAllEqual(evaluated_tensor.indices, [[0, 0], [1, 0]])
self.assertAllEqual(evaluated_tensor.values, [1, 2])
self.assertAllEqual(evaluated_tensor.dense_shape, [2, 1])
def test_sparse_value_concatenation(self):
tensor_1 = sparse_tensor.SparseTensorValue([[0, 0]], [1], [1, 1])
tensor_2 = sparse_tensor.SparseTensorValue([[0, 0]], [2], [1, 1])
concatenated_tensor = training_utils_v1._append_composite_tensor(
tensor_1, tensor_2)
self.assertAllEqual(concatenated_tensor.indices, [[0, 0], [1, 0]])
self.assertAllEqual(concatenated_tensor.values, [1, 2])
self.assertAllEqual(concatenated_tensor.dense_shape, [2, 1])
def test_ragged_concatenation(self):
tensor_1 = ragged_tensor.RaggedTensor.from_row_splits(
np.array([0, 1, 2]), np.array([0, 1, 3], dtype=np.int64))
tensor_2 = ragged_tensor.RaggedTensor.from_row_splits(
np.array([3, 4, 5]), np.array([0, 2, 3], dtype=np.int64))
concatenated_tensor = training_utils_v1._append_composite_tensor(
tensor_1, tensor_2)
evaluated_tensor = self.evaluate(concatenated_tensor)
self.assertAllEqual(evaluated_tensor.values, [0, 1, 2, 3, 4, 5])
self.assertAllEqual(evaluated_tensor.row_splits, [0, 1, 3, 5, 6])
def test_ragged_value_concatenation(self):
tensor_1 = ragged_tensor_value.RaggedTensorValue(
np.array([0, 1, 2]), np.array([0, 1, 3], dtype=np.int64))
tensor_2 = ragged_tensor_value.RaggedTensorValue(
np.array([3, 4, 5]), np.array([0, 2, 3], dtype=np.int64))
concatenated_tensor = training_utils_v1._append_composite_tensor(
tensor_1, tensor_2)
self.assertAllEqual(concatenated_tensor.values, [0, 1, 2, 3, 4, 5])
self.assertAllEqual(concatenated_tensor.row_splits, [0, 1, 3, 5, 6])
if __name__ == '__main__':
test.main()
avg_line_length: 40.393665 | max_line_length: 101 | alphanum_fraction: 0.709085
hexsha: 98e8f430e72d1ba451cafccf0c8d52322c7394bf | size: 2,131 | ext: py | lang: Python
repo_path: cogs/detain.py | repo_name: kevinyang07/dorp-bot | repo_head_hexsha: f27314dd9f716ee0c0759dda04e2ec37a3266dae | licenses: ["MIT"]
max_stars_count: null | max_issues_count: null | max_forks_count: 1 (2021-04-27T22:13:58.000Z .. 2021-04-27T22:13:58.000Z)
import discord
from discord.ext import commands
class detain(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.command()
async def detain(self, ctx, member: discord.Member = None):
overseer = discord.utils.get(ctx.guild.roles, name = "Overseer")
guard = discord.utils.get(ctx.guild.roles, name = "Guard")
judge = discord.utils.get(ctx.guild.roles, name = "The Judge")
staff = discord.utils.get(ctx.guild.roles, name = "Staff")
if guard in ctx.author.roles or overseer in ctx.author.roles or judge in ctx.author.roles or staff in ctx.author.roles:
            if member is None:
await ctx.send("Tell me who to put in jail")
else:
role = discord.utils.get(ctx.guild.roles, name='Dead Meme')
role2 = discord.utils.get(ctx.guild.roles, name='Member')
await member.add_roles(role)
await member.remove_roles(role2)
await ctx.send("The user was successfully put in jail")
@commands.command()
async def undetain(self, ctx, member: discord.Member = None):
overseer = discord.utils.get(ctx.guild.roles, name = "Overseer")
guard = discord.utils.get(ctx.guild.roles, name = "Guard")
judge = discord.utils.get(ctx.guild.roles, name = "The Judge")
staff = discord.utils.get(ctx.guild.roles, name = "Staff")
if guard in ctx.author.roles or overseer in ctx.author.roles or judge in ctx.author.roles or staff in ctx.author.roles:
            if member is None:
await ctx.send("Tell me who to take out of jail")
else:
role = discord.utils.get(ctx.guild.roles, name='Dead Meme')
role2 = discord.utils.get(ctx.guild.roles, name='Member')
await member.remove_roles(role)
await member.add_roles(role2)
await ctx.send("The user was successfully taken out of jail")
else:
await ctx.send("ha you suck")
def setup(bot):
bot.add_cog(detain(bot))
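# Hedged usage note (an assumption based on discord.py's extension conventions,
# not shown in this file): the cog would typically be loaded from the bot's entry
# point with something like
#
#     bot.load_extension('cogs.detain')
#
# which calls the setup() hook above and registers the detain/undetain commands.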
avg_line_length: 47.355556 | max_line_length: 128 | alphanum_fraction: 0.600188
hexsha: 3749ffd6c637f2a43916cc129f28a74498e100b0 | size: 11,134 | ext: py | lang: Python
repo_path: src/third_party/wiredtiger/tools/wtstats/wtstats.py | repo_name: stbrody/mongo | repo_head_hexsha: e6df4d48792498545a9069f08ff78f7840700ef6 | licenses: ["Apache-2.0"]
max_stars_count: 29 (2015-01-13T02:34:23.000Z .. 2022-01-30T16:57:10.000Z) | max_issues_count: 1 (2015-05-29T16:12:10.000Z .. 2015-05-29T16:12:10.000Z) | max_forks_count: 12 (2015-01-24T08:40:28.000Z .. 2017-10-04T17:23:39.000Z)
#!/usr/bin/env python
#
# Public Domain 2014-2015 MongoDB, Inc.
# Public Domain 2008-2014 WiredTiger, Inc.
#
# This is free and unencumbered software released into the public domain.
#
# Anyone is free to copy, modify, publish, use, compile, sell, or
# distribute this software, either in source code form or as a compiled
# binary, for any purpose, commercial or non-commercial, and by any
# means.
#
# In jurisdictions that recognize copyright laws, the author or authors
# of this software dedicate any and all copyright interest in the
# software to the public domain. We make this dedication for the benefit
# of the public at large and to the detriment of our heirs and
# successors. We intend this dedication to be an overt act of
# relinquishment in perpetuity of all present and future rights to this
# software under copyright law.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
import os, re, sys
from collections import defaultdict
from glob import glob
import json
from datetime import datetime
try:
from stat_data \
import groups, no_scale_per_second_list, no_clear_list, prefix_list
except ImportError:
print >>sys.stderr, "Could not import stat_data.py, it should be" \
"in the same directory as %s" % sys.argv[0]
sys.exit(-1)
thisyear = datetime.today().year
def parsetime(s):
return datetime.strptime(s, "%b %d %H:%M:%S").replace(year=thisyear)
if sys.version_info<(2,7,0):
print >>sys.stderr, "You need python 2.7 or later to run this script"
sys.exit(-1)
# Plot a set of entries for a title.
def munge(args, title, values):
t0, v0 = values[0]
start_time = parsetime(t0)
ylabel = ' '.join(title.split(' ')).lower()
if title.split(' ')[1] != 'spinlock' and \
title.split(' ', 1)[1] in no_scale_per_second_list:
seconds = 1
elif 'wtperf' in title and 'per second' not in title:
seconds = 1
else:
t1, v1 = values[1]
seconds = (parsetime(t1) - start_time).seconds
if not ylabel.endswith('per second'):
ylabel += ' per second'
if seconds == 0:
seconds = 1
stats_cleared = False
if args.clear or title.split(' ', 1)[1] in no_clear_list or 'wtperf' in title:
stats_cleared = True
# Split the values into a dictionary of y-axis values keyed by the x axis
ydata = {}
last_value = 0.0
for t, v in sorted(values):
float_v = float(v)
if not stats_cleared:
float_v = float_v - last_value
# Sometimes WiredTiger stats go backwards without clear, assume
# that means nothing happened
if float_v < 0:
float_v = 0.0
last_value = float(v)
ydata[t] = float_v / seconds
return ylabel, ydata
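# Worked example (illustrative only, not part of the original script): for a
# cumulative counter sampled as ("Dec 05 14:43:14", 100) and
# ("Dec 05 14:43:24", 250) without --clear, munge() computes seconds = 10 from the
# first two timestamps, reports the delta divided by that interval
# (ydata["Dec 05 14:43:24"] == (250 - 100) / 10 == 15.0), and appends
# "per second" to the y-axis label.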
# Parse the command line
import argparse
def common_prefix(a, b):
""" compute longest common prefix of a and b """
while not b.startswith(a):
a = a[:-1]
return a
def common_suffix(a, b):
""" compute longest common suffix of a and b """
while not a.endswith(b):
b = b[1:]
return b
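# Illustrative examples (not part of the original script):
#   common_prefix('cache: pages read', 'cache: pages written') -> 'cache: pages '
#   common_suffix('blocks read per second', 'bytes read per second') -> 's read per second'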
def parse_wtstats_file(file, result):
""" parse wtstats file, one stat per line, example format:
Dec 05 14:43:14 0 /data/b block-manager: mapped bytes read
"""
print 'Processing wtstats file: ' + file
# Parse file
for line in open(file, 'rU'):
month, day, time, v, title = line.strip('\n').split(" ", 4)
result[title].append((month + " " + day + " " + time, v))
def parse_wtperf_file(file, result):
""" parse wtperf file, all stats on single line, example format:
Feb 13 17:55:14,0,0,156871,0,N,0,0,0,49,6,6146,0,0,0
"""
print 'Processing wtperf file: ' + file
fh = open(file, 'rU')
# first line contains headings, replace microseconds with milliseconds
headings = fh.next().strip('\n').split(',')[1:]
headings = map(lambda h: h.replace('(uS)', ' (ms)'), headings)
# parse rest of file
for line in fh:
month, day, time, values = re.split(r'[ ,]', line.strip('\n'), 3)
values = values.split(',')
for i, v in enumerate(values):
if v == 'N':
v = 0
# convert us to ms
if '(ms)' in headings[i]:
v = float(v) / 1000.0
result['wtperf: ' + headings[i]].append((month + " " + day + " " + time, v))
def skip_constants(result):
# Process the series, eliminate constants, delete totalsec for wtperf
items = list(result.iteritems())
for title, values in items:
skip = True
t0, v0 = values[0]
for t, v in values:
if v != v0:
skip = False
break
if title == 'wtperf: totalsec':
skip = True
if skip:
del result[title]
return result
def parse_files(files_or_dir):
""" walk through file list or directory and parse according to file type (wtstats / wtperf). """
result = defaultdict(list)
for f in files_or_dir:
if os.path.isfile(f):
# peek at first line to determine type
with open(f, 'rU') as fh:
line = fh.readline()
if line.startswith('#time'):
parse_wtperf_file(f, result)
else:
parse_wtstats_file(f, result)
elif os.path.isdir(f):
for s in glob(os.path.join(f, 'WiredTigerStat*')):
parse_wtstats_file(s, result)
for s in glob(os.path.join(f, 'monitor*')):
parse_wtperf_file(s, result)
return result
def output_series(results, args, prefix=None, grouplist=[]):
""" Write the data into the html template """
# add .html ending if not present
filename, ext = os.path.splitext(args.output)
if ext == '':
ext = '.html'
# open the output file based on prefix
if prefix == None:
outputname = filename + ext
elif len(grouplist) == 0:
outputname = filename +'.' + prefix + ext
else:
outputname = filename +'.group.' + prefix + ext
if prefix != None and len(grouplist) == 0:
this_series = []
for title, ydata in results:
if not prefix in title:
continue
#print 'Appending to dataset: ' + title
this_series.append((title, ydata))
elif prefix != None and len(grouplist) > 0:
this_series = []
for title, ydata in results:
for subgroup in grouplist:
if not subgroup in title:
continue
# print 'Appending to dataset: ' + title
this_series.append((title, ydata))
else:
this_series = results
if len(this_series) == 0:
print 'Output: ' + outputname + ' has no data. Do not create.'
return
json_output = { "series": [] }
for title, ydata in this_series:
json_output["series"].append({
"key": title,
"values": ydata,
});
# load template
this_path = os.path.dirname(os.path.realpath(__file__))
srcfile = os.path.join(this_path, 'wtstats.html.template')
try:
srcfile = open(srcfile)
contents = srcfile.read()
except IOError:
print >>sys.stderr, "Cannot find template file 'wtstats.html." \
"template'. See ./template/README.md for more information."
sys.exit(-1)
srcfile.close()
# if --json write data to <filename>.json
if args.json:
jsonfile = filename + '.json'
with open(jsonfile, 'w') as f:
json.dump(json_output, f)
print "created %s" % jsonfile
# write output file
dstfile = open(outputname, 'wt')
replaced_contents = contents.replace('"### INSERT DATA HERE ###"',
json.dumps(json_output))
dstfile.write(replaced_contents)
dstfile.close()
print "created %s" % dstfile.name
def main():
    parser = argparse.ArgumentParser(description='Create graphs from ' \
        'WiredTiger statistics.')
parser.add_argument('--all', '-A', action='store_true',
help='generate separate html files for each stats group')
parser.add_argument('--clear', action='store_true',
help='WiredTiger stats gathered with clear set')
parser.add_argument('--include', '-I', metavar='regexp',
type=re.compile, action='append',
help='only include series with titles matching regexp')
parser.add_argument('--list', action='store_true',
help='only list the parsed series, does not create html file')
parser.add_argument('--output', '-o', metavar='file', default='wtstats',
help='HTML output file prefix')
parser.add_argument('--json', action='store_true',
help='additionally output data series in json format')
parser.add_argument('files', metavar='file', nargs='+',
        help='input files or directories generated by WiredTiger statistics ' \
        'logging')
args = parser.parse_args()
# Parse files or directory and skip constants
parsed = skip_constants(parse_files(args.files))
# filter results based on --include, compute common prefix and suffix
results = []
prefix = suffix = None
for title, values in sorted(parsed.iteritems()):
title, ydata = munge(args, title, values)
# ignore entries if a list of regular expressions was given
if args.include and not [r for r in args.include if r.search(title)]:
continue
if not 'wtperf' in title:
prefix = title if prefix is None else common_prefix(prefix, title)
suffix = title if suffix is None else common_suffix(title, suffix)
results.append((title, ydata))
# Process titles, eliminate common prefixes and suffixes
if prefix or suffix:
new_results = []
for title, ydata in results:
if 'wtperf' not in title:
title = title[len(prefix):]
if suffix:
title = title[:-len(suffix)]
new_results.append((title, ydata))
results = new_results
# Are we just listing the results?
if args.list:
print
print "Parsed stats:"
for title, ydata in results:
print " ", title
sys.exit(0)
output_series(results, args)
# If the user wants the stats split up by prefix type do so.
if args.all:
for prefix in prefix_list:
output_series(results, args, prefix)
for group in groups.keys():
output_series(results, args, group, groups[group])
if __name__ == '__main__':
main()
avg_line_length: 32.555556 | max_line_length: 100 | alphanum_fraction: 0.607149
hexsha: 842ee231e0090bb8607b966af8eea2809ba70368 | size: 1,221 | ext: py | lang: Python
repo_path: qa/management/commands/index_data.py | repo_name: gileadslostson/es-django-example | repo_head_hexsha: 65c752f2a3a5deeae38de516790977cdbe652a77 | licenses: ["Apache-2.0"]
max_stars_count: 181 (2015-06-12T06:16:34.000Z .. 2022-02-04T18:31:49.000Z) | max_issues_count: 18 (2015-07-22T14:40:13.000Z .. 2018-05-15T19:20:11.000Z) | max_forks_count: 53 (2015-07-09T04:30:11.000Z .. 2022-02-04T18:31:53.000Z)
from __future__ import print_function
import sys
import time
from django.core.management.base import BaseCommand
from django.conf import settings
from elasticsearch_dsl.connections import connections
from elasticsearch.helpers import streaming_bulk
from qa.models import Question, Answer
from qa.search import index
class Command(BaseCommand):
def handle(self, *args, **kwargs):
self.es = connections.get_connection()
index.delete(ignore=404)
index.create()
self.verbose_run(Question)
self.verbose_run(Answer)
def verbose_run(self, model, report_every=100):
name = model._meta.verbose_name
print('Indexing %s: ' % name, end='')
start = time.time()
cnt = 0
for _ in streaming_bulk(
self.es,
(m.to_search().to_dict(True) for m in model.objects.all().iterator()),
index=settings.ES_INDEX,
doc_type=name.lower(),
):
cnt += 1
            if cnt % report_every == 0:
print('.', end='')
sys.stdout.flush()
print('DONE\nIndexing %d %s in %.2f seconds'% (
cnt, name, time.time() - start
))
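# Hedged usage note (an assumption based on Django's management-command layout,
# not stated in this file): living under qa/management/commands/, this indexer
# would normally be run as
#
#     python manage.py index_data
#
# which deletes and recreates the Elasticsearch index, then bulk-indexes every
# Question and Answer via streaming_bulk.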
avg_line_length: 28.395349 | max_line_length: 86 | alphanum_fraction: 0.602785
hexsha: e3fc45bfd032cc8a1b7552e8cf48224705ce090e | size: 6,827 | ext: py | lang: Python
repo_path: verilog/linked_list/refinement-proof/gen_prop.py | repo_name: makaimann/Stanford-FAST | repo_head_hexsha: 69034897209d962b561de0ca5075bf3b9eac9fb4 | licenses: ["BSD-3-Clause"]
max_stars_count: 2 (2019-04-19T21:58:52.000Z .. 2021-02-02T14:01:46.000Z) | max_issues_count: null | max_forks_count: null
#!/usr/bin/env python3
# PARAMETERS
# DEPTH
# NUM_FIFOS
import argparse
import math
counter = "count[{sel}_{sel_width}]"
empty = "empty[{sel}:{sel}]"
val = "{val}_{width}"
extract = "[{lower}:{upper}]"
head = "shared_fifo.ll.head[{sel}_{width}]"
tail = "'shared_fifo.ll.tail[{sel}]'"
PROBLEM="""
[{name}]
description: "AUTOGENERATED"
properties: {formula}
precondition: reset_done
verification: safety
prove: True
"""
def get_full_tag(fifo, elem_num, sel_width, ptr_width):
num_bits = sel_width+ptr_width+1
fifo_shifted = fifo*(2**ptr_width)
res = fifo_shifted+elem_num
return val.format(val=res, width=num_bits)
def get_fifo_tag_extract(sel_width, ptr_width):
return extract.format(upper=(sel_width+ptr_width), lower=ptr_width)
def get_elem_tag_extract(sel_width, ptr_width):
return extract.format(upper=ptr_width-1, lower=0)
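# Worked example (illustrative only): with sel_width=2 and ptr_width=4, the ghost
# tag for element 3 of FIFO 1 is get_full_tag(1, 3, 2, 4):
#   fifo_shifted = 1 * 2**4 = 16, res = 16 + 3 = 19, num_bits = 2 + 4 + 1 = 7 -> "19_7"
# get_fifo_tag_extract(2, 4) then selects bits [6:4] of such a tag and
# get_elem_tag_extract(2, 4) selects bits [3:0].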
def ptr_neq_tail(ptr, num_fifos, ptr_width):
ptr_val = val.format(val=ptr, width=ptr_width)
assertions = ["(({} != shared_fifo.ll.free_list_tail) | (full = 1_1))".format(ptr_val)]
for i in range(num_fifos):
empty_sel = empty.format(sel=i)
empty_bool = "({} = 1_1)".format(empty_sel)
tail_sel = tail.format(sel=i)
ptr_noteq_tail = "({} != {})".format(ptr_val, tail_sel)
assertions.append("({} | {})".format(ptr_noteq_tail, empty_bool))
return " & ".join(assertions)
def counts_consistent_depth(depth, num_fifos, ptr_width, sel_width, addr_width):
count_sum = "free_list_count"
for i in range(num_fifos):
count_sum += " + " + counter.format(sel=i, sel_width=sel_width)
return "({}) = depth".format(count_sum)
def counts_consistent_total(depth, num_fifos, ptr_width, sel_width, addr_width):
count_sum = counter.format(sel=0, sel_width=sel_width)
for i in range(1, num_fifos):
count_sum += " + " + counter.format(sel=i, sel_width=sel_width)
return "({}) = shared_fifo.ll.total_count".format(count_sum)
def counters_lte_depth(depth, num_fifos, ptr_width, sel_width, addr_width):
lte_depth = ["(free_list_count <= depth)"]
for i in range(num_fifos):
lte_depth.append("({} <= depth)".format(counter.format(sel=i, sel_width=sel_width)))
return " & ".join(lte_depth)
def heads_map_to_zero(depth, num_fifos, ptr_width, sel_width, addr_width):
free_list_tag = get_full_tag(num_fifos, 0, sel_width, ptr_width)
mappings = ["((full = 0_1) -> (ghost[shared_fifo.ll.free_list_head] = {}))".format(free_list_tag)]
for i in range(num_fifos):
# TODO: Finish this
empty_sel = empty.format(sel=i)
tag = get_full_tag(i, 0, sel_width, ptr_width)
head_sel = head.format(sel=i, width=sel_width)
ghost_head = "ghost[{}]".format(head_sel)
mappings.append("(({} = 0_1) -> ({} = {}))".format(empty_sel, ghost_head, tag))
return " & ".join(mappings)
def tails_map_to_count(depth, num_fifos, ptr_width, sel_width, addr_width):
fifo_extract = get_fifo_tag_extract(sel_width, ptr_width)
elem_extract = get_elem_tag_extract(sel_width, ptr_width)
mappings = ["((full = 0_1) -> ((ghost[shared_fifo.ll.free_list_tail]{} = {})"
" & ((ghost[shared_fifo.ll.free_list_tail]{} + {}) "
"= free_list_count{})))".format(
fifo_extract,
val.format(val=num_fifos, width=sel_width+1),
elem_extract,
val.format(val=1, width=ptr_width),
extract.format(upper=ptr_width-1, lower=0))]
for i in range(num_fifos):
empty_sel = empty.format(sel=i)
empty_bool = "({} = 0_1)".format(empty_sel)
ghost_tail = "ghost[{}]".format(tail.format(sel=i))
ghost_fifo_extract = "{}{}".format(ghost_tail, get_fifo_tag_extract(sel_width, ptr_width))
ghost_elem_extract = "{}{}".format(ghost_tail, get_elem_tag_extract(sel_width, ptr_width))
ghost_fifo_matches = "({} = {})".format(ghost_fifo_extract,
val.format(val=i, width=sel_width+1))
count_sel = "{}{}".format(counter.format(sel=i, sel_width=sel_width), elem_extract)
ghost_elem_matches = "(({} + {}) = {})".format(ghost_elem_extract,
val.format(val=1, width=ptr_width),
count_sel
)
mappings.append("({} -> ({} & {}))".format(empty_bool, ghost_fifo_matches, ghost_elem_matches))
return " & ".join(mappings)
def next_ptr(depth, num_fifos, ptr_width, sel_width, addr_width):
ptr_assertions = []
fifo_extract = get_fifo_tag_extract(sel_width, ptr_width)
elem_extract = get_elem_tag_extract(sel_width, ptr_width)
for i in range(depth):
p = ptr_neq_tail(i, num_fifos, ptr_width)
ptr_val = val.format(val=i, width=ptr_width)
fifo_match = "(ghost[shared_fifo.ll.next_ptr[{0}]]{1} = ghost[{0}]{1})".format(ptr_val, fifo_extract)
elem_inc = "(ghost[shared_fifo.ll.next_ptr[{0}]]{1} = (ghost[{0}]{1} + {2}))".format(ptr_val, elem_extract, val.format(val=1, width=ptr_width))
ptr_assertions.append("(({}) -> ({} & {}))".format(p, fifo_match, elem_inc))
return " & ".join(ptr_assertions)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Generate the shared fifo refinement property for the provided parameters")
parser.add_argument('--depth', '-d', help='The number of available memory cells (power of 2)', required=True, type=int)
parser.add_argument('--num-fifos', '-n', help='The number of FIFOs sharing the same memory', required=True, type=int)
parser.add_argument('--problems', help='Print individual problems', action="store_true")
args = parser.parse_args()
depth = args.depth
num_fifos = args.num_fifos
assert (2**(depth.bit_length()-1)) == depth, "Expecting a power of two"
ptr_width = math.ceil(math.log2(depth))
sel_width = math.ceil(math.log2(num_fifos))
addr_width = math.ceil(math.log2(num_fifos+1))
problems = {
'counts_consistent_depth': counts_consistent_depth,
'counts_consistent_total': counts_consistent_total,
'counters_lte_depth': counters_lte_depth,
'heads_map_to_zero': heads_map_to_zero,
'tails_map_to_count': tails_map_to_count,
'next_ptr': next_ptr
}
if args.problems:
for name, fun in problems.items():
print(PROBLEM.format(name=name, formula=fun(depth, num_fifos, ptr_width, sel_width, addr_width)))
else:
assertions = []
for name, fun in problems.items():
assertions.append("({})".format(fun(depth, num_fifos, ptr_width, sel_width, addr_width)))
print(" & ".join(assertions))
avg_line_length: 44.620915 | max_line_length: 151 | alphanum_fraction: 0.642449
hexsha: a7adfbd748df0efbfb18e1ed8d5159349e092592 | size: 1,200 | ext: py | lang: Python
repo_path: Apps/phrsasecurityanalytics/rsasa_consts.py | repo_name: nate-axonius/phantom-apps | repo_head_hexsha: 15bb5866b5fa34418b5286a8d0a11f2dcb98e53f | licenses: ["Apache-2.0"]
max_stars_count: 74 (2019-10-22T02:00:53.000Z .. 2022-03-15T12:56:13.000Z) | max_issues_count: 375 (2019-10-22T20:53:50.000Z .. 2021-11-09T21:28:43.000Z) | max_forks_count: 175 (2019-10-23T15:30:42.000Z .. 2021-11-05T21:33:31.000Z)
# --
# File: rsasa_consts.py
#
# Copyright (c) 2017-2021 Splunk Inc.
#
# Licensed under Apache 2.0 (https://www.apache.org/licenses/LICENSE-2.0.txt)
#
# --
RSASA_JSON_URL = "url"
RSASA_JSON_USERNAME = "username"
RSASA_JSON_PASSWORD = "password"
RSASA_JSON_LAST_DATE_TIME = "last_date_time"
RSASA_JSON_INCIDENT_MANAGER = "incident_manager"
RSASA_JSON_EXTRACT_COMPONENTS = "extract_components"
RSASA_JSON_POLL_NOW_DAYS = "poll_now_ingestion_span"
RSASA_JSON_VERIFY_SERVER_CERT = "verify_server_cert"
RSASA_JSON_SCHEDULED_POLL_DAYS = "first_scheduled_ingestion_span"
RSASA_ERR_SERVER_CONNECTION = "Connection failed"
RSASA_ERR_NO_DEVICES = "Found no devices on RSA SA"
RSASA_ERR_TEST_CONNECTIVITY = "Test connectivity failed"
RSASA_ERR_NO_ID = "Could not find specified device ID/name"
RSASA_ERR_JSON_PARSE = "Unable to parse reply, raw string reply: '{raw_text}'"
RSASA_REST_CALL_FAIL = "Call to server failed with error code: {0}, message: {1}"
RSASA_DEFAULT_PAGE_SIZE = 100
RSASA_DEFAULT_ALERT_LIMIT = 100
RSASA_DEFAULT_EVENT_LIMIT = 100
RSASA_DEFAULT_CONTAINER_COUNT = 100
RSASA_DEFAULT_INCIDENT_LIMIT = 1000
RSASA_DEFAULT_START_TIME = 100000000000
RSASA_NO_INCIDENTS = "No incidents to ingest"
avg_line_length: 34.285714 | max_line_length: 81 | alphanum_fraction: 0.814167
hexsha: 570cbaa09a97526eaf07bc5c2d697d91a7c8d621 | size: 146 | ext: py | lang: Python
repo_path: cloudscale/lib/region.py | repo_name: resmo/python-cloudscale | repo_head_hexsha: e194e3f74c4df549e59781861d4a0a1e1abf62fc | licenses: ["MIT"]
max_stars_count: 6 (2019-11-21T15:08:58.000Z .. 2019-12-18T07:46:01.000Z) | max_issues_count: 15 (2019-11-26T19:48:12.000Z .. 2020-05-01T14:52:07.000Z) | max_forks_count: null
from . import CloudscaleBase
class Region(CloudscaleBase):
def __init__(self):
super().__init__()
self.resource = 'regions'
avg_line_length: 18.25 | max_line_length: 33 | alphanum_fraction: 0.657534
hexsha: 8a0fbbb5da575d61cf6f3c547fe09bc483da89a1 | size: 34,240 | ext: py | lang: Python
repo_path: stable_baselines3/common/policies.py | repo_name: wmmc88/stable-baselines3 | repo_head_hexsha: 97b81f9e9ee2e5ba7eb37328bbd21f8eade44e72 | licenses: ["MIT"]
max_stars_count: null | max_issues_count: null | max_forks_count: null
"""Policies: abstract base class and concrete implementations."""
import collections
from abc import ABC, abstractmethod
from functools import partial
from typing import Any, Callable, Dict, List, Optional, Tuple, Type, Union
import gym
import numpy as np
import torch as th
from torch import nn
from stable_baselines3.common.distributions import (
BernoulliDistribution,
CategoricalDistribution,
DiagGaussianDistribution,
Distribution,
MultiCategoricalDistribution,
StateDependentNoiseDistribution,
make_proba_distribution,
)
from stable_baselines3.common.preprocessing import get_action_dim, is_image_space, preprocess_obs
from stable_baselines3.common.torch_layers import BaseFeaturesExtractor, FlattenExtractor, MlpExtractor, NatureCNN, create_mlp
from stable_baselines3.common.utils import get_device, is_vectorized_observation
from stable_baselines3.common.vec_env import VecTransposeImage
class BaseModel(nn.Module, ABC):
"""
The base model object: makes predictions in response to observations.
In the case of policies, the prediction is an action. In the case of critics, it is the
estimated value of the observation.
:param observation_space: The observation space of the environment
:param action_space: The action space of the environment
:param features_extractor_class: Features extractor to use.
:param features_extractor_kwargs: Keyword arguments
to pass to the feature extractor.
:param features_extractor: Network to extract features
(a CNN when using images, a nn.Flatten() layer otherwise)
:param normalize_images: Whether to normalize images or not,
dividing by 255.0 (True by default)
:param optimizer_class: The optimizer to use,
``th.optim.Adam`` by default
:param optimizer_kwargs: Additional keyword arguments,
excluding the learning rate, to pass to the optimizer
"""
def __init__(
self,
observation_space: gym.spaces.Space,
action_space: gym.spaces.Space,
features_extractor_class: Type[BaseFeaturesExtractor] = FlattenExtractor,
features_extractor_kwargs: Optional[Dict[str, Any]] = None,
features_extractor: Optional[nn.Module] = None,
normalize_images: bool = True,
optimizer_class: Type[th.optim.Optimizer] = th.optim.Adam,
optimizer_kwargs: Optional[Dict[str, Any]] = None,
):
super(BaseModel, self).__init__()
if optimizer_kwargs is None:
optimizer_kwargs = {}
if features_extractor_kwargs is None:
features_extractor_kwargs = {}
self.observation_space = observation_space
self.action_space = action_space
self.features_extractor = features_extractor
self.normalize_images = normalize_images
self.optimizer_class = optimizer_class
self.optimizer_kwargs = optimizer_kwargs
self.optimizer = None # type: Optional[th.optim.Optimizer]
self.features_extractor_class = features_extractor_class
self.features_extractor_kwargs = features_extractor_kwargs
@abstractmethod
def forward(self, *args, **kwargs):
del args, kwargs
def extract_features(self, obs: th.Tensor) -> th.Tensor:
"""
Preprocess the observation if needed and extract features.
:param obs:
:return:
"""
assert self.features_extractor is not None, "No feature extractor was set"
preprocessed_obs = preprocess_obs(obs, self.observation_space, normalize_images=self.normalize_images)
return self.features_extractor(preprocessed_obs)
def _get_data(self) -> Dict[str, Any]:
"""
Get data that need to be saved in order to re-create the model.
This corresponds to the arguments of the constructor.
:return:
"""
return dict(
observation_space=self.observation_space,
action_space=self.action_space,
# Passed to the constructor by child class
# squash_output=self.squash_output,
# features_extractor=self.features_extractor
normalize_images=self.normalize_images,
)
@property
def device(self) -> th.device:
"""Infer which device this policy lives on by inspecting its parameters.
If it has no parameters, the 'auto' device is used as a fallback.
:return:"""
for param in self.parameters():
return param.device
return get_device("auto")
def save(self, path: str) -> None:
"""
Save model to a given location.
:param path:
"""
th.save({"state_dict": self.state_dict(), "data": self._get_data()}, path)
@classmethod
def load(cls, path: str, device: Union[th.device, str] = "auto") -> "BaseModel":
"""
Load model from path.
:param path:
:param device: Device on which the policy should be loaded.
:return:
"""
device = get_device(device)
saved_variables = th.load(path, map_location=device)
# Create policy object
model = cls(**saved_variables["data"]) # pytype: disable=not-instantiable
# Load weights
model.load_state_dict(saved_variables["state_dict"])
model.to(device)
return model
def load_from_vector(self, vector: np.ndarray) -> None:
"""
Load parameters from a 1D vector.
:param vector:
"""
th.nn.utils.vector_to_parameters(th.FloatTensor(vector).to(self.device), self.parameters())
def parameters_to_vector(self) -> np.ndarray:
"""
Convert the parameters to a 1D vector.
:return:
"""
return th.nn.utils.parameters_to_vector(self.parameters()).detach().cpu().numpy()
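    # Hedged usage sketch (illustrative only; the file path is hypothetical):
    # save()/load() round-trip a model through a single file holding both the
    # constructor arguments returned by _get_data() and the state_dict weights.
    #
    #     model.save("/tmp/policy.pth")
    #     restored = model.__class__.load("/tmp/policy.pth", device="cpu")
    #     flat = restored.parameters_to_vector()  # 1D numpy copy of all parameters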
class BasePolicy(BaseModel):
"""The base policy object.
Parameters are mostly the same as `BaseModel`; additions are documented below.
:param args: positional arguments passed through to `BaseModel`.
:param kwargs: keyword arguments passed through to `BaseModel`.
:param squash_output: For continuous actions, whether the output is squashed
or not using a ``tanh()`` function.
"""
def __init__(self, *args, squash_output: bool = False, **kwargs):
super(BasePolicy, self).__init__(*args, **kwargs)
self._squash_output = squash_output
@staticmethod
def _dummy_schedule(progress_remaining: float) -> float:
""" (float) Useful for pickling policy."""
del progress_remaining
return 0.0
@property
def squash_output(self) -> bool:
"""(bool) Getter for squash_output."""
return self._squash_output
@staticmethod
def init_weights(module: nn.Module, gain: float = 1) -> None:
"""
Orthogonal initialization (used in PPO and A2C)
"""
if isinstance(module, (nn.Linear, nn.Conv2d)):
nn.init.orthogonal_(module.weight, gain=gain)
if module.bias is not None:
module.bias.data.fill_(0.0)
@abstractmethod
def _predict(self, observation: th.Tensor, deterministic: bool = False) -> th.Tensor:
"""
Get the action according to the policy for a given observation.
By default provides a dummy implementation -- not all BasePolicy classes
implement this, e.g. if they are a Critic in an Actor-Critic method.
:param observation:
:param deterministic: Whether to use stochastic or deterministic actions
:return: Taken action according to the policy
"""
def predict(
self,
observation: np.ndarray,
state: Optional[np.ndarray] = None,
mask: Optional[np.ndarray] = None,
deterministic: bool = False,
) -> Tuple[np.ndarray, Optional[np.ndarray]]:
"""
Get the policy action and state from an observation (and optional state).
Includes sugar-coating to handle different observations (e.g. normalizing images).
:param observation: the input observation
:param state: The last states (can be None, used in recurrent policies)
:param mask: The last masks (can be None, used in recurrent policies)
:param deterministic: Whether or not to return deterministic actions.
:return: the model's action and the next state
(used in recurrent policies)
"""
# TODO (GH/1): add support for RNN policies
# if state is None:
# state = self.initial_state
# if mask is None:
# mask = [False for _ in range(self.n_envs)]
observation = np.array(observation)
# Handle the different cases for images
# as PyTorch use channel first format
if is_image_space(self.observation_space):
if not (
observation.shape == self.observation_space.shape or observation.shape[1:] == self.observation_space.shape
):
# Try to re-order the channels
transpose_obs = VecTransposeImage.transpose_image(observation)
if (
transpose_obs.shape == self.observation_space.shape
or transpose_obs.shape[1:] == self.observation_space.shape
):
observation = transpose_obs
vectorized_env = is_vectorized_observation(observation, self.observation_space)
observation = observation.reshape((-1,) + self.observation_space.shape)
observation = th.as_tensor(observation).to(self.device)
with th.no_grad():
actions = self._predict(observation, deterministic=deterministic)
# Convert to numpy
actions = actions.cpu().numpy()
if isinstance(self.action_space, gym.spaces.Box):
if self.squash_output:
# Rescale to proper domain when using squashing
actions = self.unscale_action(actions)
else:
# Actions could be on arbitrary scale, so clip the actions to avoid
# out of bound error (e.g. if sampling from a Gaussian distribution)
actions = np.clip(actions, self.action_space.low, self.action_space.high)
if not vectorized_env:
if state is not None:
raise ValueError("Error: The environment must be vectorized when using recurrent policies.")
actions = actions[0]
return actions, state
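    # Hedged usage sketch (illustrative only; `obs` and `policy` are hypothetical):
    #
    #     action, _state = policy.predict(obs, deterministic=True)
    #
    # For image observations, predict() will transpose channels-last input to the
    # channels-first layout PyTorch expects before evaluating the policy.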
def scale_action(self, action: np.ndarray) -> np.ndarray:
"""
Rescale the action from [low, high] to [-1, 1]
(no need for symmetric action space)
:param action: Action to scale
:return: Scaled action
"""
low, high = self.action_space.low, self.action_space.high
return 2.0 * ((action - low) / (high - low)) - 1.0
def unscale_action(self, scaled_action: np.ndarray) -> np.ndarray:
"""
Rescale the action from [-1, 1] to [low, high]
(no need for symmetric action space)
:param scaled_action: Action to un-scale
"""
low, high = self.action_space.low, self.action_space.high
return low + (0.5 * (scaled_action + 1.0) * (high - low))
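    # Worked example (illustrative only, assuming a Box action space with low=0.0
    # and high=10.0): scale_action(7.5) -> 2 * (7.5 / 10) - 1 = 0.5 and
    # unscale_action(0.5) -> 0 + 0.5 * (0.5 + 1) * 10 = 7.5, so the two methods are
    # exact inverses between [low, high] and [-1, 1].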
class ActorCriticPolicy(BasePolicy):
"""
Policy class for actor-critic algorithms (has both policy and value prediction).
Used by A2C, PPO and the likes.
:param observation_space: Observation space
:param action_space: Action space
:param lr_schedule: Learning rate schedule (could be constant)
:param net_arch: The specification of the policy and value networks.
:param activation_fn: Activation function
:param ortho_init: Whether to use or not orthogonal initialization
:param use_sde: Whether to use State Dependent Exploration or not
:param log_std_init: Initial value for the log standard deviation
:param full_std: Whether to use (n_features x n_actions) parameters
for the std instead of only (n_features,) when using gSDE
:param sde_net_arch: Network architecture for extracting features
when using gSDE. If None, the latent features from the policy will be used.
Pass an empty list to use the states as features.
:param use_expln: Use ``expln()`` function instead of ``exp()`` to ensure
a positive standard deviation (cf paper). It allows to keep variance
above zero and prevent it from growing too fast. In practice, ``exp()`` is usually enough.
:param squash_output: Whether to squash the output using a tanh function,
this allows to ensure boundaries when using gSDE.
:param features_extractor_class: Features extractor to use.
:param features_extractor_kwargs: Keyword arguments
to pass to the feature extractor.
:param normalize_images: Whether to normalize images or not,
dividing by 255.0 (True by default)
:param optimizer_class: The optimizer to use,
``th.optim.Adam`` by default
:param optimizer_kwargs: Additional keyword arguments,
excluding the learning rate, to pass to the optimizer
"""
def __init__(
self,
observation_space: gym.spaces.Space,
action_space: gym.spaces.Space,
lr_schedule: Callable[[float], float],
net_arch: Optional[List[Union[int, Dict[str, List[int]]]]] = None,
activation_fn: Type[nn.Module] = nn.Tanh,
ortho_init: bool = True,
use_sde: bool = False,
log_std_init: float = 0.0,
full_std: bool = True,
sde_net_arch: Optional[List[int]] = None,
use_expln: bool = False,
squash_output: bool = False,
features_extractor_class: Type[BaseFeaturesExtractor] = FlattenExtractor,
features_extractor_kwargs: Optional[Dict[str, Any]] = None,
normalize_images: bool = True,
optimizer_class: Type[th.optim.Optimizer] = th.optim.Adam,
optimizer_kwargs: Optional[Dict[str, Any]] = None,
):
if optimizer_kwargs is None:
optimizer_kwargs = {}
# Small values to avoid NaN in Adam optimizer
if optimizer_class == th.optim.Adam:
optimizer_kwargs["eps"] = 1e-5
super(ActorCriticPolicy, self).__init__(
observation_space,
action_space,
features_extractor_class,
features_extractor_kwargs,
optimizer_class=optimizer_class,
optimizer_kwargs=optimizer_kwargs,
squash_output=squash_output,
)
# Default network architecture, from stable-baselines
if net_arch is None:
if features_extractor_class == FlattenExtractor:
net_arch = [dict(pi=[64, 64], vf=[64, 64])]
else:
net_arch = []
self.net_arch = net_arch
self.activation_fn = activation_fn
self.ortho_init = ortho_init
self.features_extractor = features_extractor_class(self.observation_space, **self.features_extractor_kwargs)
self.features_dim = self.features_extractor.features_dim
self.normalize_images = normalize_images
self.log_std_init = log_std_init
dist_kwargs = None
# Keyword arguments for gSDE distribution
if use_sde:
dist_kwargs = {
"full_std": full_std,
"squash_output": squash_output,
"use_expln": use_expln,
"learn_features": sde_net_arch is not None,
}
self.sde_features_extractor = None
self.sde_net_arch = sde_net_arch
self.use_sde = use_sde
self.dist_kwargs = dist_kwargs
# Action distribution
self.action_dist = make_proba_distribution(action_space, use_sde=use_sde, dist_kwargs=dist_kwargs)
self._build(lr_schedule)
def _get_data(self) -> Dict[str, Any]:
data = super()._get_data()
default_none_kwargs = self.dist_kwargs or collections.defaultdict(lambda: None)
data.update(
dict(
net_arch=self.net_arch,
activation_fn=self.activation_fn,
use_sde=self.use_sde,
log_std_init=self.log_std_init,
squash_output=default_none_kwargs["squash_output"],
full_std=default_none_kwargs["full_std"],
sde_net_arch=default_none_kwargs["sde_net_arch"],
use_expln=default_none_kwargs["use_expln"],
lr_schedule=self._dummy_schedule, # dummy lr schedule, not needed for loading policy alone
ortho_init=self.ortho_init,
optimizer_class=self.optimizer_class,
optimizer_kwargs=self.optimizer_kwargs,
features_extractor_class=self.features_extractor_class,
features_extractor_kwargs=self.features_extractor_kwargs,
)
)
return data
def reset_noise(self, n_envs: int = 1) -> None:
"""
Sample new weights for the exploration matrix.
:param n_envs:
"""
assert isinstance(self.action_dist, StateDependentNoiseDistribution), "reset_noise() is only available when using gSDE"
self.action_dist.sample_weights(self.log_std, batch_size=n_envs)
def _build_mlp_extractor(self) -> None:
"""
Create the policy and value networks.
Part of the layers can be shared.
"""
# Note: If net_arch is None and some features extractor is used,
# net_arch here is an empty list and mlp_extractor does not
# really contain any layers (acts like an identity module).
self.mlp_extractor = MlpExtractor(self.features_dim, net_arch=self.net_arch, activation_fn=self.activation_fn)
def _build(self, lr_schedule: Callable[[float], float]) -> None:
"""
Create the networks and the optimizer.
:param lr_schedule: Learning rate schedule
lr_schedule(1) is the initial learning rate
"""
self._build_mlp_extractor()
latent_dim_pi = self.mlp_extractor.latent_dim_pi
# Separate feature extractor for gSDE
if self.sde_net_arch is not None:
self.sde_features_extractor, latent_sde_dim = create_sde_features_extractor(
self.features_dim, self.sde_net_arch, self.activation_fn
)
if isinstance(self.action_dist, DiagGaussianDistribution):
self.action_net, self.log_std = self.action_dist.proba_distribution_net(
latent_dim=latent_dim_pi, log_std_init=self.log_std_init
)
elif isinstance(self.action_dist, StateDependentNoiseDistribution):
latent_sde_dim = latent_dim_pi if self.sde_net_arch is None else latent_sde_dim
self.action_net, self.log_std = self.action_dist.proba_distribution_net(
latent_dim=latent_dim_pi, latent_sde_dim=latent_sde_dim, log_std_init=self.log_std_init
)
elif isinstance(self.action_dist, CategoricalDistribution):
self.action_net = self.action_dist.proba_distribution_net(latent_dim=latent_dim_pi)
elif isinstance(self.action_dist, MultiCategoricalDistribution):
self.action_net = self.action_dist.proba_distribution_net(latent_dim=latent_dim_pi)
elif isinstance(self.action_dist, BernoulliDistribution):
self.action_net = self.action_dist.proba_distribution_net(latent_dim=latent_dim_pi)
else:
raise NotImplementedError(f"Unsupported distribution '{self.action_dist}'.")
self.value_net = nn.Linear(self.mlp_extractor.latent_dim_vf, 1)
# Init weights: use orthogonal initialization
# with small initial weight for the output
if self.ortho_init:
# TODO: check for features_extractor
# Values from stable-baselines.
# feature_extractor/mlp values are
# originally from openai/baselines (default gains/init_scales).
module_gains = {
self.features_extractor: np.sqrt(2),
self.mlp_extractor: np.sqrt(2),
self.action_net: 0.01,
self.value_net: 1,
}
for module, gain in module_gains.items():
module.apply(partial(self.init_weights, gain=gain))
# Setup optimizer with initial learning rate
self.optimizer = self.optimizer_class(self.parameters(), lr=lr_schedule(1), **self.optimizer_kwargs)
def forward(self, obs: th.Tensor, deterministic: bool = False) -> Tuple[th.Tensor, th.Tensor, th.Tensor]:
"""
Forward pass in all the networks (actor and critic)
:param obs: Observation
:param deterministic: Whether to sample or use deterministic actions
:return: action, value and log probability of the action
"""
latent_pi, latent_vf, latent_sde = self._get_latent(obs)
# Evaluate the values for the given observations
values = self.value_net(latent_vf)
distribution = self._get_action_dist_from_latent(latent_pi, latent_sde=latent_sde)
actions = distribution.get_actions(deterministic=deterministic)
log_prob = distribution.log_prob(actions)
return actions, values, log_prob
def _get_latent(self, obs: th.Tensor) -> Tuple[th.Tensor, th.Tensor, th.Tensor]:
"""
Get the latent code (i.e., activations of the last layer of each network)
for the different networks.
:param obs: Observation
:return: Latent codes
for the actor, the value function and for gSDE function
"""
# Preprocess the observation if needed
features = self.extract_features(obs)
latent_pi, latent_vf = self.mlp_extractor(features)
# Features for sde
latent_sde = latent_pi
if self.sde_features_extractor is not None:
latent_sde = self.sde_features_extractor(features)
return latent_pi, latent_vf, latent_sde
def _get_action_dist_from_latent(self, latent_pi: th.Tensor, latent_sde: Optional[th.Tensor] = None) -> Distribution:
"""
Retrieve action distribution given the latent codes.
:param latent_pi: Latent code for the actor
:param latent_sde: Latent code for the gSDE exploration function
:return: Action distribution
"""
mean_actions = self.action_net(latent_pi)
if isinstance(self.action_dist, DiagGaussianDistribution):
return self.action_dist.proba_distribution(mean_actions, self.log_std)
elif isinstance(self.action_dist, CategoricalDistribution):
# Here mean_actions are the logits before the softmax
return self.action_dist.proba_distribution(action_logits=mean_actions)
elif isinstance(self.action_dist, MultiCategoricalDistribution):
# Here mean_actions are the flattened logits
return self.action_dist.proba_distribution(action_logits=mean_actions)
elif isinstance(self.action_dist, BernoulliDistribution):
# Here mean_actions are the logits (before rounding to get the binary actions)
return self.action_dist.proba_distribution(action_logits=mean_actions)
elif isinstance(self.action_dist, StateDependentNoiseDistribution):
return self.action_dist.proba_distribution(mean_actions, self.log_std, latent_sde)
else:
raise ValueError("Invalid action distribution")
def _predict(self, observation: th.Tensor, deterministic: bool = False) -> th.Tensor:
"""
Get the action according to the policy for a given observation.
:param observation:
:param deterministic: Whether to use stochastic or deterministic actions
:return: Taken action according to the policy
"""
latent_pi, _, latent_sde = self._get_latent(observation)
distribution = self._get_action_dist_from_latent(latent_pi, latent_sde)
return distribution.get_actions(deterministic=deterministic)
def evaluate_actions(self, obs: th.Tensor, actions: th.Tensor) -> Tuple[th.Tensor, th.Tensor, th.Tensor]:
"""
Evaluate actions according to the current policy,
given the observations.
:param obs:
:param actions:
:return: estimated value, log likelihood of taking those actions
and entropy of the action distribution.
"""
latent_pi, latent_vf, latent_sde = self._get_latent(obs)
distribution = self._get_action_dist_from_latent(latent_pi, latent_sde)
log_prob = distribution.log_prob(actions)
values = self.value_net(latent_vf)
return values, log_prob, distribution.entropy()
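# NOTE: illustrative usage sketch, not part of the original module. ``policy``
# and ``obs`` are hypothetical: an ActorCriticPolicy instance and a batch of
# observations already converted to a tensor.
def _example_actor_critic_usage(policy: ActorCriticPolicy, obs: th.Tensor):
    # Rollout collection: one forward pass gives sampled actions, value
    # estimates and the log-probability of the sampled actions.
    actions, values, log_prob = policy.forward(obs, deterministic=False)
    # Training: re-evaluate the stored actions under the current policy to get
    # the values, log-probabilities and entropy used by the A2C/PPO losses.
    values, log_prob, entropy = policy.evaluate_actions(obs, actions)
    return actions, values, log_prob, entropy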
class ActorCriticCnnPolicy(ActorCriticPolicy):
"""
CNN policy class for actor-critic algorithms (has both policy and value prediction).
Used by A2C, PPO and the likes.
:param observation_space: Observation space
:param action_space: Action space
:param lr_schedule: Learning rate schedule (could be constant)
:param net_arch: The specification of the policy and value networks.
:param activation_fn: Activation function
    :param ortho_init: Whether or not to use orthogonal initialization
:param use_sde: Whether to use State Dependent Exploration or not
:param log_std_init: Initial value for the log standard deviation
:param full_std: Whether to use (n_features x n_actions) parameters
for the std instead of only (n_features,) when using gSDE
:param sde_net_arch: Network architecture for extracting features
when using gSDE. If None, the latent features from the policy will be used.
Pass an empty list to use the states as features.
:param use_expln: Use ``expln()`` function instead of ``exp()`` to ensure
        a positive standard deviation (cf paper). It keeps the variance
        above zero and prevents it from growing too fast. In practice, ``exp()`` is usually enough.
    :param squash_output: Whether to squash the output using a tanh function,
        this keeps the actions within bounds when using gSDE.
:param features_extractor_class: Features extractor to use.
:param features_extractor_kwargs: Keyword arguments
to pass to the feature extractor.
:param normalize_images: Whether to normalize images or not,
dividing by 255.0 (True by default)
:param optimizer_class: The optimizer to use,
``th.optim.Adam`` by default
:param optimizer_kwargs: Additional keyword arguments,
excluding the learning rate, to pass to the optimizer
"""
def __init__(
self,
observation_space: gym.spaces.Space,
action_space: gym.spaces.Space,
lr_schedule: Callable,
net_arch: Optional[List[Union[int, Dict[str, List[int]]]]] = None,
activation_fn: Type[nn.Module] = nn.Tanh,
ortho_init: bool = True,
use_sde: bool = False,
log_std_init: float = 0.0,
full_std: bool = True,
sde_net_arch: Optional[List[int]] = None,
use_expln: bool = False,
squash_output: bool = False,
features_extractor_class: Type[BaseFeaturesExtractor] = NatureCNN,
features_extractor_kwargs: Optional[Dict[str, Any]] = None,
normalize_images: bool = True,
optimizer_class: Type[th.optim.Optimizer] = th.optim.Adam,
optimizer_kwargs: Optional[Dict[str, Any]] = None,
):
super(ActorCriticCnnPolicy, self).__init__(
observation_space,
action_space,
lr_schedule,
net_arch,
activation_fn,
ortho_init,
use_sde,
log_std_init,
full_std,
sde_net_arch,
use_expln,
squash_output,
features_extractor_class,
features_extractor_kwargs,
normalize_images,
optimizer_class,
optimizer_kwargs,
)
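# NOTE: illustrative construction sketch, not part of the original module.
# ``env`` is a hypothetical gym environment with image observations; algorithms
# normally pass their own learning rate schedule instead of the constant one used here.
def _example_make_cnn_policy(env: gym.Env) -> ActorCriticCnnPolicy:
    return ActorCriticCnnPolicy(
        observation_space=env.observation_space,
        action_space=env.action_space,
        lr_schedule=lambda _: 2.5e-4,  # constant schedule, for illustration only
    )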
class ContinuousCritic(BaseModel):
"""
Critic network(s) for DDPG/SAC/TD3.
It represents the action-state value function (Q-value function).
Compared to A2C/PPO critics, this one represents the Q-value
and takes the continuous action as input. It is concatenated with the state
and then fed to the network which outputs a single value: Q(s, a).
For more recent algorithms like SAC/TD3, multiple networks
are created to give different estimates.
By default, it creates two critic networks used to reduce overestimation
thanks to clipped Q-learning (cf TD3 paper).
    :param observation_space: Observation space
:param action_space: Action space
:param net_arch: Network architecture
:param features_extractor: Network to extract features
(a CNN when using images, a nn.Flatten() layer otherwise)
:param features_dim: Number of features
:param activation_fn: Activation function
:param normalize_images: Whether to normalize images or not,
dividing by 255.0 (True by default)
:param n_critics: Number of critic networks to create.
"""
def __init__(
self,
observation_space: gym.spaces.Space,
action_space: gym.spaces.Space,
net_arch: List[int],
features_extractor: nn.Module,
features_dim: int,
activation_fn: Type[nn.Module] = nn.ReLU,
normalize_images: bool = True,
n_critics: int = 2,
):
super().__init__(
observation_space,
action_space,
features_extractor=features_extractor,
normalize_images=normalize_images,
)
action_dim = get_action_dim(self.action_space)
self.n_critics = n_critics
self.q_networks = []
for idx in range(n_critics):
q_net = create_mlp(features_dim + action_dim, 1, net_arch, activation_fn)
q_net = nn.Sequential(*q_net)
self.add_module(f"qf{idx}", q_net)
self.q_networks.append(q_net)
def forward(self, obs: th.Tensor, actions: th.Tensor) -> Tuple[th.Tensor, ...]:
# Learn the features extractor using the policy loss only
with th.no_grad():
features = self.extract_features(obs)
qvalue_input = th.cat([features, actions], dim=1)
return tuple(q_net(qvalue_input) for q_net in self.q_networks)
def q1_forward(self, obs: th.Tensor, actions: th.Tensor) -> th.Tensor:
"""
Only predict the Q-value using the first network.
        This reduces computation when not all of the estimates are needed
(e.g. when updating the policy in TD3).
"""
with th.no_grad():
features = self.extract_features(obs)
return self.q_networks[0](th.cat([features, actions], dim=1))
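# NOTE: illustrative construction sketch, not part of the original module. It
# assumes flat (non-image) Box spaces and reuses the FlattenExtractor that this
# file already uses as a default features extractor.
def _example_make_twin_critics(observation_space: gym.spaces.Space, action_space: gym.spaces.Space) -> ContinuousCritic:
    features_extractor = FlattenExtractor(observation_space)
    return ContinuousCritic(
        observation_space=observation_space,
        action_space=action_space,
        net_arch=[400, 300],
        features_extractor=features_extractor,
        features_dim=features_extractor.features_dim,
        n_critics=2,  # two Q-networks, used for clipped Q-learning in TD3/SAC
    )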
def create_sde_features_extractor(
features_dim: int, sde_net_arch: List[int], activation_fn: Type[nn.Module]
) -> Tuple[nn.Sequential, int]:
"""
Create the neural network that will be used to extract features
for the gSDE exploration function.
:param features_dim:
:param sde_net_arch:
:param activation_fn:
:return:
"""
# Special case: when using states as features (i.e. sde_net_arch is an empty list)
# don't use any activation function
sde_activation = activation_fn if len(sde_net_arch) > 0 else None
latent_sde_net = create_mlp(features_dim, -1, sde_net_arch, activation_fn=sde_activation, squash_output=False)
latent_sde_dim = sde_net_arch[-1] if len(sde_net_arch) > 0 else features_dim
sde_features_extractor = nn.Sequential(*latent_sde_net)
return sde_features_extractor, latent_sde_dim
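# NOTE: illustrative sketch, not part of the original module. The sizes below
# are arbitrary example values.
def _example_sde_features_extractor() -> Tuple[nn.Sequential, int]:
    # Two hidden layers of 64 units on top of 8-dimensional features; the
    # returned latent dimension is the size of the last layer (64 here).
    return create_sde_features_extractor(features_dim=8, sde_net_arch=[64, 64], activation_fn=nn.Tanh)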
_policy_registry = dict() # type: Dict[Type[BasePolicy], Dict[str, Type[BasePolicy]]]
def get_policy_from_name(base_policy_type: Type[BasePolicy], name: str) -> Type[BasePolicy]:
"""
Returns the registered policy from the base type and name.
See `register_policy` for registering policies and explanation.
:param base_policy_type: the base policy class
:param name: the policy name
:return: the policy
"""
if base_policy_type not in _policy_registry:
raise KeyError(f"Error: the policy type {base_policy_type} is not registered!")
if name not in _policy_registry[base_policy_type]:
raise KeyError(
f"Error: unknown policy type {name},"
f"the only registed policy type are: {list(_policy_registry[base_policy_type].keys())}!"
)
return _policy_registry[base_policy_type][name]
def register_policy(name: str, policy: Type[BasePolicy]) -> None:
"""
Register a policy, so it can be called using its name.
e.g. SAC('MlpPolicy', ...) instead of SAC(MlpPolicy, ...).
The goal here is to standardize policy naming, e.g.
all algorithms can call upon "MlpPolicy" or "CnnPolicy",
and they receive respective policies that work for them.
    Consider the following:
OnlinePolicy
-- OnlineMlpPolicy ("MlpPolicy")
-- OnlineCnnPolicy ("CnnPolicy")
OfflinePolicy
-- OfflineMlpPolicy ("MlpPolicy")
-- OfflineCnnPolicy ("CnnPolicy")
Two policies have name "MlpPolicy" and two have "CnnPolicy".
In `get_policy_from_name`, the parent class (e.g. OnlinePolicy)
is given and used to select and return the correct policy.
:param name: the policy name
:param policy: the policy class
"""
sub_class = None
for cls in BasePolicy.__subclasses__():
if issubclass(policy, cls):
sub_class = cls
break
if sub_class is None:
raise ValueError(f"Error: the policy {policy} is not of any known subclasses of BasePolicy!")
if sub_class not in _policy_registry:
_policy_registry[sub_class] = {}
if name in _policy_registry[sub_class]:
        # Check whether the registered policy is the same as the one
        # we are trying to register. If it is not,
        # do not override and complain.
if _policy_registry[sub_class][name] != policy:
raise ValueError(f"Error: the name {name} is already registered for a different policy, will not override.")
_policy_registry[sub_class][name] = policy
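# NOTE: illustrative sketch, not part of the original module. Concrete policy
# modules typically call ``register_policy`` at import time; the round trip
# below only shows how the two helpers above fit together.
def _example_policy_registry_roundtrip() -> Type[BasePolicy]:
    register_policy("MlpPolicy", ActorCriticPolicy)
    # Lookup is scoped by the base policy class, so different algorithm
    # families can reuse the same alias without clashing.
    return get_policy_from_name(ActorCriticPolicy, "MlpPolicy")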
| 41.40266
| 127
| 0.665333
|
587e347973a571bdac490e8d58007836e41fe5e8
| 1,099
|
py
|
Python
|
treadmill/runtime/linux/image/__init__.py
|
gaocegege/treadmill
|
04325d319c0ee912c066f07b88b674e84485f154
|
[
"Apache-2.0"
] | 2
|
2017-03-20T07:13:33.000Z
|
2017-05-03T03:39:53.000Z
|
treadmill/runtime/linux/image/__init__.py
|
gaocegege/treadmill
|
04325d319c0ee912c066f07b88b674e84485f154
|
[
"Apache-2.0"
] | 12
|
2017-07-10T07:04:06.000Z
|
2017-07-26T09:32:54.000Z
|
treadmill/runtime/linux/image/__init__.py
|
gaocegege/treadmill
|
04325d319c0ee912c066f07b88b674e84485f154
|
[
"Apache-2.0"
] | 2
|
2017-05-04T11:25:32.000Z
|
2017-07-11T09:10:01.000Z
|
"""Treadmill image."""
from treadmill import appcfg
from . import native
from . import tar
def get_image_repo(tm_env, app_type):
"""Gets the image repository for the given app type or None if it is
invalid.
"""
if app_type == appcfg.AppType.NATIVE:
return native.NativeImageRepository(tm_env)
if app_type == appcfg.AppType.TAR:
return tar.TarImageRepository(tm_env)
return None
def get_image(tm_env, manifest):
"""Gets am image from the given manifest."""
app_type = appcfg.AppType(manifest.get('type'))
image_repo = get_image_repo(tm_env, app_type)
if image_repo is None:
raise Exception(
'There is no repository for app with type {0}.'.format(
app_type
)
)
img_impl = image_repo.get(manifest.get('image'))
if img_impl is None:
raise Exception(
'There is no image {0} for app with type {1}.'.format(
manifest.get('image'), app_type
)
)
return img_impl
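# NOTE: illustrative sketch, not part of the original module. ``tm_env`` stands
# for the Treadmill application environment object that callers of this module
# already have; the helper name is hypothetical.
def _example_native_image_repo(tm_env):
    """Select the image repository for a native app explicitly."""
    return get_image_repo(tm_env, appcfg.AppType.NATIVE)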
__all__ = [
'get_image_repo',
'get_image'
]
| 21.98
| 72
| 0.616015
|
1b8aa33b788e2cde3dad6dd87d7f6543b1d0e51b
| 1,810
|
py
|
Python
|
tests/test_portability.py
|
leskin-in/liboqs
|
0e4885b56968a9958d7e2e2ff15fe972cefea84c
|
[
"MIT"
] | 1
|
2021-02-27T06:18:00.000Z
|
2021-02-27T06:18:00.000Z
|
tests/test_portability.py
|
ArdeshirV/liboqs
|
fa9c2671ec146d4e107eb00d85a2c7b15d349246
|
[
"MIT"
] | 1
|
2021-02-16T14:49:24.000Z
|
2021-02-16T14:49:24.000Z
|
tests/test_portability.py
|
ArdeshirV/liboqs
|
fa9c2671ec146d4e107eb00d85a2c7b15d349246
|
[
"MIT"
] | 1
|
2021-03-13T16:59:13.000Z
|
2021-03-13T16:59:13.000Z
|
# SPDX-License-Identifier: MIT
import helpers
import pytest
import platform
from pathlib import Path
MIN_CPUS = {}
# set other CPU types for other architectures; Westmere supports cpuid but not avx2
MIN_CPUS["x86_64"] = "Westmere"
def check_ubuntu():
try:
with open("/etc/os-release") as f:
for line in f.readlines():
if "Ubuntu" in line:
return True
# if file /etc/os-release doesn't exist, we're definitely not on Ubuntu:
except:
return False
return False
@helpers.filtered_test
@pytest.mark.parametrize('kem_name', helpers.available_kems_by_name())
@pytest.mark.skipif(not check_ubuntu(), reason="Only supported on Ubuntu")
def test_kem(kem_name):
if not(helpers.is_build_portable()):
pytest.skip("Portability not enabled")
if not(helpers.is_kem_enabled_by_name(kem_name)):
pytest.skip('Not enabled')
helpers.run_subprocess(["qemu-"+platform.machine()+"-static", "-cpu", MIN_CPUS[platform.machine()],
helpers.path_to_executable('test_kem'), kem_name])
@helpers.filtered_test
@pytest.mark.parametrize('sig_name', helpers.available_sigs_by_name())
@pytest.mark.skipif(not check_ubuntu(), reason="Only supported on Ubuntu")
def test_sig(sig_name):
if not(helpers.is_build_portable()):
pytest.skip("Portability not enabled")
if not(helpers.is_sig_enabled_by_name(sig_name)):
pytest.skip('Not enabled')
if (sig_name.startswith("picnic")):
pytest.skip("Picnic portability known to not be given.")
helpers.run_subprocess(["qemu-"+platform.machine()+"-static", "-cpu", MIN_CPUS[platform.machine()],
helpers.path_to_executable('test_sig'), sig_name])
if __name__ == "__main__":
import sys
pytest.main(sys.argv)
| 32.321429
| 103
| 0.686188
|
ca84df8a50d205a95efc4456571a6ffbbc4c7d39
| 2,470
|
py
|
Python
|
object_detection/face_detection/detector.py
|
Machine-Learning-Tokyo/practical-ml-implementations
|
7df0217237301b99819c61454dc52c123f228978
|
[
"MIT"
] | 18
|
2020-03-17T08:58:45.000Z
|
2022-03-11T16:48:35.000Z
|
object_detection/face_detection/detector.py
|
Machine-Learning-Tokyo/practical-ml-implementations
|
7df0217237301b99819c61454dc52c123f228978
|
[
"MIT"
] | null | null | null |
object_detection/face_detection/detector.py
|
Machine-Learning-Tokyo/practical-ml-implementations
|
7df0217237301b99819c61454dc52c123f228978
|
[
"MIT"
] | 4
|
2020-04-09T12:51:19.000Z
|
2021-02-15T08:46:27.000Z
|
import tensorflow as tf
import numpy as np
class Detector(object):
def __init__(self, net_factory, data_size, batch_size, model_path):
graph = tf.Graph()
with graph.as_default():
self.image_op = tf.placeholder(tf.float32, shape=[batch_size, data_size, data_size, 3], name='input_image')
self.cls_prob, self.bbox_pred, self.landmark_pred = net_factory(self.image_op, training=False)
self.sess = tf.Session(
config=tf.ConfigProto(allow_soft_placement=True, gpu_options=tf.GPUOptions(allow_growth=True)))
saver = tf.train.Saver()
model_dict = '/'.join(model_path.split('/')[:-1])
ckpt = tf.train.get_checkpoint_state(model_dict)
# print(model_path)
readstate = ckpt and ckpt.model_checkpoint_path
assert readstate, "the params dictionary is not valid"
# print("restore models' param")
saver.restore(self.sess, model_path)
self.data_size = data_size
self.batch_size = batch_size
def predict(self, databatch):
scores = []
batch_size = self.batch_size
minibatch = []
cur = 0
n = databatch.shape[0]
while cur < n:
minibatch.append(databatch[cur:min(cur + batch_size, n), :, :, :])
cur += batch_size
cls_prob_list = []
bbox_pred_list = []
landmark_pred_list = []
for idx, data in enumerate(minibatch):
m = data.shape[0]
real_size = self.batch_size
if m < batch_size:
keep_inds = np.arange(m)
gap = self.batch_size - m
while gap >= len(keep_inds):
gap -= len(keep_inds)
keep_inds = np.concatenate((keep_inds, keep_inds))
if gap != 0:
keep_inds = np.concatenate((keep_inds, keep_inds[:gap]))
data = data[keep_inds]
real_size = m
cls_prob, bbox_pred,landmark_pred = self.sess.run([self.cls_prob, self.bbox_pred,self.landmark_pred], feed_dict={self.image_op: data})
cls_prob_list.append(cls_prob[:real_size])
bbox_pred_list.append(bbox_pred[:real_size])
landmark_pred_list.append(landmark_pred[:real_size])
return np.concatenate(cls_prob_list, axis=0), np.concatenate(bbox_pred_list, axis=0), np.concatenate(landmark_pred_list, axis=0)
| 43.333333
| 146
| 0.597976
|
3ff936eba863f7d35be997fafd17581de9c2b1b9
| 1,023
|
py
|
Python
|
configs/motiongan_v7_action_coh_fp_h36_config.py
|
magnux/MotionGAN
|
24b6fae02dda839411e15fef2bc2be4ff712b76f
|
[
"MIT"
] | 39
|
2019-09-11T14:43:47.000Z
|
2022-03-22T18:08:33.000Z
|
configs/motiongan_v7_action_coh_fp_h36_config.py
|
etarakci-hvl/MotionGAN
|
24b6fae02dda839411e15fef2bc2be4ff712b76f
|
[
"MIT"
] | 20
|
2019-10-10T10:52:40.000Z
|
2022-03-12T00:03:46.000Z
|
configs/motiongan_v7_action_coh_fp_h36_config.py
|
etarakci-hvl/MotionGAN
|
24b6fae02dda839411e15fef2bc2be4ff712b76f
|
[
"MIT"
] | 6
|
2020-02-17T08:28:53.000Z
|
2022-03-07T05:48:35.000Z
|
{
# Datasets: MSRC12, NTURGBD
'data_set': 'Human36',
'data_set_version': 'v1',
# Model version to train
'model_version': 'v7',
# Body shape conservation loss
'shape_loss': True,
# Rescale coords using skeleton average bone len
# 'rescale_coords': True,
# Translate sequence starting point to 0,0,0
'translate_start': True,
# Rotate sequence starting point
'rotate_start': True,
# Action label conditional model
'action_cond': True,
# Augment data on training
'augment_data': True,
# Coherence on generated sequences loss
'coherence_loss': True,
# How fast should we learn?
'learning_rate': 1e-3,
# It's the batch size
'batch_size': 128,
# Multiplies length of epoch, useful for tiny datasets
'epoch_factor': 256,
# Number of the random picks (0 == deactivated)
'pick_num': 20,
# Size of the random crop (0 == deactivated)
'crop_len': 200,
# Train on future prediction task only
'train_fp': True,
}
| 29.228571
| 58
| 0.649071
|
2c2c5ac7e2894d65dcf34d8a45d9602d475f7b27
| 663
|
py
|
Python
|
tests/ldap_sync/action/conftest.py
|
agdsn/pycroft
|
ea771141d59c88fdb8a782eafbe106240550a33a
|
[
"Apache-2.0"
] | 18
|
2016-04-20T19:00:56.000Z
|
2021-12-19T16:43:57.000Z
|
tests/ldap_sync/action/conftest.py
|
agdsn/pycroft
|
ea771141d59c88fdb8a782eafbe106240550a33a
|
[
"Apache-2.0"
] | 461
|
2016-07-20T00:42:59.000Z
|
2022-03-25T17:03:07.000Z
|
tests/ldap_sync/action/conftest.py
|
agdsn/pycroft
|
ea771141d59c88fdb8a782eafbe106240550a33a
|
[
"Apache-2.0"
] | 15
|
2016-07-15T18:46:43.000Z
|
2021-03-17T20:08:39.000Z
|
import ldap3
import pytest
from ldap_sync.record import dn_from_username
@pytest.fixture(scope='class')
def connection():
server = ldap3.Server('fake_server', get_info=ldap3.ALL)
connection = ldap3.Connection(server, user='cn=test', password='pw',
client_strategy=ldap3.MOCK_SYNC)
connection.open()
yield connection
connection.strategy.close()
@pytest.fixture(scope='session')
def base():
return 'ou=Nutzer,ou=Pycroft,dc=AG DSN,dc=de'
@pytest.fixture(scope='session')
def uid():
return 'shizzle'
@pytest.fixture(scope='session')
def dn(uid, base):
return dn_from_username(uid, base=base)
| 22.1
| 72
| 0.687783
|
c33fabac466d2ac4683de116bc43c2c2c5258ee0
| 9,054
|
py
|
Python
|
man_knife_terremark/source/conf.py
|
trinitronx/chef-docs
|
948d76fc0c0cffe17ed6b010274dd626f53584c2
|
[
"CC-BY-3.0"
] | 1
|
2020-02-02T21:57:47.000Z
|
2020-02-02T21:57:47.000Z
|
man_knife_terremark/source/conf.py
|
trinitronx/chef-docs
|
948d76fc0c0cffe17ed6b010274dd626f53584c2
|
[
"CC-BY-3.0"
] | null | null | null |
man_knife_terremark/source/conf.py
|
trinitronx/chef-docs
|
948d76fc0c0cffe17ed6b010274dd626f53584c2
|
[
"CC-BY-3.0"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Chef documentation build configuration file, created by
# sphinx-quickstart on Wed Feb 22 13:50:49 2012.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.todo']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'knife-terremark'
copyright = u'2012, Opscode, Inc'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.0.1'
# The full version, including alpha/beta/rc tags.
release = '0.0.1-1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'emacs'
# highlight_language = 'ruby'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinxdoc'
#html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = "knife-terremark"
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = "../../images/opscode_html_logo.png"
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
html_use_index = False
# If true, the index is split into individual pages for each letter.
#html_split_index = True
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = False
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'knife-terremark'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'knife-terremark.tex', u'knife-terremark',
u'Opscode, Inc.', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
latex_logo = "../../images/opscode_color_text.png"
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'knife-terremark', u'Man page for knife-terremark.',
[u'Opscode'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'knife-terremark', u'knife-terremark',
u'Opscode, Inc.', 'knife-terremark', 'knife-terremark',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# -- Options for Epub output ---------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'knife-terremark'
epub_author = u'Opscode, Inc.'
epub_publisher = u'Opscode, Inc.'
epub_copyright = u'2012, Opscode, Inc.'
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
#epub_exclude_files = []
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
| 31.547038
| 80
| 0.712392
|
48960076f745ffdcde76a0ed6612dfa2cb82a062
| 1,333
|
py
|
Python
|
src/express-route-cross-connection/azext_expressroutecrossconnection/vendored_sdks/v2018_04_01/models/network_interface_association.py
|
Mannan2812/azure-cli-extensions
|
e2b34efe23795f6db9c59100534a40f0813c3d95
|
[
"MIT"
] | 207
|
2017-11-29T06:59:41.000Z
|
2022-03-31T10:00:53.000Z
|
src/express-route-cross-connection/azext_expressroutecrossconnection/vendored_sdks/v2018_04_01/models/network_interface_association.py
|
Mannan2812/azure-cli-extensions
|
e2b34efe23795f6db9c59100534a40f0813c3d95
|
[
"MIT"
] | 4,061
|
2017-10-27T23:19:56.000Z
|
2022-03-31T23:18:30.000Z
|
src/express-route-cross-connection/azext_expressroutecrossconnection/vendored_sdks/v2018_04_01/models/network_interface_association.py
|
Mannan2812/azure-cli-extensions
|
e2b34efe23795f6db9c59100534a40f0813c3d95
|
[
"MIT"
] | 802
|
2017-10-11T17:36:26.000Z
|
2022-03-31T22:24:32.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class NetworkInterfaceAssociation(Model):
"""Network interface and its custom security rules.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar id: Network interface ID.
:vartype id: str
:param security_rules: Collection of custom security rules.
:type security_rules:
list[~azure.mgmt.network.v2018_04_01.models.SecurityRule]
"""
_validation = {
'id': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'security_rules': {'key': 'securityRules', 'type': '[SecurityRule]'},
}
def __init__(self, **kwargs):
super(NetworkInterfaceAssociation, self).__init__(**kwargs)
self.id = None
self.security_rules = kwargs.get('security_rules', None)
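# NOTE: illustrative sketch, not part of the generated SDK module. The ``id``
# attribute is read-only (populated by the server), so only ``security_rules``
# is supplied on the client side.
def _example_association(security_rules=None):
    return NetworkInterfaceAssociation(security_rules=security_rules or [])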
| 32.512195
| 77
| 0.605401
|
6ef61f63d7cf7ceadabc522290238cb4f2fdb10d
| 5,367
|
py
|
Python
|
source code/2_createItemDatabase.py
|
lukasmuell3r/WikidataMetadataPatterns
|
0dd53b1acc5197d77dd9267bb0f637b661c1ccf1
|
[
"MIT"
] | null | null | null |
source code/2_createItemDatabase.py
|
lukasmuell3r/WikidataMetadataPatterns
|
0dd53b1acc5197d77dd9267bb0f637b661c1ccf1
|
[
"MIT"
] | null | null | null |
source code/2_createItemDatabase.py
|
lukasmuell3r/WikidataMetadataPatterns
|
0dd53b1acc5197d77dd9267bb0f637b661c1ccf1
|
[
"MIT"
] | null | null | null |
import bz2
import csv
import json
import os
import re
# compile for efficient use
grep_property_line = re.compile(
    '(^<wd:[Qq][^>]+>\s<p:[Pp](?!31>|279>)[^>]+>\s<wds:[Qq][^>]+>)') # cache property p and the wds node name so that all qualifiers of the statement can be collected; P31/P279 lines are skipped (longer IDs such as P310 or P2790 still match)
grep_wds = re.compile(
    '((?<=wds:)[Qq][^>]+)') # we only want wds nodes of Wikidata items as subject, not of Wikidata properties like wds:P*
grep_property = re.compile('((?<=p:)[Pp][^>]+)')
grep_qualifier = re.compile('((?<=pq:)[Pp][^>]+)')
grep_wditem = re.compile('^<wds?:([Qq][^->]+)')
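# NOTE: illustrative sketch, not part of the original script. The triple below
# is a hypothetical reified-statement line, only meant to show what the
# pre-compiled patterns above capture.
def _example_regex_capture():
    line = '<wd:Q42> <p:P69> <wds:Q42-abc123> .'
    # grep_property_line matches the whole "<wd:Q> <p:P> <wds:Q>" prefix,
    # grep_property and grep_wds pull out the property and the statement node.
    assert re.findall(grep_property_line, line) == ['<wd:Q42> <p:P69> <wds:Q42-abc123>']
    assert re.findall(grep_property, line) == ['P69']
    assert re.findall(grep_wds, line) == ['Q42-abc123']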
def extractinformation(files, item_db_file_path, ext_ident_path):
"""
Constructs the item database for frequent itemset mining utilizing regexes.
:param files: The files to process.
:param item_db_file_path: The path to where the item database will be stored.
:param ext_ident_path: The path to the external identifiers file.
:return:
"""
# reading external identifiers
ext_identifiers = []
with open(ext_ident_path, newline='') as external_identifier_csv:
for row in csv.reader(external_identifier_csv):
ext_identifiers.append(row[0])
temp_prop = ''
temp_wds = ''
qualifier_set = set()
item_dict = dict()
item_counter = 0
fileindex = 0
for filename in files:
print("File:\t", filename)
stream = bz2.open(filename, 'rt')
for line in stream:
if len(re.findall(grep_wds, line)) != 0 and re.findall(grep_wds, line)[0].upper() == temp_wds:
if len(re.findall(grep_qualifier, line)) == 1:
qualifier = re.findall(grep_qualifier, line)[0].upper()
qualifier_set.add(qualifier)
else:
pass
# print("Line not needed, ps: or <wd:Q> <p:P> <wds:Q> handled in next operation!")
else:
# Use the FIMI as key, value as FIMI Counter, check if there is at least one qualifier
fimi_tuple = tuple([temp_prop] + list(sorted(qualifier_set)))
# properties P3921 and P4316 contain sparql queries as values and destroy the database format, they are skipped
if len(fimi_tuple) < 2 or fimi_tuple in item_dict.keys() or ext_identifiers.count(
temp_prop) == 1 or temp_prop == 'P3921' or temp_prop == 'P4316':
pass
# print("Item of previous statement already present or is too short or external identifier, skipping item")
else:
item_dict[fimi_tuple] = item_counter
item_counter += 1
qualifier_set = set()
if len(re.findall(grep_property_line, line)) == 1:
if len(re.findall(grep_property, line)) == 1 and len(re.findall(grep_wds, line)) == 1:
temp_prop = re.findall(grep_property, line)[0].upper()
temp_wds = re.findall(grep_wds, line)[0].upper()
stream.close()
with open(item_db_file_path + '\\' + f'{fileindex:04}' + '-items.json', 'w') as file:
file.write(json.dumps({str(k): v for k, v in item_dict.items()}))
file.close()
fileindex += 1
fimi_tuple = tuple([temp_prop] + list(sorted(qualifier_set)))
if len(fimi_tuple) < 2 or fimi_tuple in item_dict.keys() or ext_identifiers.count(
temp_prop) == 1 or temp_prop == 'P3921' or temp_prop == 'P4316':
pass
# print("Item of previous statement already present or is too short, skipping this item now")
else:
item_dict[fimi_tuple] = item_counter
item_counter += 1
print("Writing final Item Database!")
with open(item_db_file_path + '\\items.json', 'w') as file:
# convert keys to string before dumping
file.write(json.dumps({str(k): v for k, v in item_dict.items()}))
file.close()
print("Finished creating item database!")
if __name__ == '__main__':
"""
The main method reads paths and makes sure they exist before calculations start.
"""
input_path = input(
"Enter the directory of the cleaned and splitted .nt.bz2 dump directory (Example: C:\dump\cleaned_dump):\t")
input_path = input_path.replace('"', '').replace("'", "")
assert os.path.exists(input_path), "Path not found at:\t" + str(input_path)
item_db_file_path = input(
"Enter the directory to store the item databases (one each dump shard) (Example: C:\dump\itemdb):\t")
item_db_file_path = item_db_file_path.replace('"', '').replace("'", "")
assert os.path.exists(item_db_file_path), "File not found at:\t" + str(item_db_file_path)
ext_ident_path = input(
"Enter the path to the external identifiers file (Example: C:\dump\external_identifiers_optimization.csv):\t")
ext_ident_path = ext_ident_path.replace('"', '').replace("'", "")
assert os.path.exists(ext_ident_path), "File not found at:\t" + str(ext_ident_path)
# get filelist of provided path
file_list = next(os.walk(input_path))[2]
file_list_fullpath = []
for file in file_list:
file_list_fullpath.append(os.path.join(input_path, file))
# print("Files: ", file_list_fullpath)
extractinformation(file_list_fullpath, item_db_file_path, ext_ident_path)
| 49.694444
| 203
| 0.625303
|
180b70f27c27b98b889980b73160404b8e3ec3c2
| 22,238
|
py
|
Python
|
autopkgtest/autopkgtest-4.4/lib/testdesc.py
|
dwks/egalito-artefact
|
ed11768b47fab6d15c74d3da58d4188eef75a783
|
[
"AFL-1.1"
] | 2
|
2020-01-28T13:12:31.000Z
|
2021-08-24T05:47:18.000Z
|
autopkgtest/autopkgtest-4.4/lib/testdesc.py
|
dwks/egalito-artefact
|
ed11768b47fab6d15c74d3da58d4188eef75a783
|
[
"AFL-1.1"
] | null | null | null |
autopkgtest/autopkgtest-4.4/lib/testdesc.py
|
dwks/egalito-artefact
|
ed11768b47fab6d15c74d3da58d4188eef75a783
|
[
"AFL-1.1"
] | 1
|
2021-09-23T18:17:49.000Z
|
2021-09-23T18:17:49.000Z
|
# testdesc is part of autopkgtest
# autopkgtest is a tool for testing Debian binary packages
#
# autopkgtest is Copyright (C) 2006-2014 Canonical Ltd.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
#
# See the file CREDITS for a full list of credits information (often
# installed as /usr/share/doc/autopkgtest/CREDITS).
import string
import re
import errno
import os.path
import json
import subprocess
import tempfile
import atexit
import shutil
import debian.deb822
import debian.debian_support
import debian.debfile
import adtlog
#
# Abstract test representation
#
known_restrictions = ['rw-build-tree', 'breaks-testbed', 'needs-root',
'build-needed', 'allow-stderr', 'isolation-container',
'isolation-machine', 'needs-recommends', 'needs-reboot']
class Unsupported(Exception):
'''Test cannot be run in the testbed'''
def __init__(self, testname, message):
self.testname = testname
self.message = message
def __str__(self):
return 'Unsupported test %s: %s' % (self.testname, self.message)
def report(self):
adtlog.report(self.testname, 'SKIP %s' % self.message)
class InvalidControl(Exception):
'''Test has invalid control data'''
def __init__(self, testname, message):
self.testname = testname
self.message = message
def __str__(self):
return 'InvalidControl test %s: %s' % (self.testname, self.message)
def report(self):
adtlog.report(self.testname, 'BROKEN %s' % self.message)
class Test:
'''Test description.
This is only a representation of the metadata, it does not have any
actions.
'''
def __init__(self, name, path, command, restrictions, features, depends,
clicks, installed_clicks):
'''Create new test description
A test must have either "path" or "command", the respective other value
must be None.
@name: Test name
@path: path to the test's executable, relative to source tree
@command: shell command for the test code
@restrictions, @features: string lists, as in README.package-tests
@depends: string list of test dependencies (packages)
@clicks: path list of click packages to install for this test
@installed_clicks: names of already installed clicks for this test
'''
if '/' in name:
raise Unsupported(name, 'test name may not contain / character')
for r in restrictions:
if r not in known_restrictions:
raise Unsupported(name, 'unknown restriction %s' % r)
if not ((path is None) ^ (command is None)):
raise InvalidControl(name, 'Test must have either path or command')
self.name = name
self.path = path
self.command = command
self.restrictions = restrictions
self.features = features
self.depends = depends
self.clicks = clicks
self.installed_clicks = installed_clicks
# None while test hasn't run yet; True: pass, False: fail
self.result = None
adtlog.debug('Test defined: name %s path %s command "%s" '
'restrictions %s features %s depends %s clicks %s '
'installed clicks %s' %
(name, path, command, restrictions, features, depends,
clicks, installed_clicks))
def passed(self):
'''Mark test as passed'''
self.result = True
adtlog.report(self.name, 'PASS')
def failed(self, reason):
'''Mark test as failed'''
self.result = False
adtlog.report(self.name, 'FAIL ' + reason)
def check_testbed_compat(self, caps):
'''Check for restrictions incompatible with test bed capabilities.
Raise Unsupported exception if there are any.
'''
if 'isolation-container' in self.restrictions and \
'isolation-container' not in caps and \
'isolation-machine' not in caps:
raise Unsupported(self.name,
'Test requires container-level isolation but '
'testbed does not provide that')
if 'isolation-machine' in self.restrictions and \
'isolation-machine' not in caps:
raise Unsupported(self.name,
'Test requires machine-level isolation but '
'testbed does not provide that')
if 'breaks-testbed' in self.restrictions and \
'revert-full-system' not in caps:
raise Unsupported(self.name,
'Test breaks testbed but testbed does not '
'provide revert-full-system')
if 'needs-root' in self.restrictions and \
'root-on-testbed' not in caps:
raise Unsupported(self.name,
'Test needs root on testbed which is not '
'available')
if 'needs-reboot' in self.restrictions and \
'reboot' not in caps:
raise Unsupported(self.name,
'Test needs to reboot testbed but testbed does '
'not provide reboot capability')
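# NOTE: illustrative sketch, not part of the original module; the values are
# hypothetical and only show how a Test description is put together.
def _example_path_test():
    return Test('smoke', 'debian/tests/smoke', None,
                restrictions=['needs-root'], features=[],
                depends=['python3'], clicks=[], installed_clicks=[])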
#
# Parsing for Debian source packages
#
def parse_rfc822(path):
'''Parse Debian-style RFC822 file
Yield dictionaries with the keys/values.
'''
try:
f = open(path, encoding='UTF-8')
except (IOError, OSError) as oe:
if oe.errno != errno.ENOENT:
raise
return
# filter out comments, python-debian doesn't do that
# (http://bugs.debian.org/743174)
lines = []
for line in f:
# completely ignore ^# as that breaks continuation lines
if line.startswith('#'):
continue
# filter out comments which don't start on first column (Debian
# #743174); entirely remove line if all that's left is whitespace, as
# that again breaks continuation lines
if '#' in line:
line = line.split('#', 1)[0]
if not line.strip():
continue
lines.append(line)
f.close()
for p in debian.deb822.Deb822.iter_paragraphs(lines):
r = {}
for field, value in p.items():
# un-escape continuation lines
v = ''.join(value.split('\n')).replace(' ', ' ')
field = string.capwords(field)
r[field] = v
yield r
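# NOTE: illustrative sketch, not part of the original module; the path is
# hypothetical. A missing file simply yields nothing, as handled above.
def _example_list_test_stanzas(control_path='debian/tests/control'):
    return [record.get('Tests') for record in parse_rfc822(control_path)]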
def _debian_check_unknown_fields(name, record):
unknown_keys = set(record.keys()).difference(
{'Tests', 'Test-command', 'Restrictions', 'Features',
'Depends', 'Tests-directory', 'Classes'})
if unknown_keys:
raise Unsupported(name, 'unknown field %s' % unknown_keys.pop())
def _debian_packages_from_source(srcdir):
packages = []
for st in parse_rfc822(os.path.join(srcdir, 'debian/control')):
if 'Package' not in st:
# source stanza
continue
# filter out udebs and similar stuff which aren't "real" debs
if st.get('Xc-package-type', 'deb') != 'deb' or \
st.get('Package-type', 'deb') != 'deb':
continue
arch = st['Architecture']
if arch in ('all', 'any'):
packages.append('%s (>= 0~)' % st['Package'])
else:
packages.append('%s (>= 0~) [%s]' % (st['Package'], arch))
return packages
def _debian_build_deps_from_source(srcdir, testbed_arch):
deps = ''
for st in parse_rfc822(os.path.join(srcdir, 'debian/control')):
if 'Build-depends' in st:
deps += st['Build-depends']
if 'Build-depends-indep' in st:
deps += ', ' + st['Build-depends-indep']
# resolve arch specific dependencies and build profiles
perl = subprocess.Popen(['perl', '-'], stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
code = '''use Dpkg::Deps;
$supports_profiles = ($Dpkg::Deps::VERSION gt '1.04');
$dep = deps_parse('%s', reduce_arch => 1,
reduce_profiles => $supports_profiles,
build_dep => 1, host_arch => '%s');
$out = $dep->output();
# fall back to ignoring build profiles
$out =~ s/ <[^ >]+>//g if (!$supports_profiles);
print $out, "\\n";
''' % (deps, testbed_arch)
deps = perl.communicate(code.encode('UTF-8'))[0].decode('UTF-8').strip()
if perl.returncode != 0:
raise InvalidControl('source', 'Invalid build dependencies')
deps = [d.strip() for d in deps.split(',')]
# @builddeps@ should always imply build-essential
deps.append('build-essential')
return deps
dep_re = re.compile(
r'(?P<package>[a-z0-9+-.]+)(?::native)?\s*'
r'(\((?P<relation><<|<=|>=|=|>>)\s*(?P<version>[^\)]*)\))?'
r'(\s*\[[[a-z0-9+-.! ]+\])?$')
def _debian_check_dep(testname, dep):
'''Check a single Debian dependency'''
dep = dep.strip()
m = dep_re.match(dep)
if not m:
raise InvalidControl(testname, "Test Depends field contains an "
"invalid dependency `%s'" % dep)
if m.group("version"):
try:
debian.debian_support.NativeVersion(m.group('version'))
except ValueError:
raise InvalidControl(testname, "Test Depends field contains "
"dependency `%s' with an "
"invalid version" % dep)
except AttributeError:
# too old python-debian, skip the check
pass
def _parse_debian_depends(testname, dep_str, srcdir, testbed_arch):
'''Parse Depends: line in a Debian package
Split dependencies (comma separated), validate their syntax, and expand @
and @builddeps@. Return a list of dependencies.
This may raise an InvalidControl exception if there are invalid
dependencies.
'''
deps = []
for alt_group_str in dep_str.split(','):
alt_group_str = alt_group_str.strip()
if not alt_group_str:
# happens for empty depends or trailing commas
continue
adtlog.debug('processing dependency %s' % alt_group_str)
if alt_group_str == '@':
for d in _debian_packages_from_source(srcdir):
adtlog.debug('synthesised dependency %s' % d)
deps.append(d)
elif alt_group_str == '@builddeps@':
for d in _debian_build_deps_from_source(srcdir, testbed_arch):
adtlog.debug('synthesised dependency %s' % d)
deps.append(d)
else:
for dep in alt_group_str.split('|'):
_debian_check_dep(testname, dep)
deps.append(alt_group_str)
return deps
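# NOTE: illustrative sketch, not part of the original module. Without '@' or
# '@builddeps@' the source tree is never consulted, so a plain Depends string
# can be expanded on its own; the values below are hypothetical.
def _example_expand_depends():
    # Returns ['python3', 'curl (>= 7.0) | wget']
    return _parse_debian_depends('example-test', 'python3, curl (>= 7.0) | wget',
                                 srcdir='.', testbed_arch='amd64')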
def _autodep8(srcdir):
'''Generate control file with autodep8'''
f = tempfile.NamedTemporaryFile(prefix='autodep8.')
try:
autodep8 = subprocess.Popen(['autodep8'], cwd=srcdir, stdout=f,
stderr=subprocess.PIPE)
except OSError as e:
adtlog.debug('autodep8 not available (%s)' % e)
return None
err = autodep8.communicate()[1].decode()
if autodep8.returncode == 0:
f.flush()
f.seek(0)
ctrl = f.read().decode()
adtlog.debug('autodep8 generated control: -----\n%s\n-------' % ctrl)
return f
f.close()
adtlog.debug('autodep8 failed to generate control (exit status %i): %s' %
(autodep8.returncode, err))
return None
def parse_debian_source(srcdir, testbed_caps, testbed_arch, control_path=None,
auto_control=True):
'''Parse test descriptions from a Debian DEP-8 source dir
You can specify an alternative path for the control file (default:
srcdir/debian/tests/control).
Return (list of Test objects, some_skipped). If this encounters any invalid
restrictions, fields, or test restrictions which cannot be met by the given
testbed capabilities, the test will be skipped (and reported so), and not
be included in the result.
This may raise an InvalidControl exception.
'''
some_skipped = False
command_counter = 0
tests = []
if not control_path:
control_path = os.path.join(srcdir, 'debian', 'tests', 'control')
if not os.path.exists(control_path):
if auto_control:
control = _autodep8(srcdir)
if control is None:
return ([], False)
control_path = control.name
else:
adtlog.debug('auto_control is disabled, no tests')
return ([], False)
for record in parse_rfc822(control_path):
command = None
try:
restrictions = record.get('Restrictions', '').replace(
',', ' ').split()
feature_test_name = None
features = []
record_features = record.get('Features', '').replace(
',', ' ').split()
for feature in record_features:
details = feature.split('=', 1)
if details[0] != 'test-name':
features.append(feature)
continue
if len(details) != 2:
# No value, i.e. a bare 'test-name'
raise InvalidControl(
'*', 'test-name feature with no argument')
if feature_test_name is not None:
raise InvalidControl(
'*', 'only one test-name feature allowed')
feature_test_name = details[1]
features.append(feature)
if 'Tests' in record:
test_names = record['Tests'].replace(',', ' ').split()
depends = _parse_debian_depends(test_names[0],
record.get('Depends', '@'),
srcdir,
testbed_arch)
if 'Test-command' in record:
raise InvalidControl('*', 'Only one of "Tests" or '
'"Test-Command" may be given')
if feature_test_name is not None:
raise InvalidControl(
'*', 'test-name feature incompatible with Tests')
test_dir = record.get('Tests-directory', 'debian/tests')
_debian_check_unknown_fields(test_names[0], record)
for n in test_names:
test = Test(n, os.path.join(test_dir, n), None,
restrictions, features, depends, [], [])
test.check_testbed_compat(testbed_caps)
tests.append(test)
elif 'Test-command' in record:
command = record['Test-command']
depends = _parse_debian_depends(command,
record.get('Depends', '@'),
srcdir,
testbed_arch)
if feature_test_name is None:
command_counter += 1
name = 'command%i' % command_counter
else:
name = feature_test_name
_debian_check_unknown_fields(name, record)
test = Test(name, None, command, restrictions, features,
depends, [], [])
test.check_testbed_compat(testbed_caps)
tests.append(test)
else:
raise InvalidControl('*', 'missing "Tests" or "Test-Command"'
' field')
except Unsupported as u:
u.report()
some_skipped = True
return (tests, some_skipped)
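# NOTE: illustrative sketch, not part of the original module; ``srcdir`` is a
# hypothetical unpacked Debian source tree.
def _example_parse_source_tests(srcdir):
    caps = ['root-on-testbed', 'isolation-container']
    tests, some_skipped = parse_debian_source(srcdir, caps, 'amd64')
    return [t.name for t in tests], some_skipped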
#
# Parsing for click packages
#
def parse_click_manifest(manifest, testbed_caps, clickdeps, use_installed,
srcdir=None):
'''Parse test descriptions from a click manifest.
@manifest: String with the click manifest
@testbed_caps: List of testbed capabilities
@clickdeps: paths of click packages that these tests need
@use_installed: True if test expects the described click to be installed
already
Return (source_dir, list of Test objects, some_skipped). If this encounters
any invalid restrictions, fields, or test restrictions which cannot be met
by the given testbed capabilities, the test will be skipped (and reported
so), and not be included in the result.
If srcdir is given, use that as source for the click package, and return
that as first return value. Otherwise, locate and download the source from
the click's manifest into a temporary directory and use that.
This may raise an InvalidControl exception.
'''
try:
manifest_j = json.loads(manifest)
test_j = manifest_j.get('x-test', {})
except ValueError as e:
raise InvalidControl(
'*', 'click manifest is not valid JSON: %s' % str(e))
if not isinstance(test_j, dict):
raise InvalidControl(
'*', 'click manifest x-test key must be a dictionary')
installed_clicks = []
if use_installed:
installed_clicks.append(manifest_j.get('name'))
some_skipped = False
tests = []
# It's a dictionary and thus does not have a predictable ordering; sort it
# to get a predictable list
for name in sorted(test_j):
desc = test_j[name]
adtlog.debug('parsing click manifest test %s: %s' % (name, desc))
# simple string is the same as { "path": <desc> } without any
# restrictions, or the special "autopilot" case
if isinstance(desc, str):
if name == 'autopilot' and re.match('^[a-z_][a-z0-9_]+$', desc):
desc = {'autopilot_module': desc}
else:
desc = {'path': desc}
if not isinstance(desc, dict):
raise InvalidControl(name, 'click manifest x-test dictionary '
'entries must be strings or dicts')
# autopilot special case: dict with extra depends
if 'autopilot_module' in desc:
desc['command'] = \
'PYTHONPATH=app/tests/autopilot:tests/autopilot:$PYTHONPATH '\
'python3 -m autopilot.run run -v -f subunit -o ' \
'$AUTOPKGTEST_ARTIFACTS/%s.subunit ' % name + os.environ.get(
'AUTOPKGTEST_AUTOPILOT_MODULE',
os.environ.get('ADT_AUTOPILOT_MODULE', desc['autopilot_module']))
desc.setdefault('depends', []).insert(
0, 'ubuntu-ui-toolkit-autopilot')
desc['depends'].insert(0, 'autopilot-touch')
if 'allow-stderr' not in desc.setdefault('restrictions', []):
desc['restrictions'].append('allow-stderr')
try:
test = Test(name, desc.get('path'), desc.get('command'),
desc.get('restrictions', []), desc.get('features', []),
desc.get('depends', []), clickdeps, installed_clicks)
test.check_testbed_compat(testbed_caps)
tests.append(test)
except Unsupported as u:
u.report()
some_skipped = True
if srcdir is None:
# do we have an x-source/vcs-bzr link?
if 'x-source' in manifest_j:
try:
repo = manifest_j['x-source']['vcs-bzr']
adtlog.info('checking out click source from %s' % repo)
d = tempfile.mkdtemp(prefix='adt.clicksrc.')
atexit.register(shutil.rmtree, d, ignore_errors=True)
try:
subprocess.check_call(['bzr', 'checkout', '--lightweight',
repo, d])
srcdir = d
except subprocess.CalledProcessError as e:
adtlog.error('Failed to check out click source from %s: %s'
% (repo, str(e)))
except KeyError:
adtlog.error('Click source download from x-source only '
'supports "vcs-bzr" repositories')
else:
adtlog.error('cannot download click source: manifest does not '
'have "x-source"')
return (srcdir, tests, some_skipped)
def parse_click(clickpath, testbed_caps, srcdir=None):
'''Parse test descriptions from a click package.
Return (source_dir, list of Test objects, some_skipped). If this encounters
any invalid restrictions, fields, or test restrictions which cannot be met
by the given testbed capabilities, the test will be skipped (and reported
so), and not be included in the result.
If srcdir is given, use that as source for the click package, and return
that as first return value. Otherwise, locate and download the source from
the click's manifest into a temporary directory and use that (not yet
implemented).
This may raise an InvalidControl exception.
'''
pkg = debian.debfile.DebFile(clickpath)
try:
manifest = pkg.control.get_content('manifest').decode('UTF-8')
finally:
pkg.close()
return parse_click_manifest(manifest, testbed_caps, [clickpath], False,
srcdir)
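# Hedged usage sketch (not part of upstream autopkgtest; the manifest string is
# made up): parse_click_manifest() is normally fed the manifest text that
# parse_click() extracts from a .click package, plus the testbed capabilities:
#
#   manifest = '{"name": "com.example.demo", "x-test": {"smoke": "tests/smoke"}}'
#   srcdir, tests, skipped = parse_click_manifest(
#       manifest, testbed_caps=[], clickdeps=[], use_installed=False, srcdir='.')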
| 37.691525
| 85
| 0.573613
|
c5ac99ff84ba08f826d959e83b3f60a0ad05efb9
| 3,516
|
py
|
Python
|
training-code/nn_dropout.py
|
Deeptector/Deeptector
|
4a75200e56105131a26686a741744c018b37936d
|
[
"Apache-2.0"
] | 12
|
2018-09-27T12:57:30.000Z
|
2021-02-24T21:21:11.000Z
|
training-code/nn_dropout.py
|
Deeptector/Deeptector
|
4a75200e56105131a26686a741744c018b37936d
|
[
"Apache-2.0"
] | 1
|
2020-07-22T01:54:56.000Z
|
2020-07-22T01:54:56.000Z
|
training-code/nn_dropout.py
|
Deeptector/Deeptector
|
4a75200e56105131a26686a741744c018b37936d
|
[
"Apache-2.0"
] | 11
|
2018-09-18T18:07:26.000Z
|
2021-07-10T13:46:31.000Z
|
# Lab 10 MNIST and Dropout
import tensorflow as tf
import numpy as np
import random
# import matplotlib.pyplot as plt
tf.set_random_seed(777) # reproducibility
xy = np.loadtxt('train_example.csv', delimiter=',', dtype=np.float32)
x_data = xy[:, 0:-1]
y_data = xy[:, -1]
# number of output classes
nb_classes = 18 # 1:punch_l 2:punch_r 3:punch_l2 4:punch_r2 5:hold
X = tf.placeholder(tf.float32, [None, 864])
Y = tf.placeholder(tf.int32, [None, nb_classes]) # 1:punch_l 2:punch_r 3:punch_l2 4:punch_r2 5:hold
y_data = y_data.astype(int)
one_hot_targets = np.eye(nb_classes)[y_data]
print(one_hot_targets)
W = tf.Variable(tf.random_normal([864, nb_classes]), name='weight')
b = tf.Variable(tf.random_normal([nb_classes]), name='bias')
# parameters
learning_rate = 0.0001
training_epochs = 40
batch_size = 5
total_batch = int(2142 / batch_size)
# dropout (keep_prob) rate 0.7 on training, but should be 1 for testing
keep_prob = tf.placeholder(tf.float32)
# weights & bias for nn layers
W1 = tf.get_variable("W1", shape=[864, 512],
initializer=tf.contrib.layers.xavier_initializer())
b1 = tf.Variable(tf.random_normal([512]))
L1 = tf.nn.relu(tf.matmul(X, W1) + b1)
L1 = tf.nn.dropout(L1, keep_prob=keep_prob)
W2 = tf.get_variable("W2", shape=[512, 512],
initializer=tf.contrib.layers.xavier_initializer())
b2 = tf.Variable(tf.random_normal([512]))
L2 = tf.nn.relu(tf.matmul(L1, W2) + b2)
L2 = tf.nn.dropout(L2, keep_prob=keep_prob)
W3 = tf.get_variable("W3", shape=[512, 512],
initializer=tf.contrib.layers.xavier_initializer())
b3 = tf.Variable(tf.random_normal([512]))
L3 = tf.nn.relu(tf.matmul(L2, W3) + b3)
L3 = tf.nn.dropout(L3, keep_prob=keep_prob)
W4 = tf.get_variable("W4", shape=[512, 512],
initializer=tf.contrib.layers.xavier_initializer())
b4 = tf.Variable(tf.random_normal([512]))
L4 = tf.nn.relu(tf.matmul(L3, W4) + b4)
L4 = tf.nn.dropout(L4, keep_prob=keep_prob)
W5 = tf.get_variable("W5", shape=[512, nb_classes],
initializer=tf.contrib.layers.xavier_initializer())
b5 = tf.Variable(tf.random_normal([nb_classes]))
hypothesis = tf.matmul(L4, W5) + b5  # final layer takes the last hidden layer (L4)
# define cost/loss & optimizer
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
logits=hypothesis, labels=Y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
# initialize
sess = tf.Session()
saver = tf.train.Saver()
sess.run(tf.global_variables_initializer())
# train my model
for epoch in range(training_epochs):
avg_cost = 0
for i in range(2142):
feed_dict = {X: x_data, Y: one_hot_targets, keep_prob: 0.7}
c, _ = sess.run([cost, optimizer], feed_dict=feed_dict)
avg_cost += c / total_batch
print('Epoch:', '%04d' % (epoch + 1), 'cost =', '{:.9f}'.format(c))
saver.save(sess, 'model-deeptector.ckpt')
print('Learning Finished!')
# Test model and check accuracy
correct_prediction = tf.equal(tf.argmax(hypothesis, 1), tf.argmax(Y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
print('Accuracy:', sess.run(accuracy, feed_dict={
X: x_data, Y: one_hot_targets, keep_prob: 1}))
a=2142
b=0
'''
# Get one and predict
for i in range(2142):
print("Label: ", sess.run(tf.argmax(one_hot_targets[i:i + 1], 1)))
result = sess.run(tf.argmax(hypothesis, 1), feed_dict={X: x_data[i:i + 1], keep_prob: 1})
print("Predict : ", result)
if(sess.run(tf.argmax(one_hot_targets[i:i + 1], 1)) == result):
b=b+1
print("Acc : ", (b/a*100))
'''
| 32.256881
| 100
| 0.685438
|
52b039f7e5d876d6fcda21d325c06def56b82935
| 1,577
|
py
|
Python
|
api/reports/views.py
|
Egor4ik325/rankrise
|
c4377237c9afbdda365c01453f73151189129aa9
|
[
"MIT"
] | null | null | null |
api/reports/views.py
|
Egor4ik325/rankrise
|
c4377237c9afbdda365c01453f73151189129aa9
|
[
"MIT"
] | null | null | null |
api/reports/views.py
|
Egor4ik325/rankrise
|
c4377237c9afbdda365c01453f73151189129aa9
|
[
"MIT"
] | null | null | null |
from dj_rest_auth.jwt_auth import JWTAuthentication
from rest_framework import status
from rest_framework.mixins import (
CreateModelMixin,
DestroyModelMixin,
ListModelMixin,
RetrieveModelMixin,
)
from rest_framework.permissions import SAFE_METHODS
from rest_framework.response import Response
from rest_framework.viewsets import GenericViewSet, ViewSet
from .models import Report
from .permissions import ReportPermission
from .serializers import ReportDeserializer, ReportSerializer
class ReportViewSet(
CreateModelMixin,
RetrieveModelMixin,
ListModelMixin,
DestroyModelMixin,
GenericViewSet,
):
"""
ViewSet for report.
"""
# Queryset and serialization
queryset = Report.objects.all()
serializer_class = ReportSerializer
# URLconf
lookup_field = "uuid"
lookup_url_kwarg = "pk"
    lookup_value_regex = r"[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}"  # UUID regex (unanchored: DRF embeds it in the route pattern)
# Authentication and authorization
authentication_classes = [JWTAuthentication]
permission_classes = [ReportPermission]
def get_serializer_class(self):
"""Serializer/deserializer depend on request action/method."""
if self.request.method in SAFE_METHODS:
return ReportSerializer
return ReportDeserializer
def get_queryset(self):
return self.queryset
def create(self, *args, **kwargs):
return super().create(*args, **kwargs)
# return Response({"detail": "Thanks for reporting!"}, status.HTTP_201_CREATED)
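# Hedged usage sketch (router and URL prefix are assumptions, not taken from this
# project's URLconf): the viewset is normally exposed through a DRF router, e.g.
#
#   from rest_framework.routers import DefaultRouter
#   router = DefaultRouter()
#   router.register(r"reports", ReportViewSet, basename="report")
#   urlpatterns = router.urls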
| 28.672727
| 127
| 0.718453
|
34e1ec030f60e33062690678ba6641d2af8b2ef3
| 1,230
|
py
|
Python
|
eds/openmtc-gevent/futile/src/futile/os/mount.py
|
piyush82/elastest-device-emulator-service
|
b4d6b393d6042c54a7b3dfb5f58cad5efd00f0e7
|
[
"Apache-2.0"
] | null | null | null |
eds/openmtc-gevent/futile/src/futile/os/mount.py
|
piyush82/elastest-device-emulator-service
|
b4d6b393d6042c54a7b3dfb5f58cad5efd00f0e7
|
[
"Apache-2.0"
] | null | null | null |
eds/openmtc-gevent/futile/src/futile/os/mount.py
|
piyush82/elastest-device-emulator-service
|
b4d6b393d6042c54a7b3dfb5f58cad5efd00f0e7
|
[
"Apache-2.0"
] | null | null | null |
'''
Created on 24.01.2012
@author: kca
'''
from ..path import Path
from ..subprocess import check_output
def umount(where, force = False):
cmd = [ "umount", where ]
if force:
cmd.append("-f")
check_output(cmd)
unmount = umount
def mount(what, where, fstype = None, options = None):
return Mount(what, where, fstype, options).mount()
class Mount(object):
def __init__(self, what, where, fstype = None, options = None):
self.what = Path(what)
self.where = Path(where)
self.fstype = fstype
options = self.options = options and set(options) or set()
        if self.what.isfile():
            options.add("loop")
        elif not self.what.isblockdev():
            raise ValueError("Mount source must be a file or block device: %s" % (what, ))
    def mount(self, fstype = None, options = None):
        cmd = [ "mount", self.what, self.where ]
        fstype = fstype or self.fstype
        if fstype:
            cmd += [ "-t", fstype ]
        # merge per-call options with the ones stored on the instance
        opts = set(self.options)
        if options:
            opts |= set(options)
        if opts:
            cmd += [ "-o", ','.join(opts) ]
        check_output(cmd)
        return self
__enter__ = mount
def umount(self, force = False):
umount(self.where, force)
unmount = umount
def __exit__(self, exc_type, exc_val, exc_tb):
self.umount(True)
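# Hedged usage sketch (paths and filesystem type are placeholders): Mount is a
# context manager, so a temporary loop mount can be scoped to a with-block and
# __exit__ will force the unmount:
#
#   with Mount("/tmp/image.iso", "/mnt/tmp", fstype="iso9660", options=["ro"]):
#       inspect_tree("/mnt/tmp")  # hypothetical helper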
| 22.777778
| 81
| 0.656098
|
54a39a97884b5e4a50dcf1441205d63d8c366435
| 1,283
|
py
|
Python
|
utils/init.py
|
Flyfoxs/DFL-CNN
|
49ef653488aabbcc2495305d39b63f5f4c5093ea
|
[
"MIT"
] | 251
|
2018-09-12T07:01:50.000Z
|
2022-01-22T03:55:58.000Z
|
utils/init.py
|
Flyfoxs/DFL-CNN
|
49ef653488aabbcc2495305d39b63f5f4c5093ea
|
[
"MIT"
] | 17
|
2018-09-17T14:20:00.000Z
|
2020-12-14T12:04:37.000Z
|
utils/init.py
|
Flyfoxs/DFL-CNN
|
49ef653488aabbcc2495305d39b63f5f4c5093ea
|
[
"MIT"
] | 59
|
2018-10-07T13:35:50.000Z
|
2021-12-29T09:11:00.000Z
|
from torch.nn import init
def init_net(net, init_type='normal'):
init_weights(net, init_type)
return net
def init_weights(net, init_type='normal', gain=0.02):
def init_func(m):
# this will apply to each layer
classname = m.__class__.__name__
        # match PyTorch class names such as Conv2d / ConvTranspose2d / Linear
        if hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1):
            if init_type == 'normal':
                init.normal_(m.weight.data, 0.0, gain)
            elif init_type == 'xavier':
                init.xavier_normal_(m.weight.data, gain=gain)
            elif init_type == 'kaiming':
                init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')  # good for ReLU
elif init_type == 'orthogonal':
init.orthogonal_(m.weight.data, gain=gain)
else:
raise NotImplementedError('initialization method [%s] is not implemented' % init_type)
if hasattr(m, 'bias') and m.bias is not None:
init.constant_(m.bias.data, 0.0)
elif classname.find('BatchNorm2d') != -1:
init.normal_(m.weight.data, 1.0, gain)
init.constant_(m.bias.data, 0.0)
#print('initialize network with %s' % init_type)
net.apply(init_func)
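if __name__ == '__main__':
    # Hedged usage sketch (the toy model below is illustrative, not part of this
    # repo): initialize a small network with Kaiming weights. No forward pass is
    # run here; only the weight initialization is exercised.
    import torch.nn as nn
    demo = nn.Sequential(
        nn.Conv2d(3, 8, kernel_size=3),
        nn.BatchNorm2d(8),
        nn.Linear(8, 10),
    )
    init_net(demo, init_type='kaiming')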
| 45.821429
| 103
| 0.576773
|
4d4c859006a74f50a8ce276b105f11eb4895d8e2
| 4,199
|
py
|
Python
|
dephell/repositories/_local.py
|
jayvdb/dephell
|
b9a7f596a11aa383a2962c64db08324b60f623bd
|
[
"MIT"
] | 1,880
|
2019-03-21T10:08:25.000Z
|
2022-03-31T12:41:55.000Z
|
dephell/repositories/_local.py
|
rachmadaniHaryono/dephell
|
0ef500c8f2d5f05244bac191b1b1383f68464cd2
|
[
"MIT"
] | 356
|
2019-03-21T19:08:56.000Z
|
2021-01-08T17:45:43.000Z
|
dephell/repositories/_local.py
|
rachmadaniHaryono/dephell
|
0ef500c8f2d5f05244bac191b1b1383f68464cd2
|
[
"MIT"
] | 157
|
2019-04-23T01:13:37.000Z
|
2022-03-24T22:41:18.000Z
|
# built-in
from datetime import datetime
from pathlib import Path
from typing import Optional, Tuple, Union
# app
from ..cache import RequirementsCache
from ..config import Config
from ..constants import FILES
from ..models.release import Release
from ._warehouse import WarehouseLocalRepo
from .base import Interface
class LocalRepo(Interface):
def __init__(self, path: Union[Path, str]):
if type(path) is str:
path = Path(path)
self.path = path
def get_releases(self, dep) -> Tuple[Release, ...]:
releases = []
dist_path = (self.path / 'dist')
if dist_path.exists():
repo = WarehouseLocalRepo(name='tmp', path=dist_path)
releases = list(repo.get_releases(dep=dep))
root = self.get_root(name=dep.name, version='0.0.0')
self.update_dep_from_root(dep=dep, root=root)
releases.append(Release(
raw_name=root.raw_name,
version=root.version,
time=datetime.fromtimestamp(self.path.stat().st_mtime),
))
return tuple(reversed(releases))
async def get_dependencies(self, name: str, version: str, extra: Optional[str] = None) -> tuple:
cache = RequirementsCache('local', 'deps', name, str(version))
deps = cache.load()
if deps:
return deps
root = self.get_root(name=name, version=version)
deps = root.dependencies
if extra:
deps = tuple(dep for dep in deps if extra in dep.envs)
cache.dump(root=root)
return deps
def get_root(self, name: str, version: str):
# app
from ..converters import CONVERTERS, EggInfoConverter, SDistConverter, WheelConverter
if not self.path.exists():
raise FileNotFoundError(str(self.path))
# load from file
if self.path.is_file():
for converter in CONVERTERS.values():
if converter.can_parse(path=self.path):
return converter.load(path=self.path)
raise LookupError('cannot find loader for file ' + str(self.path))
# get from wheel or sdist
patterns = (
('-*-*-*.whl', WheelConverter()),
('.tar.gz', SDistConverter()),
('.tgz', SDistConverter()),
)
for suffix, converter in patterns:
paths = tuple(self.path.glob('**/{name}-{version}{suffix}'.format(
name=name.replace('-', '_'),
version=str(version),
suffix=suffix,
)))
if paths:
path = min(paths, key=lambda path: len(path.parts))
return converter.load(path=path)
# read from egg-info
path = self.path / (name + '.egg-info')
if path.exists():
return EggInfoConverter().load(path=path)
# read from dephell config
path = self.path / 'pyproject.toml'
if path.exists():
config = Config().attach_file(path=path, env='main', silent=True)
if config is not None:
section = config.get('to') or config.get('from')
if section and 'path' in section and 'format' in section:
converter = CONVERTERS[section['format']]
path = self.path.joinpath(section['path'])
return converter.load(path)
# get from dependencies file
for fname in FILES:
path = self.path / fname
if not path.exists():
continue
for converter in CONVERTERS.values():
if converter.can_parse(path=path):
return converter.load(path=path)
raise LookupError('cannot find dependencies in ' + str(self.path))
@staticmethod
def update_dep_from_root(dep, root) -> None:
if not dep.description:
dep.description = root.description
if not dep.authors:
dep.authors = root.authors
if not dep.links:
dep.links = root.links
if not dep.classifiers:
dep.classifiers = root.classifiers
if not dep.license:
dep.license = root.license
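# Hedged usage sketch (the path and names are placeholders; `dep` stands for a
# dephell Dependency object produced elsewhere in the pipeline):
#
#   repo = LocalRepo('.')
#   releases = repo.get_releases(dep)
#   deps = await repo.get_dependencies(name='example', version='0.1.0')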
| 34.702479
| 100
| 0.576328
|
d5d27ca01ad32551b13eff257832ae7b33e86a73
| 2,251
|
py
|
Python
|
huaweicloud-sdk-as/huaweicloudsdkas/v1/model/delete_scaling_notification_response.py
|
wuchen-huawei/huaweicloud-sdk-python-v3
|
3683d703f4320edb2b8516f36f16d485cff08fc2
|
[
"Apache-2.0"
] | 1
|
2021-11-03T07:54:50.000Z
|
2021-11-03T07:54:50.000Z
|
huaweicloud-sdk-as/huaweicloudsdkas/v1/model/delete_scaling_notification_response.py
|
wuchen-huawei/huaweicloud-sdk-python-v3
|
3683d703f4320edb2b8516f36f16d485cff08fc2
|
[
"Apache-2.0"
] | null | null | null |
huaweicloud-sdk-as/huaweicloudsdkas/v1/model/delete_scaling_notification_response.py
|
wuchen-huawei/huaweicloud-sdk-python-v3
|
3683d703f4320edb2b8516f36f16d485cff08fc2
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
import pprint
import re
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
class DeleteScalingNotificationResponse(SdkResponse):
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
}
attribute_map = {
}
def __init__(self):
"""DeleteScalingNotificationResponse - a model defined in huaweicloud sdk"""
super(DeleteScalingNotificationResponse, self).__init__()
self.discriminator = None
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, DeleteScalingNotificationResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 27.45122
| 84
| 0.546868
|
547a330139f46d559f4c777c905634d91b6ef6c9
| 5,390
|
py
|
Python
|
metrics/metrics.py
|
neurips2021vat/Variance-Aware-Training
|
2dcd017ef06e81e299448bdd9da65fa682835127
|
[
"BSD-2-Clause"
] | null | null | null |
metrics/metrics.py
|
neurips2021vat/Variance-Aware-Training
|
2dcd017ef06e81e299448bdd9da65fa682835127
|
[
"BSD-2-Clause"
] | null | null | null |
metrics/metrics.py
|
neurips2021vat/Variance-Aware-Training
|
2dcd017ef06e81e299448bdd9da65fa682835127
|
[
"BSD-2-Clause"
] | null | null | null |
import numpy as np
from sklearn.metrics import multilabel_confusion_matrix, roc_auc_score
from skll.metrics import kappa
from sklearn.metrics import mean_squared_error as mse
from mean_average_precision import MetricBuilder
class Dice_score:
def __init__(self, n_classes: int = 2, exclude_class: int = 0):
self.tp = np.array([0] * (n_classes))
self.fp = np.array([0] * (n_classes))
self.fn = np.array([0] * (n_classes))
self.n_classes = n_classes
self.exclude_class = exclude_class
def calc_running_score(self, labels, outputs):
# TODO
labels = np.eye(self.n_classes)[labels.astype(np.int32)]
outputs = np.eye(self.n_classes)[outputs.astype(np.int32)]
tp = np.sum(labels * outputs, axis=0)
fp = np.sum(outputs, axis=0) - tp
fn = np.sum(labels, axis=0) - tp
self.tp = self.tp + tp
self.fp = self.fp + fp
self.fn = self.fn + fn
def compute(self):
        # macro F-beta score (beta=2) over the non-background classes
f1 = ((1 + 2 ** 2) * self.tp[1:] + 1e-3) / (
(1 + 2 ** 2) * self.tp[1:] + 2 ** 2 * self.fn[1:] + self.fp[1:] + 1e-3
)
self.tp = np.array([0] * (self.n_classes))
self.fp = np.array([0] * (self.n_classes))
self.fn = np.array([0] * (self.n_classes))
return np.mean(f1)
def calc_running_score_samplewise(self, labels, outputs):
mae = np.mean(np.abs(labels - outputs), axis=1)
return mae.tolist()
def reset(self):
self.tp = np.array([0] * (self.n_classes))
self.fp = np.array([0] * (self.n_classes))
self.fn = np.array([0] * (self.n_classes))
return True
class RocAuc:
def __init__(self):
self.labels = []
self.outputs = []
def calc_running_score(self, labels: np.array, outputs: np.array):
outputs = np.round(outputs, 1)
self.labels += labels.tolist()
self.outputs += outputs.tolist()
def compute(self):
        score = roc_auc_score(self.labels, self.outputs, average='macro')
self.reset()
return score
def reset(self):
self.labels = []
self.outputs = []
return True
class F1:
def __init__(self, n_classes: int = 2, exclude_class: int = 0):
self.tp = np.array([0] * (n_classes))
self.fp = np.array([0] * (n_classes))
self.fn = np.array([0] * (n_classes))
self.n_classes = n_classes
self.exclude_class = exclude_class
def calc_running_score(self, labels, outputs):
# TODO
labels = np.eye(self.n_classes)[labels.astype(np.int32)]
outputs = np.eye(self.n_classes)[outputs.astype(np.int32)]
tp = np.sum(labels * outputs, axis=0)
fp = np.sum(outputs, axis=0) - tp
fn = np.sum(labels, axis=0) - tp
self.tp = self.tp + tp
self.fp = self.fp + fp
self.fn = self.fn + fn
return True
def compute(self):
        # macro-averaged F1 score
f1 = self.tp / (self.tp + 0.5 * (self.fp + self.fn))
self.reset()
return np.mean(f1)
def reset(self):
self.tp = np.array([0] * (self.n_classes))
self.fp = np.array([0] * (self.n_classes))
self.fn = np.array([0] * (self.n_classes))
return True
class AP:
def __init__(self, n_classes):
self.iou_thresholds = [0.4, 0.45, 0.5, 0.55, 0.6, 0.65, 0.7, 0.75]
self.AP = 0.0
self.n_pictures = 0.0
self.map_calc = MetricBuilder.build_evaluation_metric(
"map_2d", async_mode=True, num_classes=n_classes - 1
)
def calc_running_score(self, y_batch, bboxes, scores, classes):
        # TODO: change index
if y_batch[0]['labels'][0] == 0:
gt = np.zeros((0, 6))
if bboxes.shape[0] != 0:
self.n_pictures += 1
else:
self.n_pictures += 1
gt = np.concatenate(
[
y_batch[0]['boxes'].astype(np.int32),
np.zeros((y_batch[0]['boxes'].shape[0], 1)),
np.zeros((y_batch[0]['boxes'].shape[0], 2)),
],
axis=1,
)
preds = np.concatenate([bboxes.astype(np.int32), classes.astype(np.int32), scores], axis=1)
# gt: [xmin, ymin, xmax, ymax, class_id, difficult, crowd]
# pred: [xmin, ymin, xmax, ymax, class_id, confidence]
self.map_calc.add(preds, gt)
self.AP += self.map_calc.value(
iou_thresholds=self.iou_thresholds, recall_thresholds=np.arange(0.0, 1.01, 0.01), mpolicy='soft'
)['mAP']
self.map_calc.reset()
return True
def compute(self):
mAP = self.AP / self.n_pictures
self.reset_matric()
return mAP
def reset_matric(self):
self.AP = 0.0
self.n_pictures = 0.0
return True
class Kappa:
def __init__(self):
self.outputs = []
self.labels = []
self.N = 5
    def calc_running_score(self, labels: np.array, outputs: np.array):
        self.labels += labels.tolist()
        self.outputs += outputs.tolist()
    def compute(self):
        score = kappa(self.labels, self.outputs, weights='quadratic')
self.reset()
return score
def reset(self):
self.outputs = []
self.labels = []
return True
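if __name__ == '__main__':
    # Hedged usage sketch (the labels/predictions below are made up): accumulate
    # one batch of integer class predictions and read back the macro F1 score.
    f1 = F1(n_classes=3)
    f1.calc_running_score(np.array([0, 1, 2, 1]), np.array([0, 1, 1, 1]))
    print('macro F1:', f1.compute())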
| 26.038647
| 108
| 0.553247
|
3b22eecdab55b0d1bb5cb0111cd14b4fdfd05570
| 4,749
|
py
|
Python
|
sdk/eventhub/azure-eventhub-checkpointstoreblob-aio/tests/test_storage_blob_partition_manager_aio.py
|
iscai-msft/azure-sdk-for-python
|
83715b95c41e519d5be7f1180195e2fba136fc0f
|
[
"MIT"
] | 8
|
2021-01-13T23:44:08.000Z
|
2021-03-17T10:13:36.000Z
|
sdk/eventhub/azure-eventhub-checkpointstoreblob-aio/tests/test_storage_blob_partition_manager_aio.py
|
iscai-msft/azure-sdk-for-python
|
83715b95c41e519d5be7f1180195e2fba136fc0f
|
[
"MIT"
] | 226
|
2019-07-24T07:57:21.000Z
|
2019-10-15T01:07:24.000Z
|
sdk/eventhub/azure-eventhub-checkpointstoreblob-aio/tests/test_storage_blob_partition_manager_aio.py
|
iscai-msft/azure-sdk-for-python
|
83715b95c41e519d5be7f1180195e2fba136fc0f
|
[
"MIT"
] | 2
|
2020-05-21T22:51:22.000Z
|
2020-05-26T20:53:01.000Z
|
#-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#--------------------------------------------------------------------------
import pytest
import time
import os
import uuid
import warnings
import asyncio
from azure.eventhub.extensions.checkpointstoreblobaio import BlobCheckpointStore
from azure.eventhub.extensions.checkpointstoreblobaio._vendor.storage.blob import BlobServiceClient
def get_live_storage_blob_client():
try:
storage_connection_str = os.environ['AZURE_STORAGE_CONN_STR']
except KeyError:
return None, None
container_str = str(uuid.uuid4())
blob_service_client = BlobServiceClient.from_connection_string(storage_connection_str)
blob_service_client.create_container(container_str)
return storage_connection_str, container_str
def remove_live_storage_blob_client(container_str):
try:
storage_connection_str = os.environ['AZURE_STORAGE_CONN_STR']
blob_service_client = BlobServiceClient.from_connection_string(storage_connection_str)
blob_service_client.delete_container(container_str)
except:
warnings.warn(UserWarning("storage container teardown failed"))
async def _claim_and_list_ownership(connection_str, container_name):
fully_qualified_namespace = 'test_namespace'
eventhub_name = 'eventhub'
consumer_group = '$default'
ownership_cnt = 8
checkpoint_store = BlobCheckpointStore.from_connection_string(connection_str, container_name)
async with checkpoint_store:
ownership_list = await checkpoint_store.list_ownership(
fully_qualified_namespace=fully_qualified_namespace,
eventhub_name=eventhub_name,
consumer_group=consumer_group)
assert len(ownership_list) == 0
ownership_list = []
for i in range(ownership_cnt):
ownership = {}
ownership['fully_qualified_namespace'] = fully_qualified_namespace
ownership['eventhub_name'] = eventhub_name
ownership['consumer_group'] = consumer_group
ownership['owner_id'] = 'ownerid'
ownership['partition_id'] = str(i)
ownership['last_modified_time'] = time.time()
ownership_list.append(ownership)
await checkpoint_store.claim_ownership(ownership_list)
ownership_list = await checkpoint_store.list_ownership(
fully_qualified_namespace=fully_qualified_namespace,
eventhub_name=eventhub_name,
consumer_group=consumer_group)
assert len(ownership_list) == ownership_cnt
@pytest.mark.liveTest
def test_claim_and_list_ownership():
connection_str, container_name = get_live_storage_blob_client()
if not connection_str:
pytest.skip("Storage blob client can't be created")
try:
loop = asyncio.get_event_loop()
loop.run_until_complete(_claim_and_list_ownership(connection_str, container_name))
finally:
remove_live_storage_blob_client(container_name)
async def _update_checkpoint(connection_str, container_name):
fully_qualified_namespace = 'test_namespace'
eventhub_name = 'eventhub'
consumer_group = '$default'
partition_cnt = 8
checkpoint_store = BlobCheckpointStore.from_connection_string(connection_str, container_name)
async with checkpoint_store:
for i in range(partition_cnt):
checkpoint = {
'fully_qualified_namespace': fully_qualified_namespace,
'eventhub_name': eventhub_name,
'consumer_group': consumer_group,
'partition_id': str(i),
'offset': '2',
'sequence_number': 20
}
await checkpoint_store.update_checkpoint(checkpoint)
checkpoint_list = await checkpoint_store.list_checkpoints(
fully_qualified_namespace=fully_qualified_namespace,
eventhub_name=eventhub_name,
consumer_group=consumer_group)
assert len(checkpoint_list) == partition_cnt
for checkpoint in checkpoint_list:
assert checkpoint['offset'] == '2'
assert checkpoint['sequence_number'] == 20
@pytest.mark.liveTest
def test_update_checkpoint():
connection_str, container_name = get_live_storage_blob_client()
if not connection_str:
pytest.skip("Storage blob client can't be created")
try:
loop = asyncio.get_event_loop()
loop.run_until_complete(_update_checkpoint(connection_str, container_name))
finally:
remove_live_storage_blob_client(container_name)
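# Hedged note (the connection string value is a placeholder): these live tests
# only run when AZURE_STORAGE_CONN_STR points at a real storage account, e.g.
#
#   export AZURE_STORAGE_CONN_STR="DefaultEndpointsProtocol=https;AccountName=...;AccountKey=..."
#   pytest -m liveTest test_storage_blob_partition_manager_aio.py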
| 37.992
| 99
| 0.698252
|
912ca03f8e8e389b8ee4103e60e3a38bcd3a08fa
| 750
|
py
|
Python
|
sandbox/apps/oauth/admin.py
|
aiegoo/django-blog
|
f1c1de7435336825c97d6d1d030c53df400af73c
|
[
"BSD-3-Clause"
] | null | null | null |
sandbox/apps/oauth/admin.py
|
aiegoo/django-blog
|
f1c1de7435336825c97d6d1d030c53df400af73c
|
[
"BSD-3-Clause"
] | null | null | null |
sandbox/apps/oauth/admin.py
|
aiegoo/django-blog
|
f1c1de7435336825c97d6d1d030c53df400af73c
|
[
"BSD-3-Clause"
] | null | null | null |
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from .models import Ouser
@admin.register(Ouser)
class OuserAdmin(UserAdmin):
list_display = ('username', 'email', 'is_staff', 'is_active', 'date_joined')
fieldsets = (
('basic information', {'fields': (('username', 'email', 'password'), ('link',))}),
('Authority information', {'fields': (('is_active', 'is_staff', 'is_superuser'),
'groups', 'user_permissions')}),
('Important date', {'fields': (('last_login', 'date_joined'),)}),
)
filter_horizontal = ('groups', 'user_permissions',)
list_filter = ('is_staff', 'is_superuser', 'is_active', 'groups')
search_fields = ('username', 'email')
| 41.666667
| 90
| 0.625333
|
be568137447afc13886a2b53aed7f2ea51366249
| 2,009
|
py
|
Python
|
S2l/Thesis_Ch3/Real_robot_exp/exp2_exp3/plotter_taskcompletion_bar.py
|
leopauly/Observation-Learning-Simulations
|
462c04a87c45aae51537b8ea5b44646afa31d3a5
|
[
"MIT"
] | null | null | null |
S2l/Thesis_Ch3/Real_robot_exp/exp2_exp3/plotter_taskcompletion_bar.py
|
leopauly/Observation-Learning-Simulations
|
462c04a87c45aae51537b8ea5b44646afa31d3a5
|
[
"MIT"
] | null | null | null |
S2l/Thesis_Ch3/Real_robot_exp/exp2_exp3/plotter_taskcompletion_bar.py
|
leopauly/Observation-Learning-Simulations
|
462c04a87c45aae51537b8ea5b44646afa31d3a5
|
[
"MIT"
] | null | null | null |
#### For plotting task completion rates as a bar plot for the real-robot experiments (exp2/exp3)
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
matplotlib.style.use('ggplot')
def mean_std(rate1,rate2):
mean=np.mean([rate1,rate2])
std=np.std([rate1,rate2])
return mean,std
#------------------------------------------------------------------------------------------------------------#
mean0_e2,std0_e2=mean_std(.8442,.9515)
mean1_e2,std1_e2=mean_std(.6782,.8442)
mean2_e2,std2_e2=mean_std(.8235,.8650)
mean3_e2,std3_e2=mean_std(.8096,.8477)
mean4_e2,std4_e2=mean_std(.8892,.8615)
mean5_e2,std5_e2=mean_std(.7923,.6332)
mean0_e3,std0_e3=mean_std(1,1)
mean1_e3,std1_e3=mean_std(1,1)
mean2_e3,std2_e3=mean_std(1,1)
mean3_e3,std3_e3=mean_std(1,1)
mean4_e3,std4_e3=mean_std(1,1)
mean5_e3,std5_e3=mean_std(1,1)
#------------------------------------------------------------------------------------------------------------#
mean_e2=[mean0_e2,mean1_e2,mean2_e2,mean3_e2,mean4_e2,mean5_e2]
std_e2=[std0_e2,std1_e2,std2_e2,std3_e2,std4_e2,std5_e2]
mean_e3=[mean0_e3,mean1_e3,mean2_e3,mean3_e3,mean4_e3,mean5_e3]
std_e3=[std0_e3,std1_e3,std2_e3,std3_e3,std4_e3,std5_e3]
#------------------------------------------------------------------------------------------------------------#
bar_width=1
ind_e3=[0,4,8,12,16,20]
ind_e2=[1,5,9,13,17,21]
#------------------------------------------------------------------------------------------------------------#
ax_e2=plt.bar(ind_e2,mean_e2,width=bar_width,yerr=std_e2,bottom=0,color='r',label='Pushing')
ax_e3=plt.bar(ind_e3,mean_e3,width=bar_width,yerr=std_e3,bottom=0,color='g',label='Hammering')
my_ticks=['V1','V2', 'Obj1','Obj2', 'BG','M']
x_ticks=[1,5,9,13,17,21]
plt.xticks(x_ticks,my_ticks)
plt.legend(loc='upper right')
plt.title('Tasks: Reaching and Hammering')
plt.xlabel('Experiments')
plt.ylim(-.5,1.5)
plt.ylabel('Task completion measure')
plt.savefig('Completionrate_barplot_real_e2_e3.png')
plt.show()
| 30.439394
| 111
| 0.599801
|
327f8e9eafdce37911559999e8a3c2a9eb44581a
| 799
|
py
|
Python
|
project/urls.py
|
easthermutheumwengei/my-gallery
|
8ffdabf546c5df44e7febbaedd9cdd5c0f395cc8
|
[
"MIT"
] | null | null | null |
project/urls.py
|
easthermutheumwengei/my-gallery
|
8ffdabf546c5df44e7febbaedd9cdd5c0f395cc8
|
[
"MIT"
] | null | null | null |
project/urls.py
|
easthermutheumwengei/my-gallery
|
8ffdabf546c5df44e7febbaedd9cdd5c0f395cc8
|
[
"MIT"
] | null | null | null |
"""project URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.conf.urls import url,include
from django.contrib import admin
urlpatterns = [
url('admin/', admin.site.urls),
url(r'', include('gallery.urls')),
]
| 34.73913
| 77
| 0.703379
|
47a9b452746b609e674b2d0de6370ba4be19ad75
| 7,040
|
py
|
Python
|
include/server/bw/handlers/games.py
|
spacebeam/bw
|
8f975a2925f309b0038c876f1234595df9798c98
|
[
"Apache-2.0"
] | 2
|
2019-10-30T04:26:21.000Z
|
2019-10-31T17:26:59.000Z
|
include/server/bw/handlers/games.py
|
spacebeam/bw
|
8f975a2925f309b0038c876f1234595df9798c98
|
[
"Apache-2.0"
] | 22
|
2019-08-21T17:13:45.000Z
|
2020-08-06T00:38:56.000Z
|
include/server/bw/handlers/games.py
|
spacebeam/bw
|
8f975a2925f309b0038c876f1234595df9798c98
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# This file is part of bw.
# Distributed under the terms of the last AGPL License.
__author__ = 'Jean Chassoul'
import logging
from tornado import gen
from bw.schemas import games as models
from bw.systems import games
from bw.tools import check_json
from bw.handlers import BaseHandler
from collections import OrderedDict
class Handler(games.Games, BaseHandler):
'''
HTTP request handlers
'''
@gen.coroutine
def head(self,
session=None,
game_uuid=None,
start=None,
end=None,
lapse='hours',
page_num=1):
'''
Get games
'''
# request query arguments
query_args = self.request.arguments
# getting pagination ready
page_num = int(query_args.get('page', [page_num])[0])
# rage against the finite state machine
status = 'all'
# init message on error
message = {'error': True}
# init status that match with our message
self.set_status(400)
# check if we're list processing
if not game_uuid:
message = yield self.get_game_list(session,
start,
end,
lapse,
status,
page_num)
self.set_status(200)
else:
game_uuid = game_uuid.rstrip('/')
message = yield self.get_game(session, game_uuid)
self.set_status(200)
self.finish(message)
@gen.coroutine
def get(self,
session=None,
game_uuid=None,
start=None,
end=None,
lapse='hours',
page_num=1):
'''
Get games
'''
# request query arguments
query_args = self.request.arguments
# session, tournament, run?
session = query_args.get('session', session)
# getting pagination ready
page_num = int(query_args.get('page', [page_num])[0])
# rage against the finite state machine
status = 'all'
# init message on error
message = {'error': True}
# init status that match with our message
self.set_status(400)
# check if we're list processing
if not game_uuid:
# TODO: missing session, start, end, lapse and status support!
message = yield self.get_game_list(session,
start,
end,
lapse,
status,
page_num)
self.set_status(200)
else:
game_uuid = game_uuid.rstrip('/')
message = yield self.get_game(session, game_uuid)
self.set_status(200)
# so long and thanks for all the fish
self.finish(message)
@gen.coroutine
def post(self):
'''
Schedule a new game
'''
struct = yield check_json(self.request.body)
format_pass = (True if struct and not struct.get('errors') else False)
if not format_pass:
self.set_status(400)
self.finish({'JSON': format_pass})
return
# create new game struct
game_uuid = yield self.new_game(struct)
        # complete message with the received uuid.
message = {'uuid': game_uuid}
self.set_status(201)
self.finish(message)
@gen.coroutine
def patch(self, game_uuid):
'''
Modify game
'''
struct = yield check_json(self.request.body)
format_pass = (True if not dict(struct).get('errors') else False)
message = {'message': 'not found'}
if not format_pass:
self.set_status(400)
self.finish({'JSON': format_pass})
return
session = self.request.arguments.get('session', [None])[0]
if not session:
# if no session try to get session from struct
session = struct.get('session', None)
result = yield self.modify_game(session, game_uuid, struct)
if not result:
self.set_status(400)
self.finish(message)
return
self.set_status(200)
message = {'message': 'update completed successfully'}
self.finish(message)
@gen.coroutine
def delete(self, game_uuid):
'''
Delete game
'''
query_args = self.request.arguments
session = query_args.get('session', [None])[0]
result = yield self.remove_game(session, game_uuid)
if not result:
self.set_status(400)
message = {'message': 'Error something was wrong!'}
self.finish(message)
return
self.set_status(204)
self.finish()
@gen.coroutine
def options(self, game_uuid=None):
'''
Resource options
'''
self.set_header('Access-Control-Allow-Origin', '*')
self.set_header('Access-Control-Allow-Methods',
'HEAD, GET, POST, PATCH, DELETE, OPTIONS')
self.set_header('Access-Control-Allow-Headers',
''.join(
('Accept-Language,',
'DNT,Keep-Alive,User-Agent,X-Requested-With,',
'If-Modified-Since,Cache-Control,Content-Type,',
'Content-Range,Range,Date,Etag')))
# allowed http methods
message = {
'Allow': ['HEAD', 'GET', 'POST', 'PATCH', 'DELETE', 'OPTIONS']
}
# resource parameters
parameters = {}
# mock your stuff
stuff = False
while not stuff:
try:
stuff = models.Game.get_mock_object().to_primitive()
except Exception as error:
logging.warning(error)
pass
for k, v in stuff.items():
if v is None:
parameters[k] = str(type('none'))[1:-1].split(' ')[1][1:-1]
else:
parameters[k] = str(type(v))[1:-1].split(' ')[1][1:-1]
# after automatic madness return description and parameters
parameters['labels'] = 'list/str'
# end of manual cleaning
POST = {
"description": "Create new game",
"parameters": OrderedDict(
sorted(parameters.items(), key=lambda t: t[0]))
}
# filter single resource
if not game_uuid:
message['POST'] = POST
else:
message['Allow'].remove('POST')
message['Allow'].append('PATCH')
message['Allow'].append('DELETE')
self.set_status(200)
self.finish(message)
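# Hedged usage sketch (host, port and URL prefix are assumptions; the real routes
# live in this service's URL spec, which is not shown here): scheduling a game is
# a plain JSON POST against this handler, e.g.
#
#   curl -X POST http://localhost:8888/games/ \
#        -H 'Content-Type: application/json' \
#        -d '{"session": "...", "labels": ["demo"]}'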
| 33.051643
| 78
| 0.509659
|
37fc3a785e87eef4fc2bcfad146a23b3fa6f4748
| 3,447
|
py
|
Python
|
custom_components/anniversaries/const.py
|
Nag94/HomeAssistantConfig
|
d5f806e05be8d92bf487c58322d20cd9b08c6b98
|
[
"Unlicense"
] | 163
|
2020-08-01T12:19:46.000Z
|
2022-03-28T09:04:57.000Z
|
custom_components/anniversaries/const.py
|
Nag94/HomeAssistantConfig
|
d5f806e05be8d92bf487c58322d20cd9b08c6b98
|
[
"Unlicense"
] | 81
|
2020-08-04T00:28:46.000Z
|
2022-03-29T15:48:51.000Z
|
custom_components/anniversaries/const.py
|
Nag94/HomeAssistantConfig
|
d5f806e05be8d92bf487c58322d20cd9b08c6b98
|
[
"Unlicense"
] | 28
|
2020-08-02T12:02:24.000Z
|
2022-03-22T00:07:34.000Z
|
""" Constants """
from typing import Optional
import voluptuous as vol
from datetime import datetime
import homeassistant.helpers.config_validation as cv
from homeassistant.const import CONF_NAME
# Base component constants
DOMAIN = "anniversaries"
DOMAIN_DATA = f"{DOMAIN}_data"
VERSION = "4.4.2"
PLATFORM = "sensor"
ISSUE_URL = "https://github.com/pinkywafer/Anniversaries/issues"
ATTRIBUTION = "Sensor data calculated by Anniversaries Integration"
ATTR_YEARS_NEXT = "years_at_next_anniversary"
ATTR_YEARS_CURRENT = "current_years"
ATTR_DATE = "date"
# Device classes
BINARY_SENSOR_DEVICE_CLASS = "connectivity"
# Configuration
CONF_SENSOR = "sensor"
CONF_ENABLED = "enabled"
CONF_DATE = "date"
CONF_DATE_TEMPLATE = "date_template"
CONF_ICON_NORMAL = "icon_normal"
CONF_ICON_TODAY = "icon_today"
CONF_ICON_SOON = "icon_soon"
CONF_DATE_FORMAT = "date_format"
CONF_SENSORS = "sensors"
CONF_SOON = "days_as_soon"
CONF_HALF_ANNIVERSARY = "show_half_anniversary"
CONF_UNIT_OF_MEASUREMENT = "unit_of_measurement"
CONF_ID_PREFIX = "id_prefix"
CONF_ONE_TIME = "one_time"
CONF_COUNT_UP = "count_up"
CONF_DATE_EXCLUSION_ERROR = "Configuration cannot include both `date` and `date_template`. configure ONLY ONE"
CONF_DATE_REQD_ERROR = "Either `date` or `date_template` is Required"
# Defaults
DEFAULT_NAME = DOMAIN
DEFAULT_ICON_NORMAL = "mdi:calendar-blank"
DEFAULT_ICON_TODAY = "mdi:calendar-star"
DEFAULT_ICON_SOON = "mdi:calendar"
DEFAULT_DATE_FORMAT = "%Y-%m-%d"
DEFAULT_SOON = 1
DEFAULT_HALF_ANNIVERSARY = False
DEFAULT_UNIT_OF_MEASUREMENT = "Days"
DEFAULT_ID_PREFIX = "anniversary_"
DEFAULT_ONE_TIME = False
DEFAULT_COUNT_UP = False
ICON = DEFAULT_ICON_NORMAL
def check_date(value):
try:
datetime.strptime(value, "%Y-%m-%d")
return value
except ValueError:
pass
try:
datetime.strptime(value, "%m-%d")
return value
except ValueError:
raise vol.Invalid(f"Invalid date: {value}")
DATE_SCHEMA = vol.Schema(
{
vol.Required(
vol.Any(CONF_DATE,CONF_DATE_TEMPLATE,msg=CONF_DATE_REQD_ERROR)
): object
}, extra=vol.ALLOW_EXTRA
)
SENSOR_CONFIG_SCHEMA = vol.Schema(
{
vol.Required(CONF_NAME): cv.string,
vol.Exclusive(CONF_DATE, CONF_DATE, msg=CONF_DATE_EXCLUSION_ERROR): check_date,
vol.Exclusive(CONF_DATE_TEMPLATE, CONF_DATE, msg=CONF_DATE_EXCLUSION_ERROR): cv.string,
vol.Optional(CONF_SOON, default=DEFAULT_SOON): cv.positive_int,
vol.Optional(CONF_ICON_NORMAL, default=DEFAULT_ICON_NORMAL): cv.icon,
vol.Optional(CONF_ICON_TODAY, default=DEFAULT_ICON_TODAY): cv.icon,
vol.Optional(CONF_ICON_SOON, default=DEFAULT_ICON_SOON): cv.icon,
vol.Optional(CONF_DATE_FORMAT, default=DEFAULT_DATE_FORMAT): cv.string,
vol.Optional(CONF_HALF_ANNIVERSARY, default=DEFAULT_HALF_ANNIVERSARY): cv.boolean,
vol.Optional(CONF_UNIT_OF_MEASUREMENT, default=DEFAULT_UNIT_OF_MEASUREMENT): cv.string,
vol.Optional(CONF_ID_PREFIX, default=DEFAULT_ID_PREFIX): cv.string,
vol.Optional(CONF_ONE_TIME, default=DEFAULT_ONE_TIME): cv.boolean,
vol.Optional(CONF_COUNT_UP, default=DEFAULT_COUNT_UP): cv.boolean,
}
)
SENSOR_SCHEMA = vol.All(SENSOR_CONFIG_SCHEMA, DATE_SCHEMA)
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{vol.Optional(CONF_SENSORS): vol.All(cv.ensure_list, [SENSOR_SCHEMA])}
)
},
extra=vol.ALLOW_EXTRA,
)
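# Hedged configuration sketch (names and dates are illustrative): a YAML block
# that CONFIG_SCHEMA above accepts in Home Assistant's configuration.yaml.
#
#   anniversaries:
#     sensors:
#       - name: Our Anniversary
#         date: '2010-06-15'
#         days_as_soon: 7
#         show_half_anniversary: false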
| 32.518868
| 110
| 0.747026
|
821d42b09e60d257777dda4f40587c4b7584191d
| 2,561
|
py
|
Python
|
examples/flask-echo/app_with_handler.py
|
bocheng47/LINE_bocheng
|
87c0e605bd922ae2002f103ff16f0b0707794b50
|
[
"Apache-2.0"
] | null | null | null |
examples/flask-echo/app_with_handler.py
|
bocheng47/LINE_bocheng
|
87c0e605bd922ae2002f103ff16f0b0707794b50
|
[
"Apache-2.0"
] | null | null | null |
examples/flask-echo/app_with_handler.py
|
bocheng47/LINE_bocheng
|
87c0e605bd922ae2002f103ff16f0b0707794b50
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import sys
from argparse import ArgumentParser
from flask import Flask, request, abort
from linebot import (
LineBotApi, WebhookHandler
)
from linebot.exceptions import (
InvalidSignatureError
)
from linebot.models import (
MessageEvent, TextMessage, TextSendMessage,
)
app = Flask(__name__)
# get channel_secret and channel_access_token from your environment variable
channel_secret = os.getenv('d57f500b5ac8ede281c2b196e1ad547b', None)
channel_access_token = os.getenv('Qpc1/KrZ6Vm3qlyyoYhlrBUSfk4k2HuEJco9QyKbRY3VffS8gDRz9+ZyGqSKirb3Psk3lnrjZdsDlYGgXv56B7NMDSDHxgW4/AkLuhl8oNGQqZZQYJxa8oB5NQIrx07UYV/0UcEXVPCWtQ6V/YMZUgdB04t89/1O/w1cDnyilFU=', None)
if channel_secret is None:
print('Specify LINE_CHANNEL_SECRET as environment variable.')
sys.exit(1)
if channel_access_token is None:
print('Specify LINE_CHANNEL_ACCESS_TOKEN as environment variable.')
sys.exit(1)
line_bot_api = LineBotApi(channel_access_token)
handler = WebhookHandler(channel_secret)
@app.route("/callback", methods=['POST'])
def callback():
# get X-Line-Signature header value
signature = request.headers['X-Line-Signature']
# get request body as text
body = request.get_data(as_text=True)
app.logger.info("Request body: " + body)
# handle webhook body
try:
handler.handle(body, signature)
except InvalidSignatureError:
abort(400)
return 'OK'
@handler.add(MessageEvent, message=TextMessage)
def message_text(event):
line_bot_api.reply_message(
event.reply_token,
TextSendMessage(text=event.message.text)
)
if __name__ == "__main__":
arg_parser = ArgumentParser(
usage='Usage: python ' + __file__ + ' [--port <port>] [--help]'
)
arg_parser.add_argument('-p', '--port', default=8000, help='port')
arg_parser.add_argument('-d', '--debug', default=False, help='debug')
options = arg_parser.parse_args()
app.run(debug=options.debug, port=options.port)
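# Hedged note (values are placeholders): the two credentials above are read from
# the environment, so export them before starting the echo bot, e.g.
#
#   export LINE_CHANNEL_SECRET="..."
#   export LINE_CHANNEL_ACCESS_TOKEN="..."
#   python app_with_handler.py --port 8000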
| 31.617284
| 214
| 0.738774
|
056e1b6b2af32108cd44079b869c8b1339636bd7
| 1,878
|
py
|
Python
|
PyOpenGl/torus.py
|
kirillzx/Geometry-and-Visualization-
|
3cf3e47268eac7b2a389f6f18b1d2e3503e9d13a
|
[
"MIT"
] | null | null | null |
PyOpenGl/torus.py
|
kirillzx/Geometry-and-Visualization-
|
3cf3e47268eac7b2a389f6f18b1d2e3503e9d13a
|
[
"MIT"
] | null | null | null |
PyOpenGl/torus.py
|
kirillzx/Geometry-and-Visualization-
|
3cf3e47268eac7b2a389f6f18b1d2e3503e9d13a
|
[
"MIT"
] | null | null | null |
from math import *
from OpenGL.GL import *
from OpenGL.GLU import *
from OpenGL.GLUT import *
# Initialization routine
def init():
    glEnable(GL_DEPTH_TEST)
    glClearColor(0.5, 0.5, 0.5, 1.0) # Grey colour for the initial clear
    gluOrtho2D(-1.0, 1.0, -1.0, 1.0) # Set the horizontal and vertical drawing bounds
    global filled
    filled = 1
# Handler for ordinary (ASCII) keys
def keyboardkeys(key, x, y):
global filled
if key == b'\x1b':
sys.exit(0)
if key == b' ':
filled = 1 - filled
    glutPostRedisplay() # Trigger a redraw
# Drawing routine
def draw(*args, **kwargs):
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT) # Clear the screen with the current background colour
glRotated(0.125,1,1,1)
R1 = 0.2
R2 = 0.1
for j in range(-9,8):
glBegin(GL_QUAD_STRIP)
glColor3f(1,0.5,0)
for i in range(21):
x1 = (R1 + R2 * cos(pi * j/18)) * cos(pi * i/10)
y1 = (R1 + R2 * cos(pi * j/18)) * sin(pi * i/10)
            z1 = R2 * sin(pi * j/18)   # same minor-circle angle as the cos terms above
            x2 = (R1 + R2 * cos(pi * (j+1)/18)) * cos(pi * (i+1)/10)
            y2 = (R1 + R2 * cos(pi * (j+1)/18)) * sin(pi * (i+1)/10)
            z2 = R2 * sin(pi * (j+1)/18)
glVertex3d(x1, y1, z1)
glVertex3d(x2, y2, z2)
glEnd()
    glutSwapBuffers() # Swap the front and back buffers
    glutPostRedisplay() # Trigger a redraw
glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGB | GLUT_DEPTH)
glutInitWindowSize(800, 600)
glutInitWindowPosition(50, 50)
glutInit(sys.argv)
glutCreateWindow(b"OpenGL")
# Register the display (drawing) callback
glutDisplayFunc(draw)
# Register the keyboard callback
glutKeyboardFunc(keyboardkeys)
# Run our initialization routine
init()
glutMainLoop()
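# Descriptive note: ESC exits the program and the space bar toggles the global
# `filled` flag in keyboardkeys(); draw() does not yet consult the flag, so
# toggling it currently has no visual effect.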
| 30.786885
| 102
| 0.614483
|
4ebd679ea7859b34072c8df48f22a9b4088df33f
| 30,163
|
py
|
Python
|
atomai/models/dgm/vae.py
|
aghosh92/atomai
|
9a9fd7a4cff7dc9b5af9fcc5b1a4b3894c9df685
|
[
"MIT"
] | null | null | null |
atomai/models/dgm/vae.py
|
aghosh92/atomai
|
9a9fd7a4cff7dc9b5af9fcc5b1a4b3894c9df685
|
[
"MIT"
] | null | null | null |
atomai/models/dgm/vae.py
|
aghosh92/atomai
|
9a9fd7a4cff7dc9b5af9fcc5b1a4b3894c9df685
|
[
"MIT"
] | null | null | null |
"""
vae.py
=======
Module for analysis of system "building blocks" with variational autoencoders
Created by Maxim Ziatdinov (email: maxim.ziatdinov@ai4microscopy.com)
"""
import os
from copy import deepcopy as dc
from typing import Dict, List, Optional, Tuple, Union
import matplotlib.pyplot as plt
import numpy as np
import torch
from scipy.stats import norm
from torchvision.utils import make_grid
from ...losses_metrics import vae_loss
from ...nets import init_VAE_nets
from ...trainers import viBaseTrainer
from ...utils import (crop_borders, extract_subimages, get_coord_grid,
imcoordgrid, set_train_rng, subimg_trajectories,
to_onehot)
class BaseVAE(viBaseTrainer):
"""
General class for VAE models
Args:
in_dim:
(height, width) or (height, width, channel) of input images
latent_dim:
Number of latent dimensions
nb_classes:
Number of classes (for class-conditional VAEs)
seed:
seed for torch and numpy (pseudo-)random numbers generators
**conv_encoder (bool):
use convolutional layers in encoder
**conv_decoder (bool):
use convolutional layers in decoder (doesn't apply to rVAE)
**numlayers_encoder (int):
number of layers in encoder (Default: 2)
**numlayers_decoder (int):
number of layers in decoder (Default: 2)
**numhidden_encoder (int):
number of hidden units OR conv filters in encoder (Default: 128)
**numhidden_decoder (int):
number of hidden units OR conv filters in decoder (Default: 128)
"""
def __init__(self,
in_dim: Tuple[int],
latent_dim: int,
nb_classes: int = 0,
coord: int = 0,
discrete_dim: Optional[List] = None,
seed: int = 0,
**kwargs: Union[int, bool]) -> None:
super(BaseVAE, self).__init__()
"""
Initializes encoder-decoder object
"""
in_dim_error_msg = (
"You must specify the input dimensions and pass them as a tuple. "
"For images, specify (height, width) or (height, width, channels)" +
" if multiple channels. For spectra, specify (length,)")
if in_dim is None or not isinstance(in_dim, (tuple, list)):
raise AssertionError(in_dim_error_msg)
if isinstance(in_dim, tuple) and not isinstance(in_dim[0], int):
raise AssertionError(in_dim_error_msg)
        if torch.cuda.is_available():
torch.cuda.empty_cache()
set_train_rng(seed)
np.random.seed(seed)
self.in_dim = in_dim
self.z_dim = latent_dim
if isinstance(discrete_dim, list):
self.z_dim = self.z_dim + sum(discrete_dim)
self.discrete_dim = discrete_dim
if coord:
if len(in_dim) not in (2, 3):
raise NotImplementedError(
"VAE with rotation and translational invariance are " +
"available only for 2D image data")
self.z_dim = self.z_dim + coord
self.x_coord = imcoordgrid(in_dim).to(self.device)
self.nb_classes = nb_classes
(encoder_net, decoder_net,
self.metadict) = init_VAE_nets(
in_dim, latent_dim, coord, discrete_dim,
nb_classes, **kwargs)
self.set_model(encoder_net, decoder_net)
self.sigmoid_out = self.metadict["sigmoid_out"]
self.coord = coord
def encode_(self,
x_new: Union[np.ndarray, torch.Tensor],
**kwargs: int) -> Tuple[np.ndarray]:
"""
Encodes input image data using a trained VAE's encoder
Args:
            x_new:
image array to encode
**num_batches:
number of batches (Default: 10)
Returns:
Concatenated array of encoded vectors
"""
def inference() -> np.ndarray:
with torch.no_grad():
encoded = self.encoder_net(x_i)
encoded = torch.cat(encoded, -1).cpu().numpy()
return encoded
if isinstance(x_new, np.ndarray):
x_new = torch.from_numpy(x_new).float()
if (x_new.ndim == len(self.in_dim) == 2 or
x_new.ndim == len(self.in_dim) == 3):
x_new = x_new.unsqueeze(0)
x_new = x_new.to(self.device)
num_batches = kwargs.get("num_batches", 10)
batch_size = len(x_new) // num_batches
z_encoded = []
for i in range(num_batches):
x_i = x_new[i*batch_size:(i+1)*batch_size]
z_encoded_i = inference()
z_encoded.append(z_encoded_i)
x_i = x_new[(i+1)*batch_size:]
if len(x_i) > 0:
z_encoded_i = inference()
z_encoded.append(z_encoded_i)
return np.concatenate(z_encoded)
def encode(self,
x_new: Union[np.ndarray, torch.Tensor],
**kwargs: int) -> Tuple[np.ndarray]:
"""
Encodes input image data using a trained VAE's encoder
Args:
            x_new:
image array to encode
**num_batches:
number of batches (Default: 10)
Returns:
Mean and SD of the encoded continuous distribution,
and alphas ("class probabilities") for the encoded
discrete distribution(s) (if any)
"""
z = self.encode_(x_new, **kwargs)
if not self.discrete_dim:
z_mean = z[:, :self.z_dim]
z_logsd = z[:, self.z_dim:]
return z_mean, z_logsd
cont_dim = self.z_dim - sum(self.discrete_dim)
z_mean = z[:, :cont_dim]
z_logsd = z[:, cont_dim:cont_dim+cont_dim]
alphas = z[:, cont_dim+cont_dim:]
return z_mean, z_logsd, alphas
def decode(self, z_sample: Union[np.ndarray, torch.Tensor],
y: Optional[Union[int, np.ndarray, torch.Tensor]] = None
) -> np.ndarray:
"""
Takes a point in latent space and maps it to data space
via the learned generative model
Args:
z_sample: point(s) in latent space
y: label
Returns:
Generated ("decoded") image(s)
"""
if isinstance(z_sample, np.ndarray):
z_sample = torch.from_numpy(z_sample).float()
if len(z_sample.size()) == 1:
z_sample = z_sample[None, ...]
if self.coord:
x_coord = self.x_coord.expand(z_sample.size(0), *self.x_coord.size())
z_sample = z_sample.cuda() if torch.cuda.is_available() else z_sample
if y is not None:
if isinstance(y, int):
y = torch.tensor(y)
elif isinstance(y, np.ndarray):
y = torch.from_numpy(y)
if y.dim() == 0:
y = y.unsqueeze(0)
y = y.cuda() if torch.cuda.is_available() else y
targets = to_onehot(y, self.nb_classes)
z_sample = torch.cat((z_sample, targets), dim=-1)
if torch.cuda.is_available():
self.decoder_net.cuda()
self.decoder_net.eval()
with torch.no_grad():
if self.coord:
x_decoded = self.decoder_net(x_coord, z_sample)
else:
x_decoded = self.decoder_net(z_sample)
if self.sigmoid_out:
x_decoded = torch.sigmoid(x_decoded)
imdec = x_decoded.cpu().numpy()
return imdec
def reconstruct(self,
x_new: Union[np.ndarray, torch.Tensor],
**kwargs: int) -> np.ndarray:
"""
Forward prediction with uncertainty quantification by sampling from
the encoded mean and std. Works only for regular VAE (and not for rVAE)
Args:
x_new:
image array to encode
**label:
class to be reconstructed (for cVAE, crVAE, jVAE, and jrVAE)
**num_samples:
number of samples to generate from normal distribution
Returns:
Ensemble of "decoded" images
"""
num_samples = kwargs.get("num_samples", 32)
label = kwargs.get("label")
if isinstance(x_new, np.ndarray):
x_new = torch.from_numpy(x_new).float()
if torch.cuda.is_available():
x_new = x_new.cuda()
self.encoder_net.cuda()
with torch.no_grad():
encoded = self.encoder_net(x_new)
if len(encoded) == 2:
z_mean, z_logsd = encoded
else:
z_mean, z_logsd, alphas = encoded
z_mean = z_mean[:, self.coord:]
z_logsd = z_logsd[:, self.coord:]
if label is not None:
n = self.nb_classes if self.discrete_dim is None else self.discrete_dim # probably makes sense to use nb_classes for j(r)VAE's discrete_dim
alphas = to_onehot(
torch.tensor(label).unsqueeze(0).to(self.device),
torch.tensor(n).to(self.device))
z_sd = torch.exp(z_logsd)
ndist = torch.distributions.Normal(z_mean, z_sd)
decoded_all = []
for i in range(num_samples):
z_sample = ndist.rsample()
z_sample = z_sample.view(1, -1)
if len(encoded) > 2 or label is not None:
z_sample = torch.cat([z_sample, alphas], dim=1)
decoded_all.append(self.decode(z_sample))
decoded_all = np.concatenate(decoded_all, axis=0)
return decoded_all
def encode_images(self,
imgdata: np.ndarray,
**kwargs: int) -> Tuple[np.ndarray, np.ndarray]:
"""
Encodes every pixel of every image in image stack
Args:
imgdata:
3D numpy array. Can also be a single 2D image
**num_batches (int):
                number of batches for encoding pixels of a single image
Returns:
Cropped original image stack and encoded array (cropping is due to finite window size)
"""
if (imgdata.ndim == len(self.in_dim) == 2 or
imgdata.ndim == len(self.in_dim) == 3):
imgdata = np.expand_dims(imgdata, axis=0)
imgdata_encoded, imgdata_ = [], []
for i, img in enumerate(imgdata):
print("\rImage {}/{}".format(i+1, imgdata.shape[0]), end="")
img_, img_encoded = self.encode_image_(img, **kwargs)
imgdata_encoded.append(img_encoded)
imgdata_.append(img_)
return np.array(imgdata_), np.array(imgdata_encoded)
def encode_image_(self,
img: np.ndarray,
**kwargs: int) -> Tuple[np.ndarray, np.ndarray]:
"""
Crops and encodes a subimage around each pixel in the input image.
The size of subimage is determined by size of images in VAE training data.
Args:
img:
2D numpy array
**num_batches (int):
number of batches for encoding subimages
Returns:
Cropped original image and encoded array (cropping is due to finite window size)
"""
num_batches = kwargs.get("num_batches", 10)
        inf = int(1e5)  # np.int is deprecated/removed in recent NumPy
img_to_encode = img.copy()
coordinates = get_coord_grid(img_to_encode, 1, return_dict=False)
batch_size = coordinates.shape[0] // num_batches
encoded_img = -inf * np.ones((*img_to_encode.shape, self.z_dim))
for i in range(num_batches):
coord_i = coordinates[i*batch_size:(i+1)*batch_size]
subimgs_i, com_i, _ = extract_subimages(
img_to_encode, coord_i, self.in_dim[0])
if len(subimgs_i) > 0:
z_mean, _ = self.encode(subimgs_i, num_batches=10)
for k, (l, m) in enumerate(com_i):
encoded_img[int(l), int(m)] = z_mean[k]
coord_i = coordinates[(i+1)*batch_size:]
if len(coord_i) > 0:
subimgs_i, com_i, _ = extract_subimages(
img_to_encode, coord_i, self.in_dim[0])
if len(subimgs_i) > 0:
z_mean, _ = self.encode(subimgs_i, num_batches=10)
for k, (l, m) in enumerate(com_i):
encoded_img[int(l), int(m)] = z_mean[k]
img_to_encode[encoded_img[..., 0] == -inf] = 0
img_to_encode = crop_borders(img_to_encode[..., None], 0)
encoded_img = crop_borders(encoded_img, -inf)
return img_to_encode[..., 0], encoded_img
def encode_trajectories(self,
imgdata: np.ndarray,
coord_class_dict: Dict[int, np.ndarray],
window_size: int,
min_length: int,
rmax: int,
**kwargs: int
) -> Tuple[List[np.ndarray], List[np.ndarray]]:
"""
Calculates trajectories and latent variable value
for each point in a trajectory.
Args:
imgdata:
NN output (preferable) or raw data
coord_class_dict:
atomic/defect/particle coordinates
window_size:
size of subimages to crop
min_length:
minimum length of trajectory to be included
rmax:
                maximum allowed distance (projected on the xy plane) between a defect
                in one frame and the position of its nearest neighbor in the next one
**num_batches (int):
number of batches for self.encode (Default: 10)
Returns:
List of encoded trajectories and corresponding movie frame numbers
"""
t = subimg_trajectories(
imgdata, coord_class_dict, window_size, min_length, rmax)
trajectories, frames, subimgs_all = t.get_all_trajectories()
trajectories_enc_all = []
for traj, subimgs in zip(trajectories, subimgs_all):
z_mean, _ = self.encode(
subimgs, num_batches=kwargs.get("num_batches", 10))
traj_enc = np.concatenate((traj[:, :2], z_mean), axis=-1)
trajectories_enc_all.append(traj_enc)
return trajectories_enc_all, frames, subimgs_all
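    # Hedged usage sketch (assumed workflow, not in the original file): encode
    # per-frame subimages around tracked particles into latent trajectories.
    # >>> traj_enc, frames, subimgs = vae.encode_trajectories(
    # ...     nn_output, coord_class_dict, window_size=32,
    # ...     min_length=10, rmax=10, num_batches=10)
    # >>> traj_enc[0].shape  # (trajectory_length, 2 + n_latent): xy + latent values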
def manifold2d(self, **kwargs: Union[int, List, str, bool]) -> None: # use torchvision's grid here
"""
Performs mapping from latent space to data space allowing the learned
        manifold to be visualized. This works only for a 2D latent space
        (not counting angle & translation dimensions)
Args:
**d (int): grid size
**l1 (list): range of 1st latent variable
**l2 (list): range of 2nd latent variable
**label (int): label in class-conditioned (r)VAE
**disc_idx (int): discrete "class"
**cmap (str): color map (Default: gnuplot)
**draw_grid (bool): plot semi-transparent grid
**origin (str): plot origin (e.g. 'lower')
"""
y = kwargs.get("label")
if y is None and self.nb_classes != 0:
y = 0
elif y and self.nb_classes == 0:
y = None
l1, l2 = kwargs.get("l1"), kwargs.get("l2")
d = kwargs.get("d", 9)
cmap = kwargs.get("cmap", "gnuplot")
if len(self.in_dim) == 2:
figure = np.zeros((self.in_dim[0] * d, self.in_dim[1] * d))
elif len(self.in_dim) == 3:
figure = np.zeros((self.in_dim[0] * d, self.in_dim[1] * d, self.in_dim[-1]))
if l1 and l2:
grid_x = np.linspace(l1[1], l1[0], d)
grid_y = np.linspace(l2[0], l2[1], d)
else:
grid_x = norm.ppf(np.linspace(0.95, 0.05, d))
grid_y = norm.ppf(np.linspace(0.05, 0.95, d))
if self.discrete_dim:
z_disc = np.zeros((sum(self.discrete_dim)))[None]
z_disc[:, kwargs.get("disc_idx", 0)] = 1
for i, xi in enumerate(grid_x):
for j, yi in enumerate(grid_y):
z_sample = np.array([[xi, yi]])
if self.discrete_dim:
z_sample = np.concatenate((z_sample, z_disc), -1)
if y is not None:
imdec = self.decode(z_sample, y)
else:
imdec = self.decode(z_sample)
figure[i * self.in_dim[0]: (i + 1) * self.in_dim[0],
j * self.in_dim[1]: (j + 1) * self.in_dim[1]] = imdec
if figure.min() < 0:
figure = (figure - figure.min()) / figure.ptp()
fig, ax = plt.subplots(figsize=(10, 10))
ax.imshow(figure, cmap=cmap, origin=kwargs.get("origin", "lower"),
extent=[grid_x.min(), grid_x.max(), grid_y.min(), grid_y.max()])
ax.set_xlabel("$z_1$")
ax.set_ylabel("$z_2$")
draw_grid = kwargs.get("draw_grid")
if draw_grid:
major_ticks_x = np.arange(0, d * self.in_dim[0], self.in_dim[0])
major_ticks_y = np.arange(0, d * self.in_dim[1], self.in_dim[1])
ax.set_xticks(major_ticks_x)
ax.set_yticks(major_ticks_y)
ax.grid(which='major', alpha=0.6)
for item in ([ax.xaxis.label, ax.yaxis.label] +
ax.get_xticklabels() + ax.get_yticklabels()):
item.set_fontsize(18)
if not kwargs.get("savefig"):
plt.show()
else:
savedir = kwargs.get("savedir", './vae_learning/')
fname = kwargs.get("filename", "manifold_2d")
if not os.path.exists(savedir):
os.makedirs(savedir)
fig.savefig(os.path.join(savedir, '{}.png'.format(fname)))
plt.close(fig)
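    # Hedged usage sketch (illustrative values): plot the learned 2D manifold on
    # a d x d grid, optionally restricting latent ranges and saving the figure.
    # >>> vae.manifold2d(d=12, cmap="viridis", draw_grid=True)
    # >>> vae.manifold2d(l1=[-2, 2], l2=[-2, 2], savefig=True,
    # ...                savedir="./vae_learning/", filename="manifold_2d")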
def manifold_traversal(self, cont_idx: int,
d: int = 10,
cont_idx_fixed: int = 0,
plot: bool = True,
**kwargs: Union[str, float]
) -> np.ndarray:
"""
Latent space traversals for joint continuous and discrete
latent representations
"""
if self.discrete_dim is None:
            raise TypeError(
                "Traversal of latent space is implemented only for joint "
                "continuous and discrete latent distributions")
        if len(self.in_dim) == 3 and self.in_dim[-1] != 3:
            raise ValueError(
                "This works only for single-channel and 3-channel images")
num_samples = d**2
cont_dim = self.z_dim - sum(self.discrete_dim) - self.coord
# Get continuous latent coordinates
samples_cont = np.zeros(shape=(num_samples, cont_dim)) + cont_idx_fixed
cdf_traversal = np.linspace(0.05, 0.95, d)
cont_traversal = norm.ppf(cdf_traversal)
for i in range(d):
for j in range(d):
samples_cont[i * d + j, cont_idx] = cont_traversal[j]
# Get discrete latent coordinates
disc_dim = self.discrete_dim[0]
n = np.arange(0, disc_dim)
n = np.tile(n, d // disc_dim + 1)[:d]
samples_disc = []
for i in range(d):
samples_disc_i = np.zeros((d, disc_dim))
samples_disc_i[:, n[i]] = 1
samples_disc.append(samples_disc_i)
samples_disc = np.concatenate(samples_disc)
# Put them together and pass through a decoder
samples = np.concatenate((samples_cont, samples_disc), -1)
decoded = self.decode(samples)
# Use a built-in torchvision utility to construct a nice grid
decoded = decoded.transpose(0, 3, 1, 2) if decoded.ndim == 4 else decoded[:, None]
grid = make_grid(torch.from_numpy(decoded),
nrow=d, padding=kwargs.get("pad", 2)).numpy()
grid = grid.transpose(1, 2, 0) if len(self.in_dim) == 3 else grid[0]
grid = (grid - grid.min()) / grid.ptp()
if not kwargs.get("keep_square", False) and disc_dim != d:
grid = grid[:(self.in_dim[0]+kwargs.get("pad", 2)) * disc_dim]
if plot:
plt.figure(figsize=(12, 12))
plt.imshow(grid, cmap='gnuplot',
origin=kwargs.get("origin", "lower"))
plt.xlabel("$z_{cont}$", fontsize=18)
plt.ylabel("$z_{disc}$", fontsize=18)
plt.xticks([])
plt.yticks([])
plt.show()
return grid
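    # Hedged usage sketch: traverse one continuous latent variable against the
    # discrete ("class") latent of a jointly trained model.
    # >>> grid = model.manifold_traversal(cont_idx=0, d=10, plot=True)
    # >>> grid.shape  # 2D (grayscale) or 3D (RGB) image grid scaled to [0, 1]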
@classmethod
def visualize_manifold_learning(cls,
frames_dir: str,
**kwargs: Union[str, int]) -> None:
"""
Creates and stores a video showing evolution of
learned 2D manifold during rVAE's training
Args:
frames_dir:
directory with snapshots of manifold as .png files
(the files should be named as "1.png", "2.png", etc.)
**moviename (str): name of the movie
**frame_duration (int): duration of each movie frame
"""
from atomai.utils import animation_from_png
movie_name = kwargs.get("moviename", "manifold_learning")
duration = kwargs.get("frame_duration", 1)
animation_from_png(frames_dir, movie_name, duration, remove_dir=False)
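    # Hedged usage sketch: frames_dir is expected to hold "1.png", "2.png", ...
    # snapshots saved during training (e.g. via manifold2d(savefig=True)).
    # >>> BaseVAE.visualize_manifold_learning(
    # ...     "./vae_learning", moviename="manifold_learning", frame_duration=1)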
def _check_inputs(self,
X_train: np.ndarray,
y_train: Optional[np.ndarray] = None,
X_test: Optional[np.ndarray] = None,
y_test: Optional[np.ndarray] = None
) -> None:
"""
        Asserts that the dimensionality and number of classes contained in
        the training and test data match those specified at initialization
"""
if self.in_dim != X_train.shape[1:]:
raise RuntimeError(
"The values of input dimensions you specified do not match " +
"the training data dimensions. " +
"Expected {} but got {}".format(self.in_dim, X_train.shape[1:]))
if X_test is not None and self.in_dim != X_test.shape[1:]:
raise RuntimeError(
"The values of input dimensions you specified do not match " +
"the test data dimensions. " +
"Expected {} but got {}".format(self.in_dim, X_test.shape[1:]))
if y_train is not None and self.nb_classes == 0:
raise RuntimeError(
"You must have forgotten to specify number of classes " +
"during the initialization. Example of correct usage: " +
"vae = VAE(in_dim=(28, 28), nb_classes=10)); " +
"vae.fit(train_data, train_labels).")
lbl_match = True
if y_train is not None and y_test is None:
lbl_match = self.nb_classes == len(np.unique(y_train))
elif y_train is not None and y_test is not None:
lbl_match = (self.nb_classes == len(np.unique(y_train))
== len(np.unique(y_test)))
if not lbl_match:
raise RuntimeError(
"The number of classes specified at initialization must be " +
"equal the the number of classes in train and test labels")
def _2torch(self,
X: Union[np.ndarray, torch.Tensor],
y: Union[np.ndarray, torch.Tensor] = None
) -> torch.Tensor:
"""
Rules for conversion of numpy arrays to torch tensors
"""
if isinstance(X, np.ndarray):
X = torch.from_numpy(X).float()
if isinstance(y, np.ndarray):
y = torch.from_numpy(y).long()
return X, y
class VAE(BaseVAE):
"""
Implements a standard Variational Autoencoder (VAE)
Args:
in_dim:
            Input dimensions for image data passed as (height, width)
for grayscale data or (height, width, channels)
for multichannel data
latent_dim:
Number of VAE latent dimensions
nb_classes:
            Number of classes for class-conditional VAE
seed:
seed for torch and numpy (pseudo-)random numbers generators
**conv_encoder (bool):
use convolutional layers in encoder
**conv_decoder (bool):
use convolutional layers in decoder
**numlayers_encoder (int):
number of layers in encoder (Default: 2)
**numlayers_decoder (int):
number of layers in decoder (Default: 2)
**numhidden_encoder (int):
number of hidden units OR conv filters in encoder (Default: 128)
**numhidden_decoder (int):
number of hidden units OR conv filters in decoder (Default: 128)
Example:
>>> input_dim = (28, 28) # Input data dimensions (without n_samples)
        >>> # Initialize model
        >>> vae = aoi.models.VAE(input_dim)
        >>> # Train
        >>> vae.fit(imstack_train, training_cycles=100, batch_size=100)
        >>> # Visualize learned manifold (for 2 latent dimensions)
        >>> vae.manifold2d(origin="upper", cmap="gnuplot2")
        One can also pass labels to train a class-conditioned VAE
        >>> # Initialize model
>>> vae = aoi.models.VAE(input_dim, nb_classes=10)
>>> # Train
>>> vae.fit(imstack_train, labels_train, training_cycles=100, batch_size=100)
>>> # Visualize learned manifold for class 1
>>> vae.manifold2d(label=1, origin="upper", cmap="gnuplot2")
"""
def __init__(self,
in_dim: int = None,
latent_dim: int = 2,
nb_classes: int = 0,
seed: int = 0,
**kwargs: Union[int, bool, str]
) -> None:
super(VAE, self).__init__(in_dim, latent_dim, nb_classes, 0, **kwargs)
set_train_rng(seed)
self.kdict_ = dc(kwargs)
self.kdict_["num_iter"] = 0
def elbo_fn(self, x: torch.Tensor, x_reconstr: torch.Tensor,
*args: torch.Tensor,
**kwargs) -> torch.Tensor:
"""
Calculates ELBO
"""
return vae_loss(self.loss, self.in_dim, x, x_reconstr, *args, **kwargs)
def forward_compute_elbo(self,
x: torch.Tensor,
y: Optional[torch.Tensor] = None,
mode: str = "train"
) -> torch.Tensor:
"""
VAE's forward pass with training/test loss computation
"""
x = x.to(self.device)
if mode == "eval":
with torch.no_grad():
z_mean, z_logsd = self.encoder_net(x)
else:
z_mean, z_logsd = self.encoder_net(x)
self.kdict_["num_iter"] += 1
z_sd = torch.exp(z_logsd)
z = self.reparameterize(z_mean, z_sd)
if y is not None:
targets = to_onehot(y, self.nb_classes)
z = torch.cat((z, targets), -1)
if mode == "eval":
with torch.no_grad():
x_reconstr = self.decoder_net(z)
else:
x_reconstr = self.decoder_net(z)
return self.elbo_fn(x, x_reconstr, z_mean, z_logsd, **self.kdict_)
def fit(self,
X_train: Union[np.ndarray, torch.Tensor],
y_train: Optional[Union[np.ndarray, torch.Tensor]] = None,
X_test: Optional[Union[np.ndarray, torch.Tensor]] = None,
y_test: Optional[Union[np.ndarray, torch.Tensor]] = None,
loss: str = "mse",
**kwargs) -> None:
"""
Trains VAE model
Args:
X_train:
                For images, 3D or 4D stack of training images with dimensions
                (n_images, height, width) for grayscale data
                or (n_images, height, width, channels) for multi-channel data.
                For spectra, 2D stack of spectra with dimensions (n_spectra, length)
            y_train:
                Vector with labels of dimension (n_images,), where n_images
                is the number of training images/spectra
            X_test:
                3D or 4D stack of test images or 2D stack of spectra with
                the same dimensions as X_train (Default: None)
            y_test:
                Vector with labels of dimension (n_images,), where n_images
                is the number of test images/spectra
loss:
reconstruction loss function, "ce" or "mse" (Default: "mse")
**capacity (list):
List containing (max_capacity, num_iters, gamma) parameters
to control the capacity of the latent channel.
Based on https://arxiv.org/pdf/1804.03599.pdf
**filename (str):
                file path for saving model after each training cycle ("epoch")
"""
self._check_inputs(X_train, y_train, X_test, y_test)
for k, v in kwargs.items():
if k in ["capacity"]:
self.kdict_[k] = v
self.compile_trainer(
(X_train, y_train), (X_test, y_test), **kwargs)
self.loss = loss # this part needs to be handled better
if self.loss == "ce":
self.sigmoid_out = True # Use sigmoid layer for "prediction" stage
self.metadict["sigmoid_out"] = True
for e in range(self.training_cycles):
self.current_epoch = e
elbo_epoch = self.train_epoch()
self.loss_history["train_loss"].append(elbo_epoch)
if self.test_iterator is not None:
elbo_epoch_test = self.evaluate_model()
self.loss_history["test_loss"].append(elbo_epoch_test)
self.print_statistics(e)
self.update_metadict()
self.save_model(self.filename)
return
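    # Hedged usage sketch (illustrative hyperparameters): training with a KL
    # capacity schedule (max_capacity, num_iters, gamma) and per-epoch checkpoints.
    # >>> vae = VAE(in_dim=(28, 28), latent_dim=2)
    # >>> vae.fit(X_train, X_test=X_test, loss="mse",
    # ...         training_cycles=100, batch_size=100,
    # ...         capacity=[25.0, 10000, 30.0], filename="vae_checkpoint")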
def update_metadict(self):
self.metadict["num_epochs"] = self.current_epoch
self.metadict["num_iter"] = self.kdict_["num_iter"]
| 40.650943 | 152 | 0.55515 |
2d6aa3889097c494ad5b49f8b5bf9e4a2454f8ed | 2,034 | py | Python | src/tests/json_processor_validation_test.py | sergicollado/json_redo | 89ed514e441f7038f18fe1833f2c49a4bf08ef19 | ["CC0-1.0"] | null | null | null | src/tests/json_processor_validation_test.py | sergicollado/json_redo | 89ed514e441f7038f18fe1833f2c49a4bf08ef19 | ["CC0-1.0"] | null | null | null | src/tests/json_processor_validation_test.py | sergicollado/json_redo | 89ed514e441f7038f18fe1833f2c49a4bf08ef19 | ["CC0-1.0"] | null | null | null |
import pytest
from infrastructure.message_client import MessageClient
from application_layer.json_processor import JsonProcessor
from infrastructure.processed_data_repository import ProcessedDataRepository
@pytest.fixture
def processed_repository():
repository = ProcessedDataRepository()
yield repository
repository.data.clear()
repository.invalid_items.clear()
@pytest.fixture
def json_processor(processed_repository):
return JsonProcessor(MessageClient, processed_repository)
@pytest.fixture
def post_errors_input_json():
filename = 'src/tests/fixtures/fixture_post_error.json'
json_file = open(filename, "rb")
yield json_file
json_file.close()
@pytest.fixture
def error_input_json():
filename = 'src/tests/fixtures/fixture_with_error.json'
json_file = open(filename, "rb")
yield json_file
json_file.close()
@pytest.fixture
def no_type_input_json():
filename = 'src/tests/fixtures/fixture_with_no_type_error.json'
json_file = open(filename, "rb")
yield json_file
json_file.close()
def test_should_save_invalid_type_post_items(post_errors_input_json, json_processor, processed_repository):
json_processor.process(post_errors_input_json)
expected_invalid_item_indices = [0,2]
for index in expected_invalid_item_indices:
assert processed_repository.get_invalid_item(index)
def test_should_save_every_type_notification_with_invalid_params(error_input_json, json_processor, processed_repository):
json_processor.process(error_input_json)
expected_invalid_item_indices = [0,1,2,3,4,5]
for index in expected_invalid_item_indices:
assert processed_repository.get_invalid_item(index)
def test_should_save_general_notification_error_when_item_has_not_type_or_name(no_type_input_json, json_processor, processed_repository):
json_processor.process(no_type_input_json)
expected_invalid_item_indices = [0,2,4]
for index in expected_invalid_item_indices:
assert processed_repository.get_invalid_item(index)
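# Hedged note (not part of the original test module): these tests are collected
# and run with pytest, e.g. from the project root:
#   pytest src/tests/json_processor_validation_test.py -v
# The fixture JSON files referenced above live under src/tests/fixtures/.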
| 35.068966 | 137 | 0.806785 |
fbbc8b89c983453d12e7391785752d18bb7ccfc7 | 93 | py | Python | runtime/python/Lib/curses/panel.py | hwaipy/InteractionFreeNode | 88642b68430f57b028fd0f276a5709f89279e30d | ["MIT"] | 207 | 2018-10-01T08:53:01.000Z | 2022-03-14T12:15:54.000Z | Thonny/Lib/curses/panel.py | Pydiderot/pydiderotIDE | a42fcde3ea837ae40c957469f5d87427e8ce46d3 | ["MIT"] | 30 | 2019-01-04T10:14:56.000Z | 2020-10-12T14:00:31.000Z | Thonny/Lib/curses/panel.py | Pydiderot/pydiderotIDE | a42fcde3ea837ae40c957469f5d87427e8ce46d3 | ["MIT"] | 76 | 2020-03-16T01:47:46.000Z | 2022-03-21T16:37:07.000Z |
"""curses.panel
Module for using panels with curses.
"""
from _curses_panel import *
| 13.285714 | 37 | 0.688172 |
10a8bf9c8c76b604e4b32b3941a8b10e99a65bdb | 289 | py | Python | manage.py | MatanAvitan/React-Redux-Flask-MongoDB | 499dc5d2ef5787f0a0d3583f9f1354ce1b4bcc1f | ["MIT"] | 14 | 2017-03-04T14:25:18.000Z | 2021-02-23T05:10:50.000Z | manage.py | MatanAvitan/React-Redux-Flask-MongoDB | 499dc5d2ef5787f0a0d3583f9f1354ce1b4bcc1f | ["MIT"] | null | null | null | manage.py | MatanAvitan/React-Redux-Flask-MongoDB | 499dc5d2ef5787f0a0d3583f9f1354ce1b4bcc1f | ["MIT"] | 9 | 2017-12-02T17:32:41.000Z | 2021-03-15T20:02:05.000Z |
from flask_script import Server, Manager, Shell
from application.app import app, db
manager = Manager(app)
manager.add_command('runserver', Server())
manager.add_command('shell', Shell(make_context=lambda: {
'app': app,
'db': db
}))
if __name__ == '__main__':
manager.run()
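# Hedged usage note (based on the commands registered above):
#   python manage.py runserver   # start the Flask development server
#   python manage.py shell       # interactive shell with `app` and `db` preloaded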
| 20.642857 | 57 | 0.702422 |
2eb7d89d6921edca7c8c518f1ec6caff0428e41b | 1,693 | py | Python | api/main.py | GabrielPila/twitter-extractor | 1ec983815a8a0603c3b911b87242ee5b3f9ecdd3 | ["MIT"] | 2 | 2022-02-15T17:11:26.000Z | 2022-02-20T02:47:42.000Z | api/main.py | GabrielPila/twitter-extractor | 1ec983815a8a0603c3b911b87242ee5b3f9ecdd3 | ["MIT"] | null | null | null | api/main.py | GabrielPila/twitter-extractor | 1ec983815a8a0603c3b911b87242ee5b3f9ecdd3 | ["MIT"] | null | null | null |
import requests
import os
import json
import pandas as pd
import datetime
from tqdm import tqdm
from settings import BEARER_TOKEN
from params import search_url, keyword, start_time, max_results, step_time, num_steps
from utils import create_headers, create_url, connect_to_endpoint
#Inputs for the request
bearer_token = BEARER_TOKEN
headers = create_headers(bearer_token)
start_time = pd.to_datetime(start_time)
if not os.path.exists('data'):
os.mkdir('data')
if not os.path.exists('data_users'):
os.mkdir('data_users')
for i in tqdm(list(range(num_steps))):
start = str(start_time + datetime.timedelta(minutes = i * step_time))
end = str(start_time + datetime.timedelta(minutes = (i+1) * step_time))
    start = start.replace(" ", "T").replace("+00:00", ".000Z")  # UTC offset in str() uses a colon
    end = end.replace(" ", "T").replace("+00:00", ".000Z")
params = create_url(keyword, start, end, max_results)
json_response = connect_to_endpoint(search_url, headers, params)
if json_response['meta']['result_count'] == 0:
continue
timestamp = str(datetime.datetime.now())[:-4]
end_file = f'{keyword.split(" ")[0]}_start_{start.split("+")[0]}_end_{end.split("+")[0]}'
data = pd.DataFrame(json_response['data'])
data['keyword'] = keyword
data['timestamp'] = timestamp
data.to_csv(f'data/dataTW_GP_{end_file}.csv', index=False)
users = pd.DataFrame(json_response['includes']['users'])
users['keyword'] = keyword
users['timestamp'] = timestamp
users.to_csv(f'data_users/usersTW_GP_{end_file}.csv', index=False)
#params = create_url(keyword, start_time, end_time, max_results)
#json_response = connect_to_endpoint(search_url, headers, params)
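# Hedged sketch (an assumption, not the repo's actual utils.py): create_headers
# presumably builds the standard Twitter API v2 bearer-token header, e.g.:
# def create_headers(bearer_token):
#     return {"Authorization": "Bearer {}".format(bearer_token)}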
| 32.557692 | 93 | 0.711164 |
908d0199df218cd3d8f8f42b1fcd198484b1e68a | 15,609 | py | Python | sdk/storage/azure-storage-blob/tests/test_largest_block_blob_async.py | praveenkuttappan/azure-sdk-for-python | 4b79413667b7539750a6c7dde15737013a3d4bd5 | ["MIT"] | 2,728 | 2015-01-09T10:19:32.000Z | 2022-03-31T14:50:33.000Z | sdk/storage/azure-storage-blob/tests/test_largest_block_blob_async.py | v-xuto/azure-sdk-for-python | 9c6296d22094c5ede410bc83749e8df8694ccacc | ["MIT"] | 17,773 | 2015-01-05T15:57:17.000Z | 2022-03-31T23:50:25.000Z | sdk/storage/azure-storage-blob/tests/test_largest_block_blob_async.py | v-xuto/azure-sdk-for-python | 9c6296d22094c5ede410bc83749e8df8694ccacc | ["MIT"] | 1,916 | 2015-01-19T05:05:41.000Z | 2022-03-31T19:36:44.000Z |
# coding: utf-8
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
from io import BytesIO
import pytest
from os import path, remove, urandom
import platform
import uuid
from azure.core.pipeline.policies import SansIOHTTPPolicy
from azure.core.pipeline.transport import AioHttpTransport
from multidict import CIMultiDict, CIMultiDictProxy
from azure.storage.blob.aio import (
BlobServiceClient
)
from azure.storage.blob import (
BlobBlock
)
from azure.storage.blob._shared.base_client import _format_shared_key_credential
from azure.storage.blob._shared.constants import CONNECTION_TIMEOUT, READ_TIMEOUT
from devtools_testutils.storage.aio import AsyncStorageTestCase
from settings.testcase import BlobPreparer
# ------------------------------------------------------------------------------
TEST_BLOB_PREFIX = 'largestblob'
LARGEST_BLOCK_SIZE = 4000 * 1024 * 1024
LARGEST_SINGLE_UPLOAD_SIZE = 5000 * 1024 * 1024
# ------------------------------------------------------------------------------
if platform.python_implementation() == 'PyPy':
pytest.skip("Skip tests for Pypy", allow_module_level=True)
class AiohttpTestTransport(AioHttpTransport):
"""Workaround to vcrpy bug: https://github.com/kevin1024/vcrpy/pull/461
"""
async def send(self, request, **config):
response = await super(AiohttpTestTransport, self).send(request, **config)
if not isinstance(response.headers, CIMultiDictProxy):
response.headers = CIMultiDictProxy(CIMultiDict(response.internal_response.headers))
response.content_type = response.headers.get("content-type")
return response
class StorageLargestBlockBlobTestAsync(AsyncStorageTestCase):
async def _setup(self, storage_account_name, key, additional_policies=None, min_large_block_upload_threshold=1 * 1024 * 1024,
max_single_put_size=32 * 1024):
self.bsc = BlobServiceClient(
self.account_url(storage_account_name, "blob"),
credential=key,
max_single_put_size=max_single_put_size,
max_block_size=LARGEST_BLOCK_SIZE,
min_large_block_upload_threshold=min_large_block_upload_threshold,
_additional_pipeline_policies=additional_policies,
transport=AiohttpTestTransport(
connection_timeout=CONNECTION_TIMEOUT,
read_timeout=READ_TIMEOUT
))
self.config = self.bsc._config
self.container_name = self.get_resource_name('utcontainer')
self.container_name = self.container_name + str(uuid.uuid4())
if self.is_live:
await self.bsc.create_container(self.container_name)
def _teardown(self, file_name):
if path.isfile(file_name):
try:
remove(file_name)
except:
pass
# --Helpers-----------------------------------------------------------------
def _get_blob_reference(self):
return self.get_resource_name(TEST_BLOB_PREFIX)
async def _create_blob(self):
blob_name = self._get_blob_reference()
blob = self.bsc.get_blob_client(self.container_name, blob_name)
await blob.upload_blob(b'')
return blob
# --Test cases for block blobs --------------------------------------------
@pytest.mark.live_test_only
@pytest.mark.skip(reason="This takes really long time")
@BlobPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_put_block_bytes_largest(self, storage_account_name, storage_account_key):
await self._setup(storage_account_name, storage_account_key)
blob = await self._create_blob()
# Act
data = urandom(LARGEST_BLOCK_SIZE)
blockId = str(uuid.uuid4()).encode('utf-8')
resp = await blob.stage_block(
blockId,
data,
length=LARGEST_BLOCK_SIZE)
await blob.commit_block_list([BlobBlock(blockId)])
block_list = await blob.get_block_list()
# Assert
self.assertIsNotNone(resp)
assert 'content_md5' in resp
assert 'content_crc64' in resp
assert 'request_id' in resp
self.assertIsNotNone(block_list)
self.assertEqual(len(block_list), 2)
self.assertEqual(len(block_list[1]), 0)
self.assertEqual(len(block_list[0]), 1)
self.assertEqual(block_list[0][0].size, LARGEST_BLOCK_SIZE)
@pytest.mark.live_test_only
@BlobPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_put_block_bytes_largest_without_network(self, storage_account_name, storage_account_key):
payload_dropping_policy = PayloadDroppingPolicy()
credential_policy = _format_shared_key_credential(storage_account_name, storage_account_key)
await self._setup(storage_account_name, storage_account_key, [payload_dropping_policy, credential_policy])
blob = await self._create_blob()
# Act
data = urandom(LARGEST_BLOCK_SIZE)
blockId = str(uuid.uuid4()).encode('utf-8')
resp = await blob.stage_block(
blockId,
data,
length=LARGEST_BLOCK_SIZE)
await blob.commit_block_list([BlobBlock(blockId)])
block_list = await blob.get_block_list()
# Assert
self.assertIsNotNone(resp)
assert 'content_md5' in resp
assert 'content_crc64' in resp
assert 'request_id' in resp
self.assertIsNotNone(block_list)
self.assertEqual(len(block_list), 2)
self.assertEqual(len(block_list[1]), 0)
self.assertEqual(len(block_list[0]), 1)
self.assertEqual(payload_dropping_policy.put_block_counter, 1)
self.assertEqual(payload_dropping_policy.put_block_sizes[0], LARGEST_BLOCK_SIZE)
@pytest.mark.live_test_only
@pytest.mark.skip(reason="This takes really long time")
@BlobPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_put_block_stream_largest(self, storage_account_name, storage_account_key):
await self._setup(storage_account_name, storage_account_key)
blob = await self._create_blob()
# Act
stream = LargeStream(LARGEST_BLOCK_SIZE)
blockId = str(uuid.uuid4())
requestId = str(uuid.uuid4())
resp = await blob.stage_block(
blockId,
stream,
length=LARGEST_BLOCK_SIZE,
client_request_id=requestId)
await blob.commit_block_list([BlobBlock(blockId)])
block_list = await blob.get_block_list()
# Assert
self.assertIsNotNone(resp)
assert 'content_md5' in resp
assert 'content_crc64' in resp
assert 'request_id' in resp
self.assertIsNotNone(block_list)
self.assertEqual(len(block_list), 2)
self.assertEqual(len(block_list[1]), 0)
self.assertEqual(len(block_list[0]), 1)
self.assertEqual(block_list[0][0].size, LARGEST_BLOCK_SIZE)
@pytest.mark.live_test_only
@BlobPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_put_block_stream_largest_without_network(self, storage_account_name, storage_account_key):
payload_dropping_policy = PayloadDroppingPolicy()
credential_policy = _format_shared_key_credential(storage_account_name, storage_account_key)
await self._setup(storage_account_name, storage_account_key, [payload_dropping_policy, credential_policy])
blob = await self._create_blob()
# Act
stream = LargeStream(LARGEST_BLOCK_SIZE)
blockId = str(uuid.uuid4())
requestId = str(uuid.uuid4())
resp = await blob.stage_block(
blockId,
stream,
length=LARGEST_BLOCK_SIZE,
client_request_id=requestId)
await blob.commit_block_list([BlobBlock(blockId)])
block_list = await blob.get_block_list()
# Assert
self.assertIsNotNone(resp)
assert 'content_md5' in resp
assert 'content_crc64' in resp
assert 'request_id' in resp
self.assertIsNotNone(block_list)
self.assertEqual(len(block_list), 2)
self.assertEqual(len(block_list[1]), 0)
self.assertEqual(len(block_list[0]), 1)
self.assertEqual(payload_dropping_policy.put_block_counter, 1)
self.assertEqual(payload_dropping_policy.put_block_sizes[0], LARGEST_BLOCK_SIZE)
@pytest.mark.live_test_only
@pytest.mark.skip(reason="This takes really long time")
@BlobPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_create_largest_blob_from_path(self, storage_account_name, storage_account_key):
await self._setup(storage_account_name, storage_account_key)
blob_name = self._get_blob_reference()
blob = self.bsc.get_blob_client(self.container_name, blob_name)
FILE_PATH = 'largest_blob_from_path.temp.{}.dat'.format(str(uuid.uuid4()))
with open(FILE_PATH, 'wb') as stream:
largeStream = LargeStream(LARGEST_BLOCK_SIZE, 100 * 1024 * 1024)
chunk = largeStream.read()
while chunk:
stream.write(chunk)
chunk = largeStream.read()
# Act
with open(FILE_PATH, 'rb') as stream:
await blob.upload_blob(stream, max_concurrency=2)
# Assert
self._teardown(FILE_PATH)
@pytest.mark.live_test_only
@BlobPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_create_largest_blob_from_path_without_network(self, storage_account_name, storage_account_key):
payload_dropping_policy = PayloadDroppingPolicy()
credential_policy = _format_shared_key_credential(storage_account_name, storage_account_key)
await self._setup(storage_account_name, storage_account_key, [payload_dropping_policy, credential_policy])
blob_name = self._get_blob_reference()
blob = self.bsc.get_blob_client(self.container_name, blob_name)
FILE_PATH = 'largest_blob_from_path.temp.{}.dat'.format(str(uuid.uuid4()))
with open(FILE_PATH, 'wb') as stream:
largeStream = LargeStream(LARGEST_BLOCK_SIZE, 100 * 1024 * 1024)
chunk = largeStream.read()
while chunk:
stream.write(chunk)
chunk = largeStream.read()
# Act
with open(FILE_PATH, 'rb') as stream:
await blob.upload_blob(stream, max_concurrency=2)
# Assert
self._teardown(FILE_PATH)
self.assertEqual(payload_dropping_policy.put_block_counter, 1)
self.assertEqual(payload_dropping_policy.put_block_sizes[0], LARGEST_BLOCK_SIZE)
@pytest.mark.skip(reason="This takes really long time")
@pytest.mark.live_test_only
@BlobPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_create_largest_blob_from_stream_without_network(self, storage_account_name, storage_account_key):
payload_dropping_policy = PayloadDroppingPolicy()
credential_policy = _format_shared_key_credential(storage_account_name, storage_account_key)
await self._setup(storage_account_name, storage_account_key, [payload_dropping_policy, credential_policy])
blob_name = self._get_blob_reference()
blob = self.bsc.get_blob_client(self.container_name, blob_name)
number_of_blocks = 50000
stream = LargeStream(LARGEST_BLOCK_SIZE*number_of_blocks)
# Act
await blob.upload_blob(stream, max_concurrency=1)
# Assert
self.assertEqual(payload_dropping_policy.put_block_counter, number_of_blocks)
self.assertEqual(payload_dropping_policy.put_block_sizes[0], LARGEST_BLOCK_SIZE)
@pytest.mark.live_test_only
@BlobPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_create_largest_blob_from_stream_single_upload_without_network(self, storage_account_name, storage_account_key):
payload_dropping_policy = PayloadDroppingPolicy()
credential_policy = _format_shared_key_credential(storage_account_name, storage_account_key)
await self._setup(storage_account_name, storage_account_key, [payload_dropping_policy, credential_policy],
max_single_put_size=LARGEST_SINGLE_UPLOAD_SIZE + 1)
blob_name = self._get_blob_reference()
blob = self.bsc.get_blob_client(self.container_name, blob_name)
stream = LargeStream(LARGEST_SINGLE_UPLOAD_SIZE)
# Act
await blob.upload_blob(stream, length=LARGEST_SINGLE_UPLOAD_SIZE, max_concurrency=1)
# Assert
self.assertEqual(payload_dropping_policy.put_block_counter, 0)
self.assertEqual(payload_dropping_policy.put_blob_counter, 1)
class LargeStream(BytesIO):
def __init__(self, length, initial_buffer_length=1024 * 1024):
self._base_data = urandom(initial_buffer_length)
self._base_data_length = initial_buffer_length
self._position = 0
self._remaining = length
self._closed = False
def read(self, size=None):
if self._remaining == 0:
return b""
if size is None:
e = self._base_data_length
else:
e = size
e = min(e, self._remaining)
if e > self._base_data_length:
self._base_data = urandom(e)
self._base_data_length = e
self._remaining = self._remaining - e
return self._base_data[:e]
def remaining(self):
return self._remaining
def close(self):
self._closed = True
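    # Hedged usage sketch: LargeStream yields pseudo-random bytes until `length`
    # is exhausted, so multi-GB payloads can be produced without buffering them.
    # >>> stream = LargeStream(16 * 1024 * 1024)   # 16 MiB, illustrative size
    # >>> chunk = stream.read(4 * 1024 * 1024)
    # >>> while chunk:
    # ...     chunk = stream.read(4 * 1024 * 1024)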
class PayloadDroppingPolicy(SansIOHTTPPolicy):
def __init__(self):
self.put_block_counter = 0
self.put_block_sizes = []
self.put_blob_counter = 0
self.put_blob_sizes = []
def on_request(self, request): # type: (PipelineRequest) -> Union[None, Awaitable[None]]
if _is_put_block_request(request):
if request.http_request.body:
self.put_block_counter = self.put_block_counter + 1
self.put_block_sizes.append(_get_body_length(request))
replacement = "dummy_body"
request.http_request.body = replacement
request.http_request.headers["Content-Length"] = str(len(replacement))
elif _is_put_blob_request(request):
if request.http_request.body:
self.put_blob_counter = self.put_blob_counter + 1
self.put_blob_sizes.append(_get_body_length(request))
replacement = "dummy_body"
request.http_request.body = replacement
request.http_request.headers["Content-Length"] = str(len(replacement))
def _is_put_block_request(request):
query = request.http_request.query
return query and "comp" in query and query["comp"] == "block"
def _is_put_blob_request(request):
query = request.http_request.query
return request.http_request.method == "PUT" and not query
def _get_body_length(request):
body = request.http_request.body
length = 0
if hasattr(body, "read"):
chunk = body.read(10*1024*1024)
while chunk:
length = length + len(chunk)
chunk = body.read(10 * 1024 * 1024)
else:
length = len(body)
return length
# ------------------------------------------------------------------------------
| 40.754569 | 130 | 0.670703 |
c4c88b9ccabd1f50b7c7e07a70d4a5952d475189 | 3,007 | py | Python | tests/conftest.py | ofek/hatch-vcs | c5388d67192d9bf88191927a35f51705121784a1 | ["MIT"] | null | null | null | tests/conftest.py | ofek/hatch-vcs | c5388d67192d9bf88191927a35f51705121784a1 | ["MIT"] | 1 | 2022-03-08T04:07:09.000Z | 2022-03-18T05:41:17.000Z | tests/conftest.py | ofek/hatch-vcs | c5388d67192d9bf88191927a35f51705121784a1 | ["MIT"] | null | null | null |
# SPDX-FileCopyrightText: 2022-present Ofek Lev <oss@ofek.dev>
#
# SPDX-License-Identifier: MIT
import errno
import os
import shutil
import stat
import tempfile
from contextlib import contextmanager
import pytest
from .utils import create_file, git, write_file
def handle_remove_readonly(func, path, exc): # no cov
# PermissionError: [WinError 5] Access is denied: '...\\.git\\...'
if func in (os.rmdir, os.remove, os.unlink) and exc[1].errno == errno.EACCES:
os.chmod(path, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)
func(path)
else:
raise
@pytest.fixture
def temp_dir():
directory = tempfile.mkdtemp()
try:
directory = os.path.realpath(directory)
yield directory
finally:
shutil.rmtree(directory, ignore_errors=False, onerror=handle_remove_readonly)
@contextmanager
def create_project(directory, metadata, setup_vcs=True):
project_dir = os.path.join(directory, 'my-app')
os.mkdir(project_dir)
gitignore_file = os.path.join(project_dir, '.gitignore')
write_file(gitignore_file, '/my_app/version.py')
project_file = os.path.join(project_dir, 'pyproject.toml')
write_file(project_file, metadata)
package_dir = os.path.join(project_dir, 'my_app')
os.mkdir(package_dir)
create_file(os.path.join(package_dir, '__init__.py'))
create_file(os.path.join(package_dir, 'foo.py'))
create_file(os.path.join(package_dir, 'bar.py'))
create_file(os.path.join(package_dir, 'baz.py'))
origin = os.getcwd()
os.chdir(project_dir)
try:
if setup_vcs:
git('init')
git('config', '--local', 'user.name', 'foo')
git('config', '--local', 'user.email', 'foo@bar.baz')
git('add', '.')
git('commit', '-m', 'test')
git('tag', '1.2.3')
yield project_dir
finally:
os.chdir(origin)
@pytest.fixture
def new_project_basic(temp_dir):
with create_project(
temp_dir,
"""\
[build-system]
requires = ["hatchling", "hatch-vcs"]
build-backend = "hatchling.build"
[project]
name = "my-app"
dynamic = ["version"]
[tool.hatch.version]
source = "vcs"
""",
) as project:
yield project
@pytest.fixture
def new_project_write(temp_dir):
with create_project(
temp_dir,
"""\
[build-system]
requires = ["hatchling", "hatch-vcs"]
build-backend = "hatchling.build"
[project]
name = "my-app"
dynamic = ["version"]
[tool.hatch.version]
source = "vcs"
[tool.hatch.build.targets.wheel.hooks.vcs]
version-file = "my_app/_version.py"
""",
) as project:
yield project
@pytest.fixture
def new_project_fallback(temp_dir):
with create_project(
temp_dir,
"""\
[build-system]
requires = ["hatchling", "hatch-vcs"]
build-backend = "hatchling.build"
[project]
name = "my-app"
dynamic = ["version"]
[tool.hatch.version]
source = "vcs"
fallback-version = "7.8.9"
""",
setup_vcs=False,
) as project:
yield project
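# Hedged example (not part of the original conftest): a test consuming one of the
# fixtures above; each fixture yields the project directory and chdirs into it.
# def test_project_layout(new_project_basic):
#     assert os.path.isfile(os.path.join(new_project_basic, 'pyproject.toml'))
#     assert os.path.isdir(os.path.join(new_project_basic, 'my_app'))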
| 22.609023 | 85 | 0.648487 |
099f46f6b27f1a57cb9e8e5dd18e0e07a7b58bb6 | 1,190 | py | Python | pyCs/lab1/token_parser.py | kinpa200296/TM_labs | a0d69c6318e774f418f478f65dcfd8fbc684e51f | ["MIT"] | null | null | null | pyCs/lab1/token_parser.py | kinpa200296/TM_labs | a0d69c6318e774f418f478f65dcfd8fbc684e51f | ["MIT"] | null | null | null | pyCs/lab1/token_parser.py | kinpa200296/TM_labs | a0d69c6318e774f418f478f65dcfd8fbc684e51f | ["MIT"] | null | null | null |
import re
from token import Token
class TokenParser(object):
def __init__(self, token_name, templates):
if not isinstance(token_name, unicode):
raise ValueError('token_name should be a unicode string')
if not isinstance(templates, list):
raise ValueError('templates should be a list')
for template in templates:
if not isinstance(template, unicode):
raise ValueError('templates elements should all be unicode strings')
self.token_name = token_name
self.templates = templates
def __repr__(self):
return self.__str__().__repr__()
def __str__(self):
return '"{token_name}":\n\t{templates}'.format(token_name=self.token_name,
templates="\n\t".join(self.templates))
def match(self, s):
if not isinstance(s, unicode):
raise ValueError('argument should be a unicode string')
for template in self.templates:
regexp = "^{template}$".format(template=template)
m = re.match(regexp, s)
if m is not None:
return Token(self.token_name, s)
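    # Hedged usage sketch (Python 2 style, matching the module): match() returns
    # a Token for the first matching template and implicitly None otherwise.
    # >>> parser = TokenParser(u'number', [u'[0-9]+'])
    # >>> parser.match(u'42')    # -> Token(u'number', u'42')
    # >>> parser.match(u'abc')   # -> None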
| 36.060606 | 93 | 0.603361 |
06ed0a90e896f0fa5f12a1f905bafd87bedce07e | 9 | py | Python | traitor/__init__.py | Nalisarc/BetrayalClone | 51086f3d56ec3ab702f479ffd718084b4f2b7212 | ["MIT"] | null | null | null | traitor/__init__.py | Nalisarc/BetrayalClone | 51086f3d56ec3ab702f479ffd718084b4f2b7212 | ["MIT"] | null | null | null | traitor/__init__.py | Nalisarc/BetrayalClone | 51086f3d56ec3ab702f479ffd718084b4f2b7212 | ["MIT"] | null | null | null |
## blank
| 4.5 | 8 | 0.555556 |
a4ffedb6e998eae3f47fe49e488fcb9c834d9f21 | 31,072 | py | Python | rootfs/usr/lib/python2.7/smtplib.py | kappaIO-Dev/kappaIO-sdk-armhf-crosscompile | 66fc5fc21e6235f7a3be72a7ccac68e2224b7fb2 | ["MIT"] | null | null | null | rootfs/usr/lib/python2.7/smtplib.py | kappaIO-Dev/kappaIO-sdk-armhf-crosscompile | 66fc5fc21e6235f7a3be72a7ccac68e2224b7fb2 | ["MIT"] | null | null | null | rootfs/usr/lib/python2.7/smtplib.py | kappaIO-Dev/kappaIO-sdk-armhf-crosscompile | 66fc5fc21e6235f7a3be72a7ccac68e2224b7fb2 | ["MIT"] | null | null | null |
#! /usr/bin/python2.7
'''SMTP/ESMTP client class.
This should follow RFC 821 (SMTP), RFC 1869 (ESMTP), RFC 2554 (SMTP
Authentication) and RFC 2487 (Secure SMTP over TLS).
Notes:
Please remember, when doing ESMTP, that the names of the SMTP service
extensions are NOT the same thing as the option keywords for the RCPT
and MAIL commands!
Example:
>>> import smtplib
>>> s=smtplib.SMTP("localhost")
>>> print s.help()
This is Sendmail version 8.8.4
Topics:
HELO EHLO MAIL RCPT DATA
RSET NOOP QUIT HELP VRFY
EXPN VERB ETRN DSN
For more info use "HELP <topic>".
To report bugs in the implementation send email to
sendmail-bugs@sendmail.org.
For local information send email to Postmaster at your site.
End of HELP info
>>> s.putcmd("vrfy","someone@here")
>>> s.getreply()
(250, "Somebody OverHere <somebody@here.my.org>")
>>> s.quit()
'''
# Author: The Dragon De Monsyne <dragondm@integral.org>
# ESMTP support, test code and doc fixes added by
# Eric S. Raymond <esr@thyrsus.com>
# Better RFC 821 compliance (MAIL and RCPT, and CRLF in data)
# by Carey Evans <c.evans@clear.net.nz>, for picky mail servers.
# RFC 2554 (authentication) support by Gerhard Haering <gerhard@bigfoot.de>.
#
# This was modified from the Python 1.5 library HTTP lib.
import socket
import re
import email.utils
import base64
import hmac
from email.base64mime import encode as encode_base64
from sys import stderr
__all__ = ["SMTPException", "SMTPServerDisconnected", "SMTPResponseException",
"SMTPSenderRefused", "SMTPRecipientsRefused", "SMTPDataError",
"SMTPConnectError", "SMTPHeloError", "SMTPAuthenticationError",
"quoteaddr", "quotedata", "SMTP"]
SMTP_PORT = 25
SMTP_SSL_PORT = 465
CRLF = "\r\n"
OLDSTYLE_AUTH = re.compile(r"auth=(.*)", re.I)
# Exception classes used by this module.
class SMTPException(Exception):
"""Base class for all exceptions raised by this module."""
class SMTPServerDisconnected(SMTPException):
"""Not connected to any SMTP server.
This exception is raised when the server unexpectedly disconnects,
or when an attempt is made to use the SMTP instance before
connecting it to a server.
"""
class SMTPResponseException(SMTPException):
"""Base class for all exceptions that include an SMTP error code.
These exceptions are generated in some instances when the SMTP
server returns an error code. The error code is stored in the
`smtp_code' attribute of the error, and the `smtp_error' attribute
is set to the error message.
"""
def __init__(self, code, msg):
self.smtp_code = code
self.smtp_error = msg
self.args = (code, msg)
class SMTPSenderRefused(SMTPResponseException):
"""Sender address refused.
    In addition to the attributes set on all SMTPResponseException
exceptions, this sets `sender' to the string that the SMTP refused.
"""
def __init__(self, code, msg, sender):
self.smtp_code = code
self.smtp_error = msg
self.sender = sender
self.args = (code, msg, sender)
class SMTPRecipientsRefused(SMTPException):
"""All recipient addresses refused.
The errors for each recipient are accessible through the attribute
'recipients', which is a dictionary of exactly the same sort as
SMTP.sendmail() returns.
"""
def __init__(self, recipients):
self.recipients = recipients
self.args = (recipients,)
class SMTPDataError(SMTPResponseException):
"""The SMTP server didn't accept the data."""
class SMTPConnectError(SMTPResponseException):
"""Error during connection establishment."""
class SMTPHeloError(SMTPResponseException):
"""The server refused our HELO reply."""
class SMTPAuthenticationError(SMTPResponseException):
"""Authentication error.
Most probably the server didn't accept the username/password
combination provided.
"""
def quoteaddr(addr):
"""Quote a subset of the email addresses defined by RFC 821.
Should be able to handle anything rfc822.parseaddr can handle.
"""
m = (None, None)
try:
m = email.utils.parseaddr(addr)[1]
except AttributeError:
pass
if m == (None, None): # Indicates parse failure or AttributeError
# something weird here.. punt -ddm
return "<%s>" % addr
elif m is None:
# the sender wants an empty return address
return "<>"
else:
return "<%s>" % m
def _addr_only(addrstring):
displayname, addr = email.utils.parseaddr(addrstring)
if (displayname, addr) == ('', ''):
# parseaddr couldn't parse it, so use it as is.
return addrstring
return addr
def quotedata(data):
"""Quote data for email.
Double leading '.', and change Unix newline '\\n', or Mac '\\r' into
Internet CRLF end-of-line.
"""
return re.sub(r'(?m)^\.', '..',
re.sub(r'(?:\r\n|\n|\r(?!\n))', CRLF, data))
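# Illustrative example (not in the original module): quotedata doubles a leading
# '.' and normalizes bare LF/CR line endings to CRLF, e.g.
#   quotedata('first line\n.starts with a dot\r')
#   -> 'first line\r\n..starts with a dot\r\n'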
try:
import ssl
except ImportError:
_have_ssl = False
else:
class SSLFakeFile:
"""A fake file like object that really wraps a SSLObject.
It only supports what is needed in smtplib.
"""
def __init__(self, sslobj):
self.sslobj = sslobj
def readline(self):
str = ""
chr = None
while chr != "\n":
chr = self.sslobj.read(1)
if not chr:
break
str += chr
return str
def close(self):
pass
_have_ssl = True
class SMTP:
"""This class manages a connection to an SMTP or ESMTP server.
SMTP Objects:
SMTP objects have the following attributes:
helo_resp
This is the message given by the server in response to the
most recent HELO command.
ehlo_resp
This is the message given by the server in response to the
most recent EHLO command. This is usually multiline.
does_esmtp
This is a True value _after you do an EHLO command_, if the
server supports ESMTP.
esmtp_features
This is a dictionary, which, if the server supports ESMTP,
will _after you do an EHLO command_, contain the names of the
SMTP service extensions this server supports, and their
parameters (if any).
Note, all extension names are mapped to lower case in the
dictionary.
See each method's docstrings for details. In general, there is a
method of the same name to perform each SMTP command. There is also a
method called 'sendmail' that will do an entire mail transaction.
"""
debuglevel = 0
file = None
helo_resp = None
ehlo_msg = "ehlo"
ehlo_resp = None
does_esmtp = 0
default_port = SMTP_PORT
def __init__(self, host='', port=0, local_hostname=None,
timeout=socket._GLOBAL_DEFAULT_TIMEOUT):
"""Initialize a new instance.
If specified, `host' is the name of the remote host to which to
connect. If specified, `port' specifies the port to which to connect.
By default, smtplib.SMTP_PORT is used. An SMTPConnectError is raised
if the specified `host' doesn't respond correctly. If specified,
`local_hostname` is used as the FQDN of the local host. By default,
the local hostname is found using socket.getfqdn().
"""
self.timeout = timeout
self.esmtp_features = {}
if host:
(code, msg) = self.connect(host, port)
if code != 220:
raise SMTPConnectError(code, msg)
if local_hostname is not None:
self.local_hostname = local_hostname
else:
# RFC 2821 says we should use the fqdn in the EHLO/HELO verb, and
# if that can't be calculated, that we should use a domain literal
# instead (essentially an encoded IP address like [A.B.C.D]).
fqdn = socket.getfqdn()
if '.' in fqdn:
self.local_hostname = fqdn
else:
# We can't find an fqdn hostname, so use a domain literal
addr = '127.0.0.1'
try:
addr = socket.gethostbyname(socket.gethostname())
except socket.gaierror:
pass
self.local_hostname = '[%s]' % addr
def set_debuglevel(self, debuglevel):
"""Set the debug output level.
A non-false value results in debug messages for connection and for all
messages sent to and received from the server.
"""
self.debuglevel = debuglevel
def _get_socket(self, port, host, timeout):
# This makes it simpler for SMTP_SSL to use the SMTP connect code
# and just alter the socket connection bit.
if self.debuglevel > 0:
print>>stderr, 'connect:', (host, port)
return socket.create_connection((port, host), timeout)
def connect(self, host='localhost', port=0):
"""Connect to a host on a given port.
If the hostname ends with a colon (`:') followed by a number, and
there is no port specified, that suffix will be stripped off and the
number interpreted as the port number to use.
Note: This method is automatically invoked by __init__, if a host is
specified during instantiation.
"""
if not port and (host.find(':') == host.rfind(':')):
i = host.rfind(':')
if i >= 0:
host, port = host[:i], host[i + 1:]
try:
port = int(port)
except ValueError:
raise socket.error, "nonnumeric port"
if not port:
port = self.default_port
if self.debuglevel > 0:
print>>stderr, 'connect:', (host, port)
self.sock = self._get_socket(host, port, self.timeout)
(code, msg) = self.getreply()
if self.debuglevel > 0:
print>>stderr, "connect:", msg
return (code, msg)
def send(self, str):
"""Send `str' to the server."""
if self.debuglevel > 0:
print>>stderr, 'send:', repr(str)
if hasattr(self, 'sock') and self.sock:
try:
self.sock.sendall(str)
except socket.error:
self.close()
raise SMTPServerDisconnected('Server not connected')
else:
raise SMTPServerDisconnected('please run connect() first')
def putcmd(self, cmd, args=""):
"""Send a command to the server."""
if args == "":
str = '%s%s' % (cmd, CRLF)
else:
str = '%s %s%s' % (cmd, args, CRLF)
self.send(str)
def getreply(self):
"""Get a reply from the server.
Returns a tuple consisting of:
- server response code (e.g. '250', or such, if all goes well)
Note: returns -1 if it can't read response code.
- server response string corresponding to response code (multiline
responses are converted to a single, multiline string).
Raises SMTPServerDisconnected if end-of-file is reached.
"""
resp = []
if self.file is None:
self.file = self.sock.makefile('rb')
while 1:
try:
line = self.file.readline()
except socket.error as e:
self.close()
raise SMTPServerDisconnected("Connection unexpectedly closed: "
+ str(e))
if line == '':
self.close()
raise SMTPServerDisconnected("Connection unexpectedly closed")
if self.debuglevel > 0:
print>>stderr, 'reply:', repr(line)
resp.append(line[4:].strip())
code = line[:3]
# Check that the error code is syntactically correct.
# Don't attempt to read a continuation line if it is broken.
try:
errcode = int(code)
except ValueError:
errcode = -1
break
# Check if multiline response.
if line[3:4] != "-":
break
errmsg = "\n".join(resp)
if self.debuglevel > 0:
print>>stderr, 'reply: retcode (%s); Msg: %s' % (errcode, errmsg)
return errcode, errmsg
def docmd(self, cmd, args=""):
"""Send a command, and return its response code."""
self.putcmd(cmd, args)
return self.getreply()
# std smtp commands
def helo(self, name=''):
"""SMTP 'helo' command.
Hostname to send for this command defaults to the FQDN of the local
host.
"""
self.putcmd("helo", name or self.local_hostname)
(code, msg) = self.getreply()
self.helo_resp = msg
return (code, msg)
def ehlo(self, name=''):
""" SMTP 'ehlo' command.
Hostname to send for this command defaults to the FQDN of the local
host.
"""
self.esmtp_features = {}
self.putcmd(self.ehlo_msg, name or self.local_hostname)
(code, msg) = self.getreply()
# According to RFC1869 some (badly written)
# MTA's will disconnect on an ehlo. Toss an exception if
# that happens -ddm
if code == -1 and len(msg) == 0:
self.close()
raise SMTPServerDisconnected("Server not connected")
self.ehlo_resp = msg
if code != 250:
return (code, msg)
self.does_esmtp = 1
#parse the ehlo response -ddm
resp = self.ehlo_resp.split('\n')
del resp[0]
for each in resp:
# To be able to communicate with as many SMTP servers as possible,
# we have to take the old-style auth advertisement into account,
# because:
# 1) Else our SMTP feature parser gets confused.
# 2) There are some servers that only advertise the auth methods we
# support using the old style.
auth_match = OLDSTYLE_AUTH.match(each)
if auth_match:
# This doesn't remove duplicates, but that's no problem
self.esmtp_features["auth"] = self.esmtp_features.get("auth", "") \
+ " " + auth_match.groups(0)[0]
continue
# RFC 1869 requires a space between ehlo keyword and parameters.
# It's actually stricter, in that only spaces are allowed between
# parameters, but were not going to check for that here. Note
# that the space isn't present if there are no parameters.
m = re.match(r'(?P<feature>[A-Za-z0-9][A-Za-z0-9\-]*) ?', each)
if m:
feature = m.group("feature").lower()
params = m.string[m.end("feature"):].strip()
if feature == "auth":
self.esmtp_features[feature] = self.esmtp_features.get(feature, "") \
+ " " + params
else:
self.esmtp_features[feature] = params
return (code, msg)
def has_extn(self, opt):
"""Does the server support a given SMTP service extension?"""
return opt.lower() in self.esmtp_features
def help(self, args=''):
"""SMTP 'help' command.
Returns help text from server."""
self.putcmd("help", args)
return self.getreply()[1]
def rset(self):
"""SMTP 'rset' command -- resets session."""
return self.docmd("rset")
def noop(self):
"""SMTP 'noop' command -- doesn't do anything :>"""
return self.docmd("noop")
def mail(self, sender, options=[]):
"""SMTP 'mail' command -- begins mail xfer session."""
optionlist = ''
if options and self.does_esmtp:
optionlist = ' ' + ' '.join(options)
self.putcmd("mail", "FROM:%s%s" % (quoteaddr(sender), optionlist))
return self.getreply()
def rcpt(self, recip, options=[]):
"""SMTP 'rcpt' command -- indicates 1 recipient for this mail."""
optionlist = ''
if options and self.does_esmtp:
optionlist = ' ' + ' '.join(options)
self.putcmd("rcpt", "TO:%s%s" % (quoteaddr(recip), optionlist))
return self.getreply()
def data(self, msg):
"""SMTP 'DATA' command -- sends message data to server.
Automatically quotes lines beginning with a period per rfc821.
Raises SMTPDataError if there is an unexpected reply to the
DATA command; the return value from this method is the final
        response code received when all data has been sent.
"""
self.putcmd("data")
(code, repl) = self.getreply()
if self.debuglevel > 0:
print>>stderr, "data:", (code, repl)
if code != 354:
raise SMTPDataError(code, repl)
else:
q = quotedata(msg)
if q[-2:] != CRLF:
q = q + CRLF
q = q + "." + CRLF
self.send(q)
(code, msg) = self.getreply()
if self.debuglevel > 0:
print>>stderr, "data:", (code, msg)
return (code, msg)
def verify(self, address):
"""SMTP 'verify' command -- checks for address validity."""
self.putcmd("vrfy", _addr_only(address))
return self.getreply()
# a.k.a.
vrfy = verify
def expn(self, address):
"""SMTP 'expn' command -- expands a mailing list."""
self.putcmd("expn", _addr_only(address))
return self.getreply()
# some useful methods
def ehlo_or_helo_if_needed(self):
"""Call self.ehlo() and/or self.helo() if needed.
If there has been no previous EHLO or HELO command this session, this
method tries ESMTP EHLO first.
This method may raise the following exceptions:
SMTPHeloError The server didn't reply properly to
the helo greeting.
"""
if self.helo_resp is None and self.ehlo_resp is None:
if not (200 <= self.ehlo()[0] <= 299):
(code, resp) = self.helo()
if not (200 <= code <= 299):
raise SMTPHeloError(code, resp)
def login(self, user, password):
"""Log in on an SMTP server that requires authentication.
The arguments are:
- user: The user name to authenticate with.
- password: The password for the authentication.
If there has been no previous EHLO or HELO command this session, this
method tries ESMTP EHLO first.
This method will return normally if the authentication was successful.
This method may raise the following exceptions:
SMTPHeloError The server didn't reply properly to
the helo greeting.
SMTPAuthenticationError The server didn't accept the username/
password combination.
SMTPException No suitable authentication method was
found.
"""
def encode_cram_md5(challenge, user, password):
challenge = base64.decodestring(challenge)
response = user + " " + hmac.HMAC(password, challenge).hexdigest()
return encode_base64(response, eol="")
def encode_plain(user, password):
return encode_base64("\0%s\0%s" % (user, password), eol="")
AUTH_PLAIN = "PLAIN"
AUTH_CRAM_MD5 = "CRAM-MD5"
AUTH_LOGIN = "LOGIN"
self.ehlo_or_helo_if_needed()
if not self.has_extn("auth"):
raise SMTPException("SMTP AUTH extension not supported by server.")
# Authentication methods the server supports:
authlist = self.esmtp_features["auth"].split()
# List of authentication methods we support: from preferred to
# less preferred methods. Except for the purpose of testing the weaker
# ones, we prefer stronger methods like CRAM-MD5:
preferred_auths = [AUTH_CRAM_MD5, AUTH_PLAIN, AUTH_LOGIN]
# Determine the authentication method we'll use
authmethod = None
for method in preferred_auths:
if method in authlist:
authmethod = method
break
if authmethod == AUTH_CRAM_MD5:
(code, resp) = self.docmd("AUTH", AUTH_CRAM_MD5)
if code == 503:
# 503 == 'Error: already authenticated'
return (code, resp)
(code, resp) = self.docmd(encode_cram_md5(resp, user, password))
elif authmethod == AUTH_PLAIN:
(code, resp) = self.docmd("AUTH",
AUTH_PLAIN + " " + encode_plain(user, password))
elif authmethod == AUTH_LOGIN:
(code, resp) = self.docmd("AUTH",
"%s %s" % (AUTH_LOGIN, encode_base64(user, eol="")))
if code != 334:
raise SMTPAuthenticationError(code, resp)
(code, resp) = self.docmd(encode_base64(password, eol=""))
elif authmethod is None:
raise SMTPException("No suitable authentication method found.")
if code not in (235, 503):
# 235 == 'Authentication successful'
# 503 == 'Error: already authenticated'
raise SMTPAuthenticationError(code, resp)
return (code, resp)
def starttls(self, keyfile=None, certfile=None):
"""Puts the connection to the SMTP server into TLS mode.
If there has been no previous EHLO or HELO command this session, this
method tries ESMTP EHLO first.
If the server supports TLS, this will encrypt the rest of the SMTP
session. If you provide the keyfile and certfile parameters,
the identity of the SMTP server and client can be checked. This,
however, depends on whether the socket module really checks the
certificates.
This method may raise the following exceptions:
SMTPHeloError The server didn't reply properly to
the helo greeting.
"""
self.ehlo_or_helo_if_needed()
if not self.has_extn("starttls"):
raise SMTPException("STARTTLS extension not supported by server.")
(resp, reply) = self.docmd("STARTTLS")
if resp == 220:
if not _have_ssl:
raise RuntimeError("No SSL support included in this Python")
self.sock = ssl.wrap_socket(self.sock, keyfile, certfile)
self.file = SSLFakeFile(self.sock)
# RFC 3207:
# The client MUST discard any knowledge obtained from
# the server, such as the list of SMTP service extensions,
# which was not obtained from the TLS negotiation itself.
self.helo_resp = None
self.ehlo_resp = None
self.esmtp_features = {}
self.does_esmtp = 0
return (resp, reply)
def sendmail(self, from_addr, to_addrs, msg, mail_options=[],
rcpt_options=[]):
"""This command performs an entire mail transaction.
The arguments are:
- from_addr : The address sending this mail.
- to_addrs : A list of addresses to send this mail to. A bare
string will be treated as a list with 1 address.
- msg : The message to send.
- mail_options : List of ESMTP options (such as 8bitmime) for the
mail command.
- rcpt_options : List of ESMTP options (such as DSN commands) for
all the rcpt commands.
If there has been no previous EHLO or HELO command this session, this
method tries ESMTP EHLO first. If the server does ESMTP, message size
and each of the specified options will be passed to it. If EHLO
fails, HELO will be tried and ESMTP options suppressed.
This method will return normally if the mail is accepted for at least
one recipient. It returns a dictionary, with one entry for each
recipient that was refused. Each entry contains a tuple of the SMTP
error code and the accompanying error message sent by the server.
This method may raise the following exceptions:
SMTPHeloError The server didn't reply properly to
the helo greeting.
SMTPRecipientsRefused The server rejected ALL recipients
(no mail was sent).
SMTPSenderRefused The server didn't accept the from_addr.
SMTPDataError The server replied with an unexpected
error code (other than a refusal of
a recipient).
Note: the connection will be open even after an exception is raised.
Example:
>>> import smtplib
>>> s=smtplib.SMTP("localhost")
>>> tolist=["one@one.org","two@two.org","three@three.org","four@four.org"]
>>> msg = '''\\
... From: Me@my.org
... Subject: testin'...
...
... This is a test '''
>>> s.sendmail("me@my.org",tolist,msg)
{ "three@three.org" : ( 550 ,"User unknown" ) }
>>> s.quit()
In the above example, the message was accepted for delivery to three
of the four addresses, and one was rejected, with the error code
550. If all addresses are accepted, then the method will return an
empty dictionary.
"""
self.ehlo_or_helo_if_needed()
esmtp_opts = []
if self.does_esmtp:
# Hmmm? what's this? -ddm
# self.esmtp_features['7bit']=""
if self.has_extn('size'):
esmtp_opts.append("size=%d" % len(msg))
for option in mail_options:
esmtp_opts.append(option)
(code, resp) = self.mail(from_addr, esmtp_opts)
if code != 250:
self.rset()
raise SMTPSenderRefused(code, resp, from_addr)
senderrs = {}
if isinstance(to_addrs, basestring):
to_addrs = [to_addrs]
for each in to_addrs:
(code, resp) = self.rcpt(each, rcpt_options)
if (code != 250) and (code != 251):
senderrs[each] = (code, resp)
if len(senderrs) == len(to_addrs):
# the server refused all our recipients
self.rset()
raise SMTPRecipientsRefused(senderrs)
(code, resp) = self.data(msg)
if code != 250:
self.rset()
raise SMTPDataError(code, resp)
#if we got here then somebody got our mail
return senderrs
def close(self):
"""Close the connection to the SMTP server."""
if self.file:
self.file.close()
self.file = None
if self.sock:
self.sock.close()
self.sock = None
def quit(self):
"""Terminate the SMTP session."""
res = self.docmd("quit")
self.close()
return res
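# Illustrative sketch (not part of the module): a typical ESMTP transaction that
# ties together starttls(), login() and sendmail() as documented above. The host,
# credentials and addresses are placeholders.
def _esmtp_transaction_sketch():
    server = SMTP("smtp.example.com", 587)
    server.ehlo()
    server.starttls()   # upgrades the socket; cached EHLO state is discarded (RFC 3207)
    server.ehlo()       # re-learn the server's extensions over TLS
    server.login("user", "secret")
    refused = server.sendmail("me@example.com", ["you@example.com"],
                              "From: me@example.com\r\nSubject: test\r\n\r\nbody")
    server.quit()
    return refused      # per-recipient (code, message) pairs the server rejected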
if _have_ssl:
class SMTP_SSL(SMTP):
""" This is a subclass derived from SMTP that connects over an SSL encrypted
socket (to use this class you need a socket module that was compiled with SSL
support). If host is not specified, '' (the local host) is used. If port is
omitted, the standard SMTP-over-SSL port (465) is used. keyfile and certfile
are also optional - they can contain a PEM formatted private key and
certificate chain file for the SSL connection.
"""
default_port = SMTP_SSL_PORT
def __init__(self, host='', port=0, local_hostname=None,
keyfile=None, certfile=None,
timeout=socket._GLOBAL_DEFAULT_TIMEOUT):
self.keyfile = keyfile
self.certfile = certfile
SMTP.__init__(self, host, port, local_hostname, timeout)
def _get_socket(self, host, port, timeout):
if self.debuglevel > 0:
print>>stderr, 'connect:', (host, port)
new_socket = socket.create_connection((host, port), timeout)
new_socket = ssl.wrap_socket(new_socket, self.keyfile, self.certfile)
self.file = SSLFakeFile(new_socket)
return new_socket
__all__.append("SMTP_SSL")
#
# LMTP extension
#
LMTP_PORT = 2003
class LMTP(SMTP):
"""LMTP - Local Mail Transfer Protocol
The LMTP protocol, which is very similar to ESMTP, is heavily based
on the standard SMTP client. It's common to use Unix sockets for LMTP,
so our connect() method must support that as well as a regular
host:port server. To specify a Unix socket, you must use an absolute
path as the host, starting with a '/'.
Authentication is supported, using the regular SMTP mechanism. When
using a Unix socket, LMTP servers generally don't support or require any
authentication, but your mileage might vary (see the connection sketch
after this class)."""
ehlo_msg = "lhlo"
def __init__(self, host='', port=LMTP_PORT, local_hostname=None):
"""Initialize a new instance."""
SMTP.__init__(self, host, port, local_hostname)
def connect(self, host='localhost', port=0):
"""Connect to the LMTP daemon, on either a Unix or a TCP socket."""
if host[0] != '/':
return SMTP.connect(self, host, port)
# Handle Unix-domain sockets.
try:
self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
self.sock.connect(host)
except socket.error, msg:
if self.debuglevel > 0:
print>>stderr, 'connect fail:', host
if self.sock:
self.sock.close()
self.sock = None
raise socket.error, msg
(code, msg) = self.getreply()
if self.debuglevel > 0:
print>>stderr, "connect:", msg
return (code, msg)
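# Illustrative sketch (not part of the module): connecting to an LMTP daemon over
# a Unix-domain socket by passing an absolute path as the host, as the class
# docstring notes. The socket path and addresses are placeholders.
def _lmtp_unix_socket_sketch():
    lmtp = LMTP("/var/run/lmtp.sock")
    refused = lmtp.sendmail("me@example.com", ["you@example.com"], "test body")
    lmtp.quit()
    return refused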
# Test the sendmail method, which tests most of the others.
# Note: This always sends to localhost.
if __name__ == '__main__':
import sys
def prompt(prompt):
sys.stdout.write(prompt + ": ")
return sys.stdin.readline().strip()
fromaddr = prompt("From")
toaddrs = prompt("To").split(',')
print "Enter message, end with ^D:"
msg = ''
while 1:
line = sys.stdin.readline()
if not line:
break
msg = msg + line
print "Message length is %d" % len(msg)
server = SMTP('localhost')
server.set_debuglevel(1)
server.sendmail(fromaddr, toaddrs, msg)
server.quit()
| 36.214452
| 89
| 0.58535
|
721386113a03edf930ab99b59a5b51bcb05373f2
| 598
|
py
|
Python
|
DetectorDescription/Core/python/test/DDAngularXML_cfi.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 852
|
2015-01-11T21:03:51.000Z
|
2022-03-25T21:14:00.000Z
|
DetectorDescription/Core/python/test/DDAngularXML_cfi.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 30,371
|
2015-01-02T00:14:40.000Z
|
2022-03-31T23:26:05.000Z
|
DetectorDescription/Core/python/test/DDAngularXML_cfi.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 3,240
|
2015-01-02T05:53:18.000Z
|
2022-03-31T17:24:21.000Z
|
import FWCore.ParameterSet.Config as cms
XMLIdealGeometryESSource = cms.ESSource("XMLIdealGeometryESSource",
geomXMLFiles = cms.vstring('DetectorDescription/Core/test/data/materials.xml',
'DetectorDescription/Core/test/data/world.xml',
'DetectorDescription/Core/test/data/DDAngular.xml'),
rootNodeName = cms.string('world:MotherOfAllBoxes')
)
| 66.444444
| 119
| 0.473244
|
031633c317f010c14bb1e891836ea1bc86ec0f08
| 2,839
|
py
|
Python
|
language-modeling/fast_transformers/recurrent/attention/cross_attention/.ipynb_checkpoints/linear_attention-checkpoint.py
|
minhtannguyen/transformer-mgk
|
304ebf3781b1eb4aeef93f2757319775d2fcdbc4
|
[
"CC0-1.0"
] | 5
|
2021-11-06T16:10:31.000Z
|
2021-12-25T19:47:42.000Z
|
language-modeling/fast_transformers/recurrent/attention/cross_attention/.ipynb_checkpoints/linear_attention-checkpoint.py
|
minhtannguyen/transformer-mgk
|
304ebf3781b1eb4aeef93f2757319775d2fcdbc4
|
[
"CC0-1.0"
] | null | null | null |
language-modeling/fast_transformers/recurrent/attention/cross_attention/.ipynb_checkpoints/linear_attention-checkpoint.py
|
minhtannguyen/transformer-mgk
|
304ebf3781b1eb4aeef93f2757319775d2fcdbc4
|
[
"CC0-1.0"
] | 2
|
2021-11-30T03:36:54.000Z
|
2021-12-25T19:49:58.000Z
|
"""Implement unmasked linear attention as a recurrent cross attention module to
speed up autoregressive decoding."""
import torch
from torch.nn import Module
from ....attention_registry import RecurrentCrossAttentionRegistry, Optional, Int, \
Callable, EventDispatcherInstance
from ....events import EventDispatcher
from ....feature_maps import elu_feature_map
class RecurrentCrossLinearAttention(Module):
"""Implement autoregressive linear cross attention as a recurrent
module.
See fast_transformers.attention.linear_attention.LinearAttention .
Arguments
---------
feature_map: callable, a callable that applies the feature map to the
last dimension of a tensor (default: elu(x)+1)
eps: float, a small number to ensure the numerical stability of the
denominator (default: 1e-6)
event_dispatcher: str or EventDispatcher instance to be used by this
module for dispatching events (default: the default
global dispatcher)
"""
def __init__(self, query_dimensions, feature_map=None, eps=1e-6,
event_dispatcher=""):
super(RecurrentCrossLinearAttention, self).__init__()
self.feature_map = (
feature_map(query_dimensions) if feature_map else
elu_feature_map(query_dimensions)
)
self.eps = eps
self.event_dispatcher = EventDispatcher.get(event_dispatcher)
def forward(self, query, keys, values, key_lengths, state=None):
# If this is a new sequence, re-initialize the feature map
if state is None:
self.feature_map.new_feature_map(query.device)
# Compute the feature representation of the query
Q = self.feature_map.forward_queries(query)
# If the state is not given compute the key-value matrix and the
# normalizers, namely compute whatever is needed in order to attend to
# keys and values with a given query.
if state is None:
K = self.feature_map.forward_keys(keys)
K = K * key_lengths.float_matrix[:, :, None, None]
S = torch.einsum("nshd,nshm->nhmd", K, values)
Z = K.sum(dim=1)
else:
S, Z = state
# Given S and Z now we can efficiently compute the new value
QZ = 1/(torch.einsum("nhd,nhd->nh", Q, Z)+self.eps)
V = torch.einsum("nhd,nhmd,nh->nhm", Q, S, QZ)
return V.contiguous(), [S, Z]
# Register the attention implementation so that it becomes available in our
# builders
RecurrentCrossAttentionRegistry.register(
"linear", RecurrentCrossLinearAttention,
[
("query_dimensions", Int),
("feature_map", Optional(Callable)),
("event_dispatcher", Optional(EventDispatcherInstance, ""))
]
)
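# Illustrative sketch (not part of the module): expected tensor shapes for one
# decoding step of RecurrentCrossLinearAttention. N=batch, H=heads, D=key/query
# dims, M=value dims, S=memory length. LengthMask is assumed to be the usual
# fast_transformers.masking.LengthMask providing the float_matrix used above.
def _recurrent_cross_attention_sketch():
    from fast_transformers.masking import LengthMask
    N, H, D, M, S = 2, 4, 8, 8, 10
    attn = RecurrentCrossLinearAttention(query_dimensions=D)
    query = torch.randn(N, H, D)        # a single target position per call
    keys = torch.randn(N, S, H, D)      # the full encoder/memory sequence
    values = torch.randn(N, S, H, M)
    key_lengths = LengthMask(torch.full((N,), S, dtype=torch.long))
    v, state = attn(query, keys, values, key_lengths, state=None)   # builds S, Z once
    v, state = attn(query, keys, values, key_lengths, state=state)  # reuses them
    return v.shape                      # torch.Size([N, H, M])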
| 37.853333
| 84
| 0.659035
|
39bb4dbcc8146ace37697ba580b46c588e53e49f
| 5,857
|
py
|
Python
|
utils/lmdb.py
|
raghuch/SABER
|
fb0f26152b009f923aaf572cef80940f2f256330
|
[
"MIT"
] | 5
|
2019-11-22T12:42:38.000Z
|
2020-07-14T14:51:47.000Z
|
utils/lmdb.py
|
raghuch/SABER
|
fb0f26152b009f923aaf572cef80940f2f256330
|
[
"MIT"
] | null | null | null |
utils/lmdb.py
|
raghuch/SABER
|
fb0f26152b009f923aaf572cef80940f2f256330
|
[
"MIT"
] | 1
|
2021-02-02T08:43:00.000Z
|
2021-02-02T08:43:00.000Z
|
import sys
import os
import lmdb # install lmdb by "pip install lmdb"
import cv2
import numpy as np
import msgpack
from bisect import bisect_right
import msgpack_numpy as m
from torch.utils.data import Dataset
from tqdm import tqdm
from utils.logger import logger
from joblib import Parallel, delayed
from utils.config import num_cores, train_batch_size
m.patch()
def writeCache(env, cache):
with env.begin(write=True) as txn:
for k, value in cache.items():
key = k.encode('ascii')
txn.put(key, value)
def createDataset_parallel(outputPath, dataset, image_transform=None, label_transform=None, exclude_func=None, n_jobs=num_cores):
nSamples = len(dataset)
env = lmdb.open(outputPath, map_size=1099511627776)
logger.info(f'Beginning to create dataset at {outputPath}')
cache = {}
done = 0
ignored = 0
with Parallel(n_jobs=n_jobs, require='sharedmem') as parallel:
while done < nSamples - ignored:
num_left_or_batch_size = min(100, nSamples - done)
parallel(delayed(fillCache)(done + i, dataset, cache, image_transform=image_transform, label_transform=label_transform, exclude_func=exclude_func) for i in range(num_left_or_batch_size))
writeCache(env, cache)
done_batch_size = len(cache.items())
done += done_batch_size
cache = {}
ignored += (num_left_or_batch_size - done_batch_size)
logger.info(f'Written {done:d} / {nSamples - ignored:d}')
nSamples = done
cache['num-samples'] = str(nSamples).encode('ascii')
writeCache(env, cache)
logger.info(f'Created dataset with {nSamples:d} samples')
def fillCache(index, dataset, cache, image_transform=None, label_transform=None, exclude_func=None):
img, label = dataset[index]
dataKey = f'data-{index:09d}'
if exclude_func is not None:
if exclude_func(img, label):
return
if image_transform is not None:
img = image_transform(img)
# Fall back to the raw label so `label_transformed` is always defined.
label_transformed = label_transform(label) if label_transform is not None else label
cache[dataKey] = msgpack.packb({'img': img, 'label': label_transformed}, default=m.encode, use_bin_type=True)
def createDataset_single(outputPath, dataset, image_transform=None, label_transform=None, exclude_func=None):
nSamples = len(dataset)
env = lmdb.open(outputPath, map_size=1099511627776)
logger.info(f'Beginning to create dataset at {outputPath}')
cache = {}
cnt = 0
for i, (img, label) in enumerate(dataset):
dataKey = f'data-{cnt:09d}'
if exclude_func is not None:
if exclude_func(img, label):
continue
if image_transform is not None:
img = image_transform(img)
# Fall back to the raw label so `label_transformed` is always defined.
label_transformed = label_transform(label) if label_transform is not None else label
cache[dataKey] = msgpack.packb({'img': img, 'label': label_transformed}, default=m.encode, use_bin_type=True)
if cnt % 100 == 0 and cnt != 0:
writeCache(env, cache)
cache = {}
logger.info(f'Written {cnt:d} / {nSamples:d}')
cnt += 1
nSamples = cnt
cache['num-samples'] = str(nSamples).encode('ascii')
writeCache(env, cache)
logger.info(f'Created dataset with {nSamples:d} samples')
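# Illustrative sketch (not part of the module): writing a tiny in-memory dataset
# of (image, label) pairs to LMDB with createDataset_single. The output path is
# a placeholder; msgpack_numpy (patched above) handles the numpy payloads.
def _create_toy_lmdb_sketch():
    toy = [(np.zeros((4, 4), dtype=np.uint8), np.array([1, 2, 3])),
           (np.ones((4, 4), dtype=np.uint8), np.array([4, 5]))]
    createDataset_single('/tmp/toy-lmdb', toy, label_transform=lambda label: label)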
class lmdbMultiDataset(Dataset):
def __init__(self, roots=[], transform=None, target_transform=None):
super().__init__()
self.nSamples = 0
self.cutoffs = [0]
for i, root in enumerate(roots):
setattr(self, f'env{i}', lmdb.open(
root,
max_readers=100,
readonly=True,
lock=False,
readahead=False,
meminit=False))
with getattr(self, f'env{i}').begin(write=False) as txn:
nSamples_dataset = int(txn.get('num-samples'.encode('ascii')))
self.nSamples += nSamples_dataset
self.cutoffs.append(self.nSamples)
self.transform = transform
self.target_transform = target_transform
self.epoch = 0
def __len__(self):
return self.nSamples
def set_epochs(self, epoch):
self.epoch = epoch
def __getitem__(self, index):
assert index < len(self), 'index range error'
bisect_index = bisect_right(self.cutoffs, index) - 1
index -= self.cutoffs[bisect_index]
env = getattr(self, f'env{bisect_index}')
with env.begin(write=False) as txn:
data_key = f'data-{index:09d}'.encode('ascii')
data_enc = txn.get(data_key)
if not data_enc:
return self.__getitem__(np.random.choice(range(len(self))))
data = msgpack.unpackb(data_enc, object_hook=m.decode, raw=False)
img = data['img']
label = data['label']
if self.transform is not None:
img = self.transform(img, self.epoch)
if self.target_transform is not None:
label = self.target_transform(label)
return (img, label)
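# Illustrative sketch (not part of the module): how __getitem__ maps a global
# index to (database, local index) via the running `cutoffs` list. With two
# databases holding 5 and 3 samples, cutoffs == [0, 5, 8] and global index 6
# resolves to sample 1 of the second database.
def _cutoff_lookup_sketch(index, cutoffs=(0, 5, 8)):
    db = bisect_right(cutoffs, index) - 1    # which env{db} to read from
    local_index = index - cutoffs[db]        # LMDB key is data-{local_index:09d}
    return db, local_index                   # e.g. _cutoff_lookup_sketch(6) == (1, 1)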
if __name__ == "__main__":
from utils.config import lmdb_root_path
from datasets.librispeech import sequence_to_string
lmdb_commonvoice_root_path = "lmdb-databases-common_voice"
lmdb_airtel_root_path = "lmdb-databases-airtel"
trainCleanPath = os.path.join(lmdb_root_path, 'train-labelled')
trainOtherPath = os.path.join(lmdb_root_path, 'train-unlabelled')
trainCommonVoicePath = os.path.join(lmdb_commonvoice_root_path, 'train-labelled-en')
testAirtelPath = os.path.join(lmdb_airtel_root_path, 'test-labelled-en')
roots = [trainCleanPath, trainOtherPath, trainCommonVoicePath]
dataset = lmdbMultiDataset(roots=[testAirtelPath])
print(sequence_to_string(dataset[np.random.choice(len(dataset))][1].tolist()))
| 40.393103
| 198
| 0.651016
|
997013f433286a434be0bc62644d89db7ecaf942
| 346
|
py
|
Python
|
PYTHON/Py3_Mundo1_Fundamental/desafios/des001.py
|
Marciobroficial/CURSO-EM-VIDEO
|
37b10c26336a9744236603282af77661fdf8c61a
|
[
"MIT"
] | 1
|
2021-10-09T18:11:20.000Z
|
2021-10-09T18:11:20.000Z
|
PYTHON/Py3_Mundo1_Fundamental/desafios/des001.py
|
Coppini21/CURSO-EM-VIDEO
|
37b10c26336a9744236603282af77661fdf8c61a
|
[
"MIT"
] | 1
|
2021-09-15T04:18:34.000Z
|
2022-03-02T23:16:26.000Z
|
PYTHON/Py3_Mundo1_Fundamental/desafios/des001.py
|
Coppini21/CURSO-EM-VIDEO
|
37b10c26336a9744236603282af77661fdf8c61a
|
[
"MIT"
] | 3
|
2021-12-15T17:19:51.000Z
|
2022-03-29T02:19:00.000Z
|
# Challenge 01
# Write a program that reads a person's name and shows a welcome message based on the value entered.
print()
print('=-='*15)
nome = input('Qual é seu nome? ')
print ('Ola',nome,'Prazer em te conhecer!')
nome = input('Digite seu nome: ')
print('É um prazer te conhecer, {}!'.format(nome))
print('=-='*15)
print()
| 26.615385
| 117
| 0.67052
|
75660e4d0dc764a32b1346f742fdcf634d787ae9
| 18,052
|
py
|
Python
|
checkov/terraform/runner.py
|
shubh-cs29/checkov
|
36b93391ef369ad3bcf936e2b1d06109bf9f38c9
|
[
"Apache-2.0"
] | 5
|
2021-07-29T18:08:40.000Z
|
2022-03-21T04:39:32.000Z
|
checkov/terraform/runner.py
|
shubh-cs29/checkov
|
36b93391ef369ad3bcf936e2b1d06109bf9f38c9
|
[
"Apache-2.0"
] | null | null | null |
checkov/terraform/runner.py
|
shubh-cs29/checkov
|
36b93391ef369ad3bcf936e2b1d06109bf9f38c9
|
[
"Apache-2.0"
] | 2
|
2021-08-23T13:25:36.000Z
|
2021-11-05T21:44:52.000Z
|
import copy
import dataclasses
import logging
import os
from typing import Dict, Optional, Tuple, List
import dpath.util
from checkov.common.graph.db_connectors.networkx.networkx_db_connector import NetworkxConnector
from checkov.common.models.enums import CheckResult
from checkov.common.output.graph_record import GraphRecord
from checkov.common.output.record import Record
from checkov.common.output.report import Report
from checkov.common.util import dict_utils
from checkov.common.runners.base_runner import BaseRunner
from checkov.common.variables.context import EvaluationContext
from checkov.runner_filter import RunnerFilter
from checkov.terraform.checks.data.registry import data_registry
from checkov.terraform.checks.module.registry import module_registry
from checkov.terraform.checks.provider.registry import provider_registry
from checkov.terraform.checks.resource.registry import resource_registry
from checkov.terraform.checks_infra.checks_parser import NXGraphCheckParser
from checkov.terraform.checks_infra.registry import Registry
from checkov.terraform.context_parsers.registry import parser_registry
from checkov.terraform.evaluation.base_variable_evaluation import BaseVariableEvaluation
from checkov.terraform.graph_builder.graph_components.attribute_names import CustomAttributes
from checkov.terraform.graph_builder.graph_to_tf_definitions import convert_graph_vertices_to_tf_definitions
from checkov.terraform.graph_builder.local_graph import LocalGraph
from checkov.terraform.graph_manager import GraphManager
# Allow the evaluation of empty variables
from checkov.terraform.parser import Parser
from checkov.terraform.tag_providers import get_resource_tags
dpath.options.ALLOW_EMPTY_STRING_KEYS = True
CHECK_BLOCK_TYPES = frozenset(['resource', 'data', 'provider', 'module'])
graph_registry = Registry(parser=NXGraphCheckParser())
class Runner(BaseRunner):
check_type = "terraform"
def __init__(self, parser=Parser(), db_connector=NetworkxConnector(), external_registries=None,
source="Terraform", graph_class=LocalGraph, graph_manager=None):
self.external_registries = [] if external_registries is None else external_registries
self.graph_class = graph_class
self.parser = parser
self.tf_definitions = None
self.definitions_context = None
self.breadcrumbs = None
self.definitions_context = {}
self.evaluations_context: Dict[str, Dict[str, EvaluationContext]] = {}
self.graph_manager = graph_manager if graph_manager is not None else GraphManager(source=source,
db_connector=db_connector)
block_type_registries = {
'resource': resource_registry,
'data': data_registry,
'provider': provider_registry,
'module': module_registry,
}
def set_external_data(self, tf_definitions: dict, definitions_context: dict, breadcrumbs: dict):
self.tf_definitions = tf_definitions
self.definitions_context = definitions_context
self.breadcrumbs = breadcrumbs
def run(self, root_folder, external_checks_dir=None, files=None, runner_filter=RunnerFilter(), collect_skip_comments=True):
report = Report(self.check_type)
parsing_errors = {}
self.load_external_checks(external_checks_dir)
if self.definitions_context is None or self.tf_definitions is None or self.breadcrumbs is None:
self.tf_definitions = {}
logging.info("Scanning root folder and producing fresh tf_definitions and context")
if root_folder:
root_folder = os.path.abspath(root_folder)
local_graph, tf_definitions = \
self.graph_manager.build_graph_from_source_directory(root_folder,
local_graph_class=self.graph_class,
download_external_modules=runner_filter.download_external_modules,
parsing_errors=parsing_errors, excluded_paths=runner_filter.excluded_paths)
elif files:
files = [os.path.abspath(file) for file in files]
root_folder = os.path.split(os.path.commonprefix(files))[0]
self.parser.evaluate_variables = False
for file in files:
if file.endswith(".tf"):
file_parsing_errors = {}
parse_result = self.parser.parse_file(file=file, parsing_errors=file_parsing_errors)
if parse_result is not None:
self.tf_definitions[file] = parse_result
if file_parsing_errors:
parsing_errors.update(file_parsing_errors)
continue
local_graph = self.graph_manager.build_graph_from_tf_definitions(self.tf_definitions)
else:
raise Exception("Root directory was not specified, files were not specified")
self.graph_manager.save_graph(local_graph)
self.tf_definitions, self.breadcrumbs = convert_graph_vertices_to_tf_definitions(local_graph.vertices, root_folder)
else:
logging.info(f"Scanning root folder using existing tf_definitions")
self.check_tf_definition(report, root_folder, runner_filter, collect_skip_comments)
report.add_parsing_errors(parsing_errors.keys())
graph_report = self.get_graph_checks_report(root_folder, runner_filter)
merge_reports(report, graph_report)
return report
def load_external_checks(self, external_checks_dir: List[str]):
if external_checks_dir:
for directory in external_checks_dir:
resource_registry.load_external_checks(directory)
graph_registry.load_external_checks(directory)
def get_graph_checks_report(self, root_folder, runner_filter: RunnerFilter):
report = Report(self.check_type)
checks_results = {}
for r in self.external_registries + [graph_registry]:
r.load_checks()
registry_results = r.run_checks(self.graph_manager.get_reader_traversal(), runner_filter)
checks_results = {**checks_results, **registry_results}
for check, check_results in checks_results.items():
for check_result in check_results:
entity = check_result['entity']
entity_context, entity_evaluations = self.get_entity_context_and_evaluations(entity)
if entity_context:
full_file_path = entity[CustomAttributes.FILE_PATH]
copy_of_check_result = copy.deepcopy(check_result)
for skipped_check in entity_context.get('skipped_checks', []):
if skipped_check['id'] == check.id:
copy_of_check_result['result'] = CheckResult.SKIPPED
copy_of_check_result['suppress_comment'] = skipped_check['suppress_comment']
break
copy_of_check_result['entity'] = entity.get(CustomAttributes.CONFIG)
record = Record(check_id=check.id,
check_name=check.name,
check_result=copy_of_check_result,
code_block=entity_context.get('code_lines'),
file_path=f"/{os.path.relpath(full_file_path, root_folder)}",
file_line_range=[entity_context.get('start_line'),
entity_context.get('end_line')],
resource=".".join(entity_context['definition_path']),
evaluations=entity_evaluations,
check_class=check.__class__.__module__,
file_abs_path=os.path.abspath(full_file_path))
breadcrumb = self.breadcrumbs.get(record.file_path, {}).get(record.resource)
if breadcrumb:
record = GraphRecord(record, breadcrumb)
report.add_record(record=record)
return report
def get_entity_context_and_evaluations(self, entity):
entity_evaluations = None
block_type = entity[CustomAttributes.BLOCK_TYPE]
full_file_path = entity[CustomAttributes.FILE_PATH]
definition_path = entity[CustomAttributes.BLOCK_NAME].split('.')
entity_context_path = [block_type] + definition_path
entity_context = self.definitions_context.get(full_file_path, {})
try:
if not entity_context:
dc_keys = self.definitions_context.keys()
dc_key = next(x for x in dc_keys if x.startswith(full_file_path))
entity_context = self.definitions_context.get(dc_key, {})
for k in entity_context_path:
if k in entity_context:
entity_context = entity_context[k]
else:
logging.warning(f'Failed to find context for {".".join(entity_context_path)}')
return None, None
entity_context['definition_path'] = definition_path
except StopIteration:
logging.debug(f"Did not find context for key {full_file_path}")
return entity_context, entity_evaluations
def check_tf_definition(self, report, root_folder, runner_filter, collect_skip_comments=True):
parser_registry.reset_definitions_context()
if not self.definitions_context:
definitions_context = {}
for definition in self.tf_definitions.items():
definitions_context = parser_registry.enrich_definitions_context(definition, collect_skip_comments)
self.definitions_context = definitions_context
logging.debug('Created definitions context')
for full_file_path, definition in self.tf_definitions.items():
abs_scanned_file, abs_referrer = self._strip_module_referrer(full_file_path)
scanned_file = f"/{os.path.relpath(abs_scanned_file, root_folder)}"
logging.debug(f"Scanning file: {scanned_file}")
self.run_all_blocks(definition, self.definitions_context, full_file_path, root_folder, report,
scanned_file, runner_filter, abs_referrer)
def run_all_blocks(self, definition, definitions_context, full_file_path, root_folder, report,
scanned_file, runner_filter, module_referrer: Optional[str]):
if not definition:
logging.debug("Empty definition, skipping run (root_folder=%s)", root_folder)
return
block_types = set(definition.keys())
for block_type in block_types & CHECK_BLOCK_TYPES:
self.run_block(definition[block_type], definitions_context,
full_file_path, root_folder, report,
scanned_file, block_type, runner_filter, None, module_referrer)
def run_block(self, entities,
definition_context,
full_file_path, root_folder, report, scanned_file,
block_type, runner_filter=None, entity_context_path_header=None,
module_referrer: Optional[str] = None):
registry = self.block_type_registries[block_type]
if not registry:
return
for entity in entities:
entity_evaluations = None
context_parser = parser_registry.context_parsers[block_type]
definition_path = context_parser.get_entity_context_path(entity)
entity_id = ".".join(definition_path) # example: aws_s3_bucket.my_bucket
caller_file_path = None
caller_file_line_range = None
if module_referrer is not None:
referrer_id = self._find_id_for_referrer(full_file_path,
self.tf_definitions)
if referrer_id:
entity_id = f"{referrer_id}.{entity_id}" # ex: module.my_module.aws_s3_bucket.my_bucket
abs_caller_file = module_referrer[:module_referrer.rindex("#")]
caller_file_path = f"/{os.path.relpath(abs_caller_file, root_folder)}"
try:
caller_context = dpath.get(definition_context[abs_caller_file],
# HACK ALERT: module data is currently double-nested in
# definition context. If fixed, remove the
# addition of "module." at the beginning.
"module." + referrer_id,
separator=".")
except KeyError:
logging.debug("Unable to find caller context for: %s", abs_caller_file)
caller_context = None
if caller_context:
caller_file_line_range = [caller_context.get('start_line'), caller_context.get('end_line')]
else:
logging.debug(f"Unable to find referrer ID for full path: %s", full_file_path)
if entity_context_path_header is None:
entity_context_path = [block_type] + definition_path
else:
entity_context_path = entity_context_path_header + block_type + definition_path
# Entity can exist only once per dir, for file as well
try:
entity_context = dict_utils.getInnerDict(definition_context[full_file_path], entity_context_path)
entity_lines_range = [entity_context.get('start_line'), entity_context.get('end_line')]
entity_code_lines = entity_context.get('code_lines')
skipped_checks = entity_context.get('skipped_checks')
except KeyError:
# TODO: Context info isn't working for modules
entity_lines_range = None
entity_code_lines = None
skipped_checks = None
if full_file_path in self.evaluations_context:
variables_evaluations = {}
for var_name, context_info in self.evaluations_context.get(full_file_path, {}).items():
variables_evaluations[var_name] = dataclasses.asdict(context_info)
entity_evaluations = BaseVariableEvaluation.reduce_entity_evaluations(variables_evaluations,
entity_context_path)
results = registry.scan(scanned_file, entity, skipped_checks, runner_filter)
absolut_scanned_file_path, _ = self._strip_module_referrer(file_path=full_file_path)
# This duplicates a call at the start of scan, but adding this here seems better than kludging with some tuple return type
(entity_type, entity_name, entity_config) = registry.extract_entity_details(entity)
tags = get_resource_tags(entity_type, entity_config)
for check, check_result in results.items():
record = Record(check_id=check.id, check_name=check.name, check_result=check_result,
code_block=entity_code_lines, file_path=scanned_file,
file_line_range=entity_lines_range,
resource=entity_id, evaluations=entity_evaluations,
check_class=check.__class__.__module__, file_abs_path=absolut_scanned_file_path,
entity_tags=tags,
caller_file_path=caller_file_path,
caller_file_line_range=caller_file_line_range)
breadcrumb = self.breadcrumbs.get(record.file_path, {}).get('.'.join([entity_type, entity_name]))
if breadcrumb:
record = GraphRecord(record, breadcrumb)
report.add_record(record=record)
@staticmethod
def _strip_module_referrer(file_path: str) -> Tuple[str, Optional[str]]:
"""
For file paths containing module referrer information (e.g.: "module/module.tf[main.tf#0]"), this
returns a tuple containing the file path (e.g., "module/module.tf") and referrer (e.g., "main.tf#0").
If the file path does not contain a referrer, the tuple will contain the original file path and None.
"""
if file_path.endswith("]") and "[" in file_path:
return file_path[:file_path.index("[")], file_path[file_path.index("[") + 1: -1]
else:
return file_path, None
@staticmethod
def _find_id_for_referrer(full_file_path, definitions) -> Optional[str]:
for file, file_content in definitions.items():
if "module" not in file_content:
continue
for modules in file_content["module"]:
for module_name, module_content in modules.items():
if "__resolved__" not in module_content:
continue
if full_file_path in module_content["__resolved__"]:
return f"module.{module_name}"
return None
def merge_reports(base_report, report_to_merge):
base_report.passed_checks.extend(report_to_merge.passed_checks)
base_report.failed_checks.extend(report_to_merge.failed_checks)
base_report.skipped_checks.extend(report_to_merge.skipped_checks)
base_report.parsing_errors.extend(report_to_merge.parsing_errors)
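# Illustrative sketch (not part of the module): scanning a Terraform directory
# with this runner. run() already folds the graph-check report in via
# merge_reports above; the directory path is a placeholder.
def _scan_directory_sketch():
    runner = Runner()
    report = runner.run(root_folder="/path/to/terraform", runner_filter=RunnerFilter())
    return len(report.passed_checks), len(report.failed_checks)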
| 55.204893
| 148
| 0.632617
|
c415ce38efd5c8dd7391a363d7b3930dcda37c79
| 1,548
|
py
|
Python
|
scripts/automation/tests/verify_dependencies.py
|
henrypan/azure-cli
|
8de0ab5216ed3dc700546ae9a3c485710322376b
|
[
"MIT"
] | null | null | null |
scripts/automation/tests/verify_dependencies.py
|
henrypan/azure-cli
|
8de0ab5216ed3dc700546ae9a3c485710322376b
|
[
"MIT"
] | null | null | null |
scripts/automation/tests/verify_dependencies.py
|
henrypan/azure-cli
|
8de0ab5216ed3dc700546ae9a3c485710322376b
|
[
"MIT"
] | null | null | null |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
"""Verify package dependency compatibility (with support for allowed exceptions). """
from __future__ import print_function
import subprocess
import sys
ALLOWED_ERRORS = [
"has requirement azure-common[autorest]==1.1.4, but you have azure-common 1.1.5.",
"has requirement azure-common~=1.1.5, but you have azure-common 1.1.4."
]
def verify_dependencies():
try:
subprocess.check_output(['pip', 'check'], stderr=subprocess.STDOUT, universal_newlines=True)
except subprocess.CalledProcessError as err:
pip_messages = err.output.splitlines()
errors = []
for msg in pip_messages:
if not any(a in msg for a in ALLOWED_ERRORS):
errors.append(msg)
if errors:
print('Dependency compatibility errors found!', file=sys.stderr)
print('\n'.join(errors), file=sys.stderr)
sys.exit(1)
else:
print("'pip check' returned exit code {} but the errors are allowable.".format(err.returncode), file=sys.stderr)
print("Full output from pip follows:", file=sys.stderr)
print(err.output, file=sys.stderr)
if __name__ == '__main__':
verify_dependencies()
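# Illustrative sketch (not part of the script): the allow-list above is applied
# as a plain substring match over each line of `pip check` output, so only
# unrecognised conflicts survive. The sample lines are made up.
def _filter_sketch():
    sample = [
        "azure-cli 2.0 has requirement azure-common~=1.1.5, but you have azure-common 1.1.4.",
        "foo 1.0 has requirement bar>=2, but you have bar 1.0.",
    ]
    return [msg for msg in sample if not any(a in msg for a in ALLOWED_ERRORS)]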
| 40.736842
| 124
| 0.588501
|
443924889fb2bc02902f4276bdfdab0574a5167f
| 493
|
py
|
Python
|
lbry/lbry/wallet/__init__.py
|
JupyterJones/lbry-sdk
|
be89436fa869e1b4b9f05c3faa5c126ebcfe6e57
|
[
"MIT"
] | null | null | null |
lbry/lbry/wallet/__init__.py
|
JupyterJones/lbry-sdk
|
be89436fa869e1b4b9f05c3faa5c126ebcfe6e57
|
[
"MIT"
] | null | null | null |
lbry/lbry/wallet/__init__.py
|
JupyterJones/lbry-sdk
|
be89436fa869e1b4b9f05c3faa5c126ebcfe6e57
|
[
"MIT"
] | null | null | null |
__node_daemon__ = 'lbrycrdd'
__node_cli__ = 'lbrycrd-cli'
__node_bin__ = ''
__node_url__ = (
'https://github.com/lbryio/lbrycrd/releases/download/v0.17.2.1/lbrycrd-linux.zip'
# 'https://github.com/lbryio/lbrycrd/releases/download/v0.17.3.1/lbrycrd-linux-1731.zip'
)
__spvserver__ = 'lbry.wallet.server.coin.LBCRegTest'
from lbry.wallet.manager import LbryWalletManager
from lbry.wallet.network import Network
from lbry.wallet.ledger import MainNetLedger, RegTestLedger, TestNetLedger
| 37.923077
| 92
| 0.78499
|
2b843788bf2f7a7b77501d3b0341573d25fb5ffb
| 12,946
|
py
|
Python
|
ppcls/arch/backbone/model_zoo/xception.py
|
TxT1212/PaddleClas
|
5a24c8700f738f036bf27f80ca12dbe8471a11b0
|
[
"Apache-2.0"
] | 3,763
|
2020-04-10T04:48:11.000Z
|
2022-03-31T13:24:37.000Z
|
ppcls/arch/backbone/model_zoo/xception.py
|
TxT1212/PaddleClas
|
5a24c8700f738f036bf27f80ca12dbe8471a11b0
|
[
"Apache-2.0"
] | 633
|
2020-04-08T18:27:31.000Z
|
2022-03-31T01:09:43.000Z
|
ppcls/arch/backbone/model_zoo/xception.py
|
TxT1212/PaddleClas
|
5a24c8700f738f036bf27f80ca12dbe8471a11b0
|
[
"Apache-2.0"
] | 846
|
2020-04-08T08:13:18.000Z
|
2022-03-31T12:28:37.000Z
|
import paddle
from paddle import ParamAttr
import paddle.nn as nn
import paddle.nn.functional as F
from paddle.nn import Conv2D, BatchNorm, Linear, Dropout
from paddle.nn import AdaptiveAvgPool2D, MaxPool2D, AvgPool2D
from paddle.nn.initializer import Uniform
import math
import sys
from ppcls.utils.save_load import load_dygraph_pretrain, load_dygraph_pretrain_from_url
MODEL_URLS = {
"Xception41":
"https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/Xception41_pretrained.pdparams",
"Xception65":
"https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/Xception65_pretrained.pdparams",
"Xception71":
"https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/Xception71_pretrained.pdparams"
}
__all__ = list(MODEL_URLS.keys())
class ConvBNLayer(nn.Layer):
def __init__(self,
num_channels,
num_filters,
filter_size,
stride=1,
groups=1,
act=None,
name=None):
super(ConvBNLayer, self).__init__()
self._conv = Conv2D(
in_channels=num_channels,
out_channels=num_filters,
kernel_size=filter_size,
stride=stride,
padding=(filter_size - 1) // 2,
groups=groups,
weight_attr=ParamAttr(name=name + "_weights"),
bias_attr=False)
bn_name = "bn_" + name
self._batch_norm = BatchNorm(
num_filters,
act=act,
param_attr=ParamAttr(name=bn_name + "_scale"),
bias_attr=ParamAttr(name=bn_name + "_offset"),
moving_mean_name=bn_name + '_mean',
moving_variance_name=bn_name + '_variance')
def forward(self, inputs):
y = self._conv(inputs)
y = self._batch_norm(y)
return y
class SeparableConv(nn.Layer):
def __init__(self, input_channels, output_channels, stride=1, name=None):
super(SeparableConv, self).__init__()
self._pointwise_conv = ConvBNLayer(
input_channels, output_channels, 1, name=name + "_sep")
self._depthwise_conv = ConvBNLayer(
output_channels,
output_channels,
3,
stride=stride,
groups=output_channels,
name=name + "_dw")
def forward(self, inputs):
x = self._pointwise_conv(inputs)
x = self._depthwise_conv(x)
return x
class EntryFlowBottleneckBlock(nn.Layer):
def __init__(self,
input_channels,
output_channels,
stride=2,
name=None,
relu_first=False):
super(EntryFlowBottleneckBlock, self).__init__()
self.relu_first = relu_first
self._short = Conv2D(
in_channels=input_channels,
out_channels=output_channels,
kernel_size=1,
stride=stride,
padding=0,
weight_attr=ParamAttr(name + "_branch1_weights"),
bias_attr=False)
self._conv1 = SeparableConv(
input_channels,
output_channels,
stride=1,
name=name + "_branch2a_weights")
self._conv2 = SeparableConv(
output_channels,
output_channels,
stride=1,
name=name + "_branch2b_weights")
self._pool = MaxPool2D(kernel_size=3, stride=stride, padding=1)
def forward(self, inputs):
conv0 = inputs
short = self._short(inputs)
if self.relu_first:
conv0 = F.relu(conv0)
conv1 = self._conv1(conv0)
conv2 = F.relu(conv1)
conv2 = self._conv2(conv2)
pool = self._pool(conv2)
return paddle.add(x=short, y=pool)
class EntryFlow(nn.Layer):
def __init__(self, block_num=3):
super(EntryFlow, self).__init__()
name = "entry_flow"
self.block_num = block_num
self._conv1 = ConvBNLayer(
3, 32, 3, stride=2, act="relu", name=name + "_conv1")
self._conv2 = ConvBNLayer(32, 64, 3, act="relu", name=name + "_conv2")
if block_num == 3:
self._conv_0 = EntryFlowBottleneckBlock(
64, 128, stride=2, name=name + "_0", relu_first=False)
self._conv_1 = EntryFlowBottleneckBlock(
128, 256, stride=2, name=name + "_1", relu_first=True)
self._conv_2 = EntryFlowBottleneckBlock(
256, 728, stride=2, name=name + "_2", relu_first=True)
elif block_num == 5:
self._conv_0 = EntryFlowBottleneckBlock(
64, 128, stride=2, name=name + "_0", relu_first=False)
self._conv_1 = EntryFlowBottleneckBlock(
128, 256, stride=1, name=name + "_1", relu_first=True)
self._conv_2 = EntryFlowBottleneckBlock(
256, 256, stride=2, name=name + "_2", relu_first=True)
self._conv_3 = EntryFlowBottleneckBlock(
256, 728, stride=1, name=name + "_3", relu_first=True)
self._conv_4 = EntryFlowBottleneckBlock(
728, 728, stride=2, name=name + "_4", relu_first=True)
else:
sys.exit(-1)
def forward(self, inputs):
x = self._conv1(inputs)
x = self._conv2(x)
if self.block_num == 3:
x = self._conv_0(x)
x = self._conv_1(x)
x = self._conv_2(x)
elif self.block_num == 5:
x = self._conv_0(x)
x = self._conv_1(x)
x = self._conv_2(x)
x = self._conv_3(x)
x = self._conv_4(x)
return x
class MiddleFlowBottleneckBlock(nn.Layer):
def __init__(self, input_channels, output_channels, name):
super(MiddleFlowBottleneckBlock, self).__init__()
self._conv_0 = SeparableConv(
input_channels,
output_channels,
stride=1,
name=name + "_branch2a_weights")
self._conv_1 = SeparableConv(
output_channels,
output_channels,
stride=1,
name=name + "_branch2b_weights")
self._conv_2 = SeparableConv(
output_channels,
output_channels,
stride=1,
name=name + "_branch2c_weights")
def forward(self, inputs):
conv0 = F.relu(inputs)
conv0 = self._conv_0(conv0)
conv1 = F.relu(conv0)
conv1 = self._conv_1(conv1)
conv2 = F.relu(conv1)
conv2 = self._conv_2(conv2)
return paddle.add(x=inputs, y=conv2)
class MiddleFlow(nn.Layer):
def __init__(self, block_num=8):
super(MiddleFlow, self).__init__()
self.block_num = block_num
self._conv_0 = MiddleFlowBottleneckBlock(
728, 728, name="middle_flow_0")
self._conv_1 = MiddleFlowBottleneckBlock(
728, 728, name="middle_flow_1")
self._conv_2 = MiddleFlowBottleneckBlock(
728, 728, name="middle_flow_2")
self._conv_3 = MiddleFlowBottleneckBlock(
728, 728, name="middle_flow_3")
self._conv_4 = MiddleFlowBottleneckBlock(
728, 728, name="middle_flow_4")
self._conv_5 = MiddleFlowBottleneckBlock(
728, 728, name="middle_flow_5")
self._conv_6 = MiddleFlowBottleneckBlock(
728, 728, name="middle_flow_6")
self._conv_7 = MiddleFlowBottleneckBlock(
728, 728, name="middle_flow_7")
if block_num == 16:
self._conv_8 = MiddleFlowBottleneckBlock(
728, 728, name="middle_flow_8")
self._conv_9 = MiddleFlowBottleneckBlock(
728, 728, name="middle_flow_9")
self._conv_10 = MiddleFlowBottleneckBlock(
728, 728, name="middle_flow_10")
self._conv_11 = MiddleFlowBottleneckBlock(
728, 728, name="middle_flow_11")
self._conv_12 = MiddleFlowBottleneckBlock(
728, 728, name="middle_flow_12")
self._conv_13 = MiddleFlowBottleneckBlock(
728, 728, name="middle_flow_13")
self._conv_14 = MiddleFlowBottleneckBlock(
728, 728, name="middle_flow_14")
self._conv_15 = MiddleFlowBottleneckBlock(
728, 728, name="middle_flow_15")
def forward(self, inputs):
x = self._conv_0(inputs)
x = self._conv_1(x)
x = self._conv_2(x)
x = self._conv_3(x)
x = self._conv_4(x)
x = self._conv_5(x)
x = self._conv_6(x)
x = self._conv_7(x)
if self.block_num == 16:
x = self._conv_8(x)
x = self._conv_9(x)
x = self._conv_10(x)
x = self._conv_11(x)
x = self._conv_12(x)
x = self._conv_13(x)
x = self._conv_14(x)
x = self._conv_15(x)
return x
class ExitFlowBottleneckBlock(nn.Layer):
def __init__(self, input_channels, output_channels1, output_channels2,
name):
super(ExitFlowBottleneckBlock, self).__init__()
self._short = Conv2D(
in_channels=input_channels,
out_channels=output_channels2,
kernel_size=1,
stride=2,
padding=0,
weight_attr=ParamAttr(name + "_branch1_weights"),
bias_attr=False)
self._conv_1 = SeparableConv(
input_channels,
output_channels1,
stride=1,
name=name + "_branch2a_weights")
self._conv_2 = SeparableConv(
output_channels1,
output_channels2,
stride=1,
name=name + "_branch2b_weights")
self._pool = MaxPool2D(kernel_size=3, stride=2, padding=1)
def forward(self, inputs):
short = self._short(inputs)
conv0 = F.relu(inputs)
conv1 = self._conv_1(conv0)
conv2 = F.relu(conv1)
conv2 = self._conv_2(conv2)
pool = self._pool(conv2)
return paddle.add(x=short, y=pool)
class ExitFlow(nn.Layer):
def __init__(self, class_num):
super(ExitFlow, self).__init__()
name = "exit_flow"
self._conv_0 = ExitFlowBottleneckBlock(
728, 728, 1024, name=name + "_1")
self._conv_1 = SeparableConv(1024, 1536, stride=1, name=name + "_2")
self._conv_2 = SeparableConv(1536, 2048, stride=1, name=name + "_3")
self._pool = AdaptiveAvgPool2D(1)
stdv = 1.0 / math.sqrt(2048 * 1.0)
self._out = Linear(
2048,
class_num,
weight_attr=ParamAttr(
name="fc_weights", initializer=Uniform(-stdv, stdv)),
bias_attr=ParamAttr(name="fc_offset"))
def forward(self, inputs):
conv0 = self._conv_0(inputs)
conv1 = self._conv_1(conv0)
conv1 = F.relu(conv1)
conv2 = self._conv_2(conv1)
conv2 = F.relu(conv2)
pool = self._pool(conv2)
pool = paddle.flatten(pool, start_axis=1, stop_axis=-1)
out = self._out(pool)
return out
class Xception(nn.Layer):
def __init__(self,
entry_flow_block_num=3,
middle_flow_block_num=8,
class_num=1000):
super(Xception, self).__init__()
self.entry_flow_block_num = entry_flow_block_num
self.middle_flow_block_num = middle_flow_block_num
self._entry_flow = EntryFlow(entry_flow_block_num)
self._middle_flow = MiddleFlow(middle_flow_block_num)
self._exit_flow = ExitFlow(class_num)
def forward(self, inputs):
x = self._entry_flow(inputs)
x = self._middle_flow(x)
x = self._exit_flow(x)
return x
def _load_pretrained(pretrained, model, model_url, use_ssld=False):
if pretrained is False:
pass
elif pretrained is True:
load_dygraph_pretrain_from_url(model, model_url, use_ssld=use_ssld)
elif isinstance(pretrained, str):
load_dygraph_pretrain(model, pretrained)
else:
raise RuntimeError(
"pretrained type is not available. Please use `string` or `boolean` type."
)
def Xception41(pretrained=False, use_ssld=False, **kwargs):
model = Xception(entry_flow_block_num=3, middle_flow_block_num=8, **kwargs)
_load_pretrained(
pretrained, model, MODEL_URLS["Xception41"], use_ssld=use_ssld)
return model
def Xception65(pretrained=False, use_ssld=False, **kwargs):
model = Xception(
entry_flow_block_num=3, middle_flow_block_num=16, **kwargs)
_load_pretrained(
pretrained, model, MODEL_URLS["Xception65"], use_ssld=use_ssld)
return model
def Xception71(pretrained=False, use_ssld=False, **kwargs):
model = Xception(
entry_flow_block_num=5, middle_flow_block_num=16, **kwargs)
_load_pretrained(
pretrained, model, MODEL_URLS["Xception71"], use_ssld=use_ssld)
return model
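# Illustrative sketch (not part of the module): building Xception41 without
# pretrained weights and running a dummy forward pass. The 299x299 input size is
# the conventional Xception resolution and is an assumption here, not something
# the code above enforces (the adaptive pooling makes the head size-agnostic).
def _xception_smoke_test():
    model = Xception41(pretrained=False, class_num=10)
    x = paddle.rand([1, 3, 299, 299])
    logits = model(x)
    return logits.shape    # [1, 10]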
| 34.248677
| 95
| 0.593465
|
ba6edae7df10d2a55e3a7d2662804a2f5fa446e1
| 3,506
|
py
|
Python
|
qa/rpc-tests/p2p-timeouts.py
|
ocminer/kreds-core
|
94d550c68c0829a190c6ad676e08075bfcfa8ac0
|
[
"MIT"
] | null | null | null |
qa/rpc-tests/p2p-timeouts.py
|
ocminer/kreds-core
|
94d550c68c0829a190c6ad676e08075bfcfa8ac0
|
[
"MIT"
] | null | null | null |
qa/rpc-tests/p2p-timeouts.py
|
ocminer/kreds-core
|
94d550c68c0829a190c6ad676e08075bfcfa8ac0
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2016 The Kreds Developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
""" TimeoutsTest -- test various net timeouts (only in extended tests)
- Create three kredsd nodes:
no_verack_node - we never send a verack in response to their version
no_version_node - we never send a version (only a ping)
no_send_node - we never send any P2P message.
- Start all three nodes
- Wait 1 second
- Assert that we're connected
- Send a ping to no_verack_node and no_version_node
- Wait 30 seconds
- Assert that we're still connected
- Send a ping to no_verack_node and no_version_node
- Wait 31 seconds
- Assert that we're no longer connected (timeout to receive version/verack is 60 seconds)
"""
from time import sleep
from test_framework.mininode import *
from test_framework.test_framework import KredsTestFramework
from test_framework.util import *
class TestNode(SingleNodeConnCB):
def __init__(self):
SingleNodeConnCB.__init__(self)
self.connected = False
self.received_version = False
def on_open(self, conn):
self.connected = True
def on_close(self, conn):
self.connected = False
def on_version(self, conn, message):
# Don't send a verack in response
self.received_version = True
class TimeoutsTest(KredsTestFramework):
def __init__(self):
super().__init__()
self.setup_clean_chain = True
self.num_nodes = 1
def setup_network(self):
self.nodes = []
# Start up node0 to be a version 1, pre-segwit node.
self.nodes = start_nodes(self.num_nodes, self.options.tmpdir,
[["-debug", "-logtimemicros=1"]])
def run_test(self):
# Setup the p2p connections and start up the network thread.
self.no_verack_node = TestNode() # never send verack
self.no_version_node = TestNode() # never send version (just ping)
self.no_send_node = TestNode() # never send anything
connections = []
connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], self.no_verack_node))
connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], self.no_version_node, send_version=False))
connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], self.no_send_node, send_version=False))
self.no_verack_node.add_connection(connections[0])
self.no_version_node.add_connection(connections[1])
self.no_send_node.add_connection(connections[2])
NetworkThread().start() # Start up network handling in another thread
sleep(1)
assert(self.no_verack_node.connected)
assert(self.no_version_node.connected)
assert(self.no_send_node.connected)
ping_msg = msg_ping()
connections[0].send_message(ping_msg)
connections[1].send_message(ping_msg)
sleep(30)
assert(self.no_verack_node.received_version)
assert(self.no_verack_node.connected)
assert(self.no_version_node.connected)
assert(self.no_send_node.connected)
connections[0].send_message(ping_msg)
connections[1].send_message(ping_msg)
sleep(31)
assert(not self.no_verack_node.connected)
assert(not self.no_version_node.connected)
assert(not self.no_send_node.connected)
if __name__ == '__main__':
TimeoutsTest().main()
| 33.711538
| 119
| 0.695379
|
7a243f85c12f390f194eb9478baef6452e978bd4
| 1,136
|
py
|
Python
|
AutomatedTesting/Gem/PythonTests/EditorPythonBindings/MainWindowCommands_test.py
|
aaarsene/o3de
|
37e3b0226958974defd14dd6d808e8557dcd7345
|
[
"Apache-2.0",
"MIT"
] | 1
|
2021-09-13T00:01:12.000Z
|
2021-09-13T00:01:12.000Z
|
AutomatedTesting/Gem/PythonTests/EditorPythonBindings/MainWindowCommands_test.py
|
aaarsene/o3de
|
37e3b0226958974defd14dd6d808e8557dcd7345
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
AutomatedTesting/Gem/PythonTests/EditorPythonBindings/MainWindowCommands_test.py
|
aaarsene/o3de
|
37e3b0226958974defd14dd6d808e8557dcd7345
|
[
"Apache-2.0",
"MIT"
] | 1
|
2021-07-20T11:07:25.000Z
|
2021-07-20T11:07:25.000Z
|
"""
Copyright (c) Contributors to the Open 3D Engine Project. For complete copyright and license terms please see the LICENSE at the root of this distribution.
SPDX-License-Identifier: Apache-2.0 OR MIT
"""
#
# This is a pytest module to test the in-Editor Python API from PythonEditorFuncs
#
import pytest
pytest.importorskip('ly_test_tools')
import sys
import os
sys.path.append(os.path.dirname(__file__))
from hydra_utils import launch_test_case
@pytest.mark.SUITE_sandbox
@pytest.mark.parametrize('launcher_platform', ['windows_editor'])
@pytest.mark.parametrize('project', ['AutomatedTesting'])
@pytest.mark.parametrize('level', ['Simple'])
class TestMainWindowAutomation(object):
def test_MainWindow(self, request, editor, level, launcher_platform):
unexpected_lines=[]
expected_lines = [
"get_pane_class_names worked",
"open_pane worked",
"close_pane worked"
]
test_case_file = os.path.join(os.path.dirname(__file__), 'MainWindowCommands_test_case.py')
launch_test_case(editor, test_case_file, expected_lines, unexpected_lines)
| 30.702703
| 155
| 0.729754
|
4880fa5177ccd74a72ebbbcef4e83f1b4a7ced97
| 5,477
|
py
|
Python
|
model_measuring/kamal/amalgamation/layerwise_amalgamation.py
|
Gouzhong1223/Dubhe
|
8959a51704410dc38b595a0926646b9928451c9a
|
[
"Apache-2.0"
] | 1
|
2022-01-11T07:14:37.000Z
|
2022-01-11T07:14:37.000Z
|
model_measuring/kamal/amalgamation/layerwise_amalgamation.py
|
Gouzhong1223/Dubhe
|
8959a51704410dc38b595a0926646b9928451c9a
|
[
"Apache-2.0"
] | 1
|
2022-03-04T07:19:43.000Z
|
2022-03-04T07:19:43.000Z
|
model_measuring/kamal/amalgamation/layerwise_amalgamation.py
|
Gouzhong1223/Dubhe
|
8959a51704410dc38b595a0926646b9928451c9a
|
[
"Apache-2.0"
] | 1
|
2022-03-20T13:09:14.000Z
|
2022-03-20T13:09:14.000Z
|
"""
Copyright 2020 Tianshu AI Platform. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
=============================================================
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from kamal.core.engine.engine import Engine
from kamal.core.engine.hooks import FeatureHook
from kamal.core import tasks
from kamal.utils import set_mode
import typing
import time
from kamal.utils import move_to_device, set_mode
class AmalBlock(nn.Module):
def __init__(self, cs, cts):
super( AmalBlock, self ).__init__()
self.cs, self.cts = cs, cts
self.enc = nn.Conv2d( in_channels=sum(self.cts), out_channels=self.cs, kernel_size=1, stride=1, padding=0, bias=True )
self.fam = nn.Conv2d( in_channels=self.cs, out_channels=self.cs, kernel_size=1, stride=1, padding=0, bias=True )
self.dec = nn.Conv2d( in_channels=self.cs, out_channels=sum(self.cts), kernel_size=1, stride=1, padding=0, bias=True )
def forward(self, fs, fts):
rep = self.enc( torch.cat( fts, dim=1 ) )
_fts = self.dec( rep )
_fts = torch.split( _fts, self.cts, dim=1 )
_fs = self.fam( fs )
return rep, _fs, _fts
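# Illustrative sketch (not part of the module): feeding AmalBlock toy feature
# maps. With student channels cs=4 and two teachers of 2 and 3 channels, the
# encoder consumes the concatenated teacher features (5 channels), produces the
# common representation, and the decoder splits it back into per-teacher maps.
def _amal_block_sketch():
    block = AmalBlock(cs=4, cts=[2, 3])
    fs = torch.randn(1, 4, 8, 8)                                  # student features
    fts = [torch.randn(1, 2, 8, 8), torch.randn(1, 3, 8, 8)]      # teacher features
    rep, _fs, _fts = block(fs, fts)
    return rep.shape, _fs.shape, [t.shape for t in _fts]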
class LayerWiseAmalgamator(Engine):
def setup(
self,
student,
teachers,
layer_groups: typing.Sequence[typing.Sequence],
layer_channels: typing.Sequence[typing.Sequence],
dataloader: torch.utils.data.DataLoader,
optimizer: torch.optim.Optimizer,
weights = [1., 1., 1.],
device=None,
):
if device is None:
device = torch.device( 'cuda' if torch.cuda.is_available() else 'cpu' )
self._device = device
self._dataloader = dataloader
self.model = self.student = student.to(self.device)
self.teachers = nn.ModuleList(teachers).to(self.device)
self.optimizer = optimizer
self._weights = weights
amal_blocks = []
for group, C in zip(layer_groups, layer_channels):
hooks = [ FeatureHook(layer) for layer in group ]
amal_block = AmalBlock(cs=C[0], cts=C[1:]).to(self.device).train()
amal_blocks.append( (amal_block, hooks, C) )
self._amal_blocks = amal_blocks
@property
def device(self):
return self._device
def run(self, max_iter, start_iter=0, epoch_length=None ):
block_params = []
for block, _, _ in self._amal_blocks:
block_params.extend( list(block.parameters()) )
if isinstance( self.optimizer, torch.optim.SGD ):
self._amal_optimimizer = torch.optim.SGD( block_params, lr=self.optimizer.param_groups[0]['lr'], momentum=0.9, weight_decay=1e-4 )
else:
self._amal_optimimizer = torch.optim.Adam( block_params, lr=self.optimizer.param_groups[0]['lr'], weight_decay=1e-4 )
self._amal_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR( self._amal_optimimizer, T_max=max_iter )
with set_mode(self.student, training=True), \
set_mode(self.teachers, training=False):
super( LayerWiseAmalgamator, self ).run(self.step_fn, self._dataloader, start_iter=start_iter, max_iter=max_iter, epoch_length=epoch_length)
def step_fn(self, engine, batch):
start_time = time.perf_counter()
batch = move_to_device(batch, self._device)
data = batch[0]
s_out = self.student( data )
with torch.no_grad():
t_out = [ teacher( data ) for teacher in self.teachers ]
loss_amal = 0
loss_recons = 0
for amal_block, hooks, C in self._amal_blocks:
features = [ h.feat_out for h in hooks ]
fs, fts = features[0], features[1:]
rep, _fs, _fts = amal_block( fs, fts )
loss_amal += F.mse_loss( _fs, rep.detach() )
loss_recons += sum( [ F.mse_loss( _ft, ft ) for (_ft, ft) in zip( _fts, fts ) ] )
loss_kd = tasks.loss.kldiv( s_out, torch.cat( t_out, dim=1 ) )
#loss_kd = F.mse_loss( s_out, torch.cat( t_out, dim=1 ) )
loss_dict = { "loss_kd": self._weights[0] * loss_kd,
"loss_amal": self._weights[1] * loss_amal,
"loss_recons": self._weights[2] * loss_recons }
loss = sum(loss_dict.values())
self.optimizer.zero_grad()
self._amal_optimimizer.zero_grad()
loss.backward()
self.optimizer.step()
self._amal_optimimizer.step()
self._amal_scheduler.step()
step_time = time.perf_counter() - start_time
metrics = { loss_name: loss_value.item() for (loss_name, loss_value) in loss_dict.items() }
metrics.update({
'total_loss': loss.item(),
'step_time': step_time,
'lr': float( self.optimizer.param_groups[0]['lr'] )
})
return metrics
| 41.492424
| 152
| 0.629907
|
b6c654f5da3700b4f48b69aa8f2a1ac6593a567a
| 945
|
py
|
Python
|
image_assets/signals.py
|
alfa24/django-image-assets
|
053e7a6792b74771b02ebffc7fc983fdc6d8d299
|
[
"MIT"
] | 1
|
2021-12-26T02:54:38.000Z
|
2021-12-26T02:54:38.000Z
|
image_assets/signals.py
|
alfa24/django-image-assets
|
053e7a6792b74771b02ebffc7fc983fdc6d8d299
|
[
"MIT"
] | 80
|
2020-03-18T14:58:12.000Z
|
2022-02-03T07:55:08.000Z
|
image_assets/signals.py
|
alfa24/django-image-assets
|
053e7a6792b74771b02ebffc7fc983fdc6d8d299
|
[
"MIT"
] | 2
|
2020-03-18T14:26:34.000Z
|
2021-11-10T14:14:29.000Z
|
from typing import Type
from django.db.models.signals import pre_delete
from django.dispatch import receiver
from image_assets import models
# noinspection PyUnusedLocal
@receiver(pre_delete, sender=models.get_asset_model())
def move_asset_file_to_deleted_asset(
sender, *, instance: models.Asset, **kwargs):
""" When asset is deleted, it's file is moved to deleted asset instance."""
models.get_deleted_asset_model().objects.create(
image=instance.image,
content_type_id=instance.content_type_id,
object_id=instance.object_id,
asset_type_id=instance.asset_type_id)
# noinspection PyUnusedLocal
@receiver(pre_delete, sender=models.get_deleted_asset_model())
def delete_asset_file_for_deleted_asset(
sender, *, instance: models.DeletedAsset, **kwargs):
"""
When a deleted asset is deleted from the db, its file is purged from storage.
"""
instance.image.delete(save=False)
| 32.586207
| 79
| 0.749206
|
a88b462279343f8f758b4256ccbff84f7c8491a8
| 27,377
|
py
|
Python
|
tensorflow_probability/python/experimental/mcmc/particle_filter.py
|
PavanKishore21/probability
|
4bad1b796b0e6ed2959205915d42788817620c4c
|
[
"Apache-2.0"
] | 2
|
2019-10-30T04:45:07.000Z
|
2019-10-30T04:45:08.000Z
|
tensorflow_probability/python/experimental/mcmc/particle_filter.py
|
PavanKishore21/probability
|
4bad1b796b0e6ed2959205915d42788817620c4c
|
[
"Apache-2.0"
] | null | null | null |
tensorflow_probability/python/experimental/mcmc/particle_filter.py
|
PavanKishore21/probability
|
4bad1b796b0e6ed2959205915d42788817620c4c
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Particle filtering."""
from __future__ import print_function
import numpy as np
import tensorflow.compat.v2 as tf
from tensorflow_probability.python.experimental.mcmc import sequential_monte_carlo_kernel as smc_kernel
from tensorflow_probability.python.experimental.mcmc import weighted_resampling
from tensorflow_probability.python.internal import assert_util
from tensorflow_probability.python.internal import docstring_util
from tensorflow_probability.python.internal import prefer_static as ps
from tensorflow_probability.python.internal import samplers
from tensorflow_probability.python.mcmc.internal import util as mcmc_util
__all__ = [
'infer_trajectories',
'particle_filter',
'reconstruct_trajectories',
]
# Default trace criterion.
_always_trace = lambda *_: True
def _default_trace_fn(state, kernel_results):
return (state.particles,
state.log_weights,
kernel_results.parent_indices,
kernel_results.incremental_log_marginal_likelihood)
particle_filter_arg_str = """\
Each latent state is a `Tensor` or nested structure of `Tensor`s, as defined
by the `initial_state_prior`.
The `transition_fn` and `proposal_fn` args, if specified, have signature
`next_state_dist = fn(step, state)`, where `step` is an `int` `Tensor` index
of the current time step (beginning at zero), and `state` represents
the latent state at time `step`. The return value is a `tfd.Distribution`
instance over the state at time `step + 1`.
Similarly, the `observation_fn` has signature
`observation_dist = observation_fn(step, state)`, where the return value
is a distribution over the value(s) observed at time `step`.
Args:
observations: a (structure of) Tensors, each of shape
`concat([[num_observation_steps, b1, ..., bN], event_shape])` with
optional batch dimensions `b1, ..., bN`.
initial_state_prior: a (joint) distribution over the initial latent state,
with optional batch shape `[b1, ..., bN]`.
transition_fn: callable returning a (joint) distribution over the next
latent state.
observation_fn: callable returning a (joint) distribution over the current
observation.
num_particles: `int` `Tensor` number of particles.
initial_state_proposal: a (joint) distribution over the initial latent
state, with optional batch shape `[b1, ..., bN]`. If `None`, the initial
particles are proposed from the `initial_state_prior`.
Default value: `None`.
proposal_fn: callable returning a (joint) proposal distribution over the
next latent state. If `None`, the dynamics model is used (
`proposal_fn == transition_fn`).
Default value: `None`.
resample_fn: Python `callable` to generate the indices of resampled
particles, given their weights. Generally, one of
`tfp.experimental.mcmc.resample_independent` or
`tfp.experimental.mcmc.resample_systematic`, or any function
    with the same signature, `resampled_indices = f(log_probs, event_size,
    sample_shape, seed)`.
Default: `tfp.experimental.mcmc.resample_systematic`.
resample_criterion_fn: optional Python `callable` with signature
`do_resample = resample_criterion_fn(log_weights)`,
where `log_weights` is a float `Tensor` of shape
`[b1, ..., bN, num_particles]` containing log (unnormalized) weights for
all particles at the current step. The return value `do_resample`
determines whether particles are resampled at the current step. In the
case `resample_criterion_fn==None`, particles are resampled at every step.
The default behavior resamples particles when the current effective
sample size falls below half the total number of particles.
Default value: `tfp.experimental.mcmc.ess_below_threshold`.
unbiased_gradients: If `True`, use the stop-gradient
resampling trick of Scibior, Masrani, and Wood [{scibor_ref_idx}] to
correct for gradient bias introduced by the discrete resampling step. This
will generally increase the variance of stochastic gradients.
Default value: `True`.
rejuvenation_kernel_fn: optional Python `callable` with signature
`transition_kernel = rejuvenation_kernel_fn(target_log_prob_fn)`
where `target_log_prob_fn` is a provided callable evaluating
`p(x[t] | y[t], x[t-1])` at each step `t`, and `transition_kernel`
should be an instance of `tfp.mcmc.TransitionKernel`.
Default value: `None`. # TODO(davmre): not yet supported.
num_transitions_per_observation: scalar Tensor positive `int` number of
state transitions between regular observation points. A value of `1`
indicates that there is an observation at every timestep,
`2` that every other step is observed, and so on. Values greater than `1`
may be used with an appropriately-chosen transition function to
approximate continuous-time dynamics. The initial and final steps
(steps `0` and `num_timesteps - 1`) are always observed.
    Default value: `1`.
"""
@docstring_util.expand_docstring(
particle_filter_arg_str=particle_filter_arg_str.format(scibor_ref_idx=2))
def infer_trajectories(observations,
initial_state_prior,
transition_fn,
observation_fn,
num_particles,
initial_state_proposal=None,
proposal_fn=None,
resample_fn=weighted_resampling.resample_systematic,
resample_criterion_fn=smc_kernel.ess_below_threshold,
unbiased_gradients=True,
rejuvenation_kernel_fn=None,
num_transitions_per_observation=1,
seed=None,
name=None): # pylint: disable=g-doc-args
"""Use particle filtering to sample from the posterior over trajectories.
${particle_filter_arg_str}
seed: PRNG seed; see `tfp.random.sanitize_seed` for details.
name: Python `str` name for ops created by this method.
Default value: `None` (i.e., `'infer_trajectories'`).
Returns:
trajectories: a (structure of) Tensor(s) matching the latent state, each
of shape
`concat([[num_timesteps, num_particles, b1, ..., bN], event_shape])`,
representing unbiased samples from the posterior distribution
`p(latent_states | observations)`.
incremental_log_marginal_likelihoods: float `Tensor` of shape
`[num_observation_steps, b1, ..., bN]`,
giving the natural logarithm of an unbiased estimate of
`p(observations[t] | observations[:t])` at each timestep `t`. Note that
(by [Jensen's inequality](
https://en.wikipedia.org/wiki/Jensen%27s_inequality))
this is *smaller* in expectation than the true
`log p(observations[t] | observations[:t])`.
#### Examples
**Tracking unknown position and velocity**: Let's consider tracking an object
moving in a one-dimensional space. We'll define a dynamical system
by specifying an `initial_state_prior`, a `transition_fn`,
and `observation_fn`.
The structure of the latent state space is determined by the prior
distribution. Here, we'll define a state space that includes the object's
current position and velocity:
```python
initial_state_prior = tfd.JointDistributionNamed({
'position': tfd.Normal(loc=0., scale=1.),
'velocity': tfd.Normal(loc=0., scale=0.1)})
```
The `transition_fn` specifies the evolution of the system. It should
return a distribution over latent states of the same structure as the prior.
Here, we'll assume that the position evolves according to the velocity,
with a small random drift, and the velocity also changes slowly, following
a random drift:
```python
def transition_fn(_, previous_state):
return tfd.JointDistributionNamed({
'position': tfd.Normal(
loc=previous_state['position'] + previous_state['velocity'],
scale=0.1),
'velocity': tfd.Normal(loc=previous_state['velocity'], scale=0.01)})
```
The `observation_fn` specifies the process by which the system is observed
  at each time step. Let's suppose we observe only a noisy version of the
current position.
```python
def observation_fn(_, state):
return tfd.Normal(loc=state['position'], scale=0.1)
```
Now let's track our object. Suppose we've been given observations
corresponding to an initial position of `0.4` and constant velocity of `0.01`:
```python
# Generate simulated observations.
  observed_positions = tfd.Normal(loc=tf.range(0.4, 0.8, 0.01),
                                  scale=0.1).sample()
# Run particle filtering to sample plausible trajectories.
(trajectories, # {'position': [40, 1000], 'velocity': [40, 1000]}
lps) = tfp.experimental.mcmc.infer_trajectories(
observations=observed_positions,
initial_state_prior=initial_state_prior,
transition_fn=transition_fn,
observation_fn=observation_fn,
num_particles=1000)
```
For all `i`, `trajectories['position'][:, i]` is a sample from the
posterior over position sequences, given the observations:
`p(state[0:T] | observations[0:T])`. Often, the sampled trajectories
will be highly redundant in their earlier timesteps, because most
of the initial particles have been discarded through resampling
(this problem is known as 'particle degeneracy'; see section 3.5 of
[Doucet and Johansen][1]).
In such cases it may be useful to also consider the series of *filtering*
distributions `p(state[t] | observations[:t])`, in which each latent state
is inferred conditioned only on observations up to that point in time; these
may be computed using `tfp.mcmc.experimental.particle_filter`.
#### References
[1] Arnaud Doucet and Adam M. Johansen. A tutorial on particle
filtering and smoothing: Fifteen years later.
_Handbook of nonlinear filtering_, 12(656-704), 2009.
https://www.stats.ox.ac.uk/~doucet/doucet_johansen_tutorialPF2011.pdf
[2] Adam Scibior, Vaden Masrani, and Frank Wood. Differentiable Particle
Filtering without Modifying the Forward Pass. _arXiv preprint
arXiv:2106.10314_, 2021. https://arxiv.org/abs/2106.10314
"""
with tf.name_scope(name or 'infer_trajectories') as name:
pf_seed, resample_seed = samplers.split_seed(
seed, salt='infer_trajectories')
(particles,
log_weights,
parent_indices,
incremental_log_marginal_likelihoods) = particle_filter(
observations=observations,
initial_state_prior=initial_state_prior,
transition_fn=transition_fn,
observation_fn=observation_fn,
num_particles=num_particles,
initial_state_proposal=initial_state_proposal,
proposal_fn=proposal_fn,
resample_fn=resample_fn,
resample_criterion_fn=resample_criterion_fn,
unbiased_gradients=unbiased_gradients,
rejuvenation_kernel_fn=rejuvenation_kernel_fn,
num_transitions_per_observation=num_transitions_per_observation,
trace_fn=_default_trace_fn,
trace_criterion_fn=lambda *_: True,
seed=pf_seed,
name=name)
weighted_trajectories = reconstruct_trajectories(particles, parent_indices)
# Resample all steps of the trajectories using the final weights.
resample_indices = resample_fn(log_probs=log_weights[-1],
event_size=num_particles,
sample_shape=(),
seed=resample_seed)
trajectories = tf.nest.map_structure(
lambda x: mcmc_util.index_remapping_gather(x, # pylint: disable=g-long-lambda
resample_indices,
axis=1),
weighted_trajectories)
return trajectories, incremental_log_marginal_likelihoods
@docstring_util.expand_docstring(
particle_filter_arg_str=particle_filter_arg_str.format(scibor_ref_idx=1))
def particle_filter(observations,
initial_state_prior,
transition_fn,
observation_fn,
num_particles,
initial_state_proposal=None,
proposal_fn=None,
resample_fn=weighted_resampling.resample_systematic,
resample_criterion_fn=smc_kernel.ess_below_threshold,
unbiased_gradients=True,
rejuvenation_kernel_fn=None, # TODO(davmre): not yet supported. pylint: disable=unused-argument
num_transitions_per_observation=1,
trace_fn=_default_trace_fn,
trace_criterion_fn=_always_trace,
static_trace_allocation_size=None,
parallel_iterations=1,
seed=None,
name=None): # pylint: disable=g-doc-args
"""Samples a series of particles representing filtered latent states.
The particle filter samples from the sequence of "filtering" distributions
`p(state[t] | observations[:t])` over latent
states: at each point in time, this is the distribution conditioned on all
observations *up to that time*. Because particles may be resampled, a particle
at time `t` may be different from the particle with the same index at time
`t + 1`. To reconstruct trajectories by tracing back through the resampling
process, see `tfp.mcmc.experimental.reconstruct_trajectories`.
${particle_filter_arg_str}
trace_fn: Python `callable` defining the values to be traced at each step,
with signature `traced_values = trace_fn(weighted_particles, results)`
in which the first argument is an instance of
`tfp.experimental.mcmc.WeightedParticles` and the second an instance of
`SequentialMonteCarloResults` tuple, and the return value is a structure
of `Tensor`s.
Default value: `lambda s, r: (s.particles, s.log_weights,
r.parent_indices, r.incremental_log_marginal_likelihood)`
trace_criterion_fn: optional Python `callable` with signature
`trace_this_step = trace_criterion_fn(weighted_particles, results)` taking
the same arguments as `trace_fn` and returning a boolean `Tensor`. If
`None`, only values from the final step are returned.
Default value: `lambda *_: True` (trace every step).
static_trace_allocation_size: Optional Python `int` size of trace to
allocate statically. This should be an upper bound on the number of steps
traced and is used only when the length cannot be
statically inferred (for example, if a `trace_criterion_fn` is specified).
It is primarily intended for contexts where static shapes are required,
such as in XLA-compiled code.
Default value: `None`.
parallel_iterations: Passed to the internal `tf.while_loop`.
Default value: `1`.
seed: PRNG seed; see `tfp.random.sanitize_seed` for details.
name: Python `str` name for ops created by this method.
Default value: `None` (i.e., `'particle_filter'`).
Returns:
traced_results: A structure of Tensors as returned by `trace_fn`. If
`trace_criterion_fn==None`, this is computed from the final step;
otherwise, each Tensor will have initial dimension `num_steps_traced`
and stacks the traced results across all steps.
#### References
[1] Adam Scibior, Vaden Masrani, and Frank Wood. Differentiable Particle
Filtering without Modifying the Forward Pass. _arXiv preprint
arXiv:2106.10314_, 2021. https://arxiv.org/abs/2106.10314
"""
init_seed, loop_seed = samplers.split_seed(seed, salt='particle_filter')
with tf.name_scope(name or 'particle_filter'):
num_observation_steps = ps.size0(tf.nest.flatten(observations)[0])
num_timesteps = (
1 + num_transitions_per_observation * (num_observation_steps - 1))
# If trace criterion is `None`, we'll return only the final results.
never_trace = lambda *_: False
if trace_criterion_fn is None:
static_trace_allocation_size = 0
trace_criterion_fn = never_trace
initial_weighted_particles = _particle_filter_initial_weighted_particles(
observations=observations,
observation_fn=observation_fn,
initial_state_prior=initial_state_prior,
initial_state_proposal=initial_state_proposal,
num_particles=num_particles,
seed=init_seed)
propose_and_update_log_weights_fn = (
_particle_filter_propose_and_update_log_weights_fn(
observations=observations,
transition_fn=transition_fn,
proposal_fn=proposal_fn,
observation_fn=observation_fn,
num_transitions_per_observation=num_transitions_per_observation))
kernel = smc_kernel.SequentialMonteCarlo(
propose_and_update_log_weights_fn=propose_and_update_log_weights_fn,
resample_fn=resample_fn,
resample_criterion_fn=resample_criterion_fn,
unbiased_gradients=unbiased_gradients)
# Use `trace_scan` rather than `sample_chain` directly because the latter
# would force us to trace the state history (with or without thinning),
# which is not always appropriate.
def seeded_one_step(seed_state_results, _):
seed, state, results = seed_state_results
one_step_seed, next_seed = samplers.split_seed(seed)
next_state, next_results = kernel.one_step(
state, results, seed=one_step_seed)
return next_seed, next_state, next_results
final_seed_state_result, traced_results = mcmc_util.trace_scan(
loop_fn=seeded_one_step,
initial_state=(loop_seed,
initial_weighted_particles,
kernel.bootstrap_results(initial_weighted_particles)),
elems=tf.ones([num_timesteps]),
trace_fn=lambda seed_state_results: trace_fn(*seed_state_results[1:]),
trace_criterion_fn=(
lambda seed_state_results: trace_criterion_fn( # pylint: disable=g-long-lambda
*seed_state_results[1:])),
static_trace_allocation_size=static_trace_allocation_size,
parallel_iterations=parallel_iterations)
if trace_criterion_fn is never_trace:
# Return results from just the final step.
traced_results = trace_fn(*final_seed_state_result[1:])
return traced_results
def _particle_filter_initial_weighted_particles(observations,
observation_fn,
initial_state_prior,
initial_state_proposal,
num_particles,
seed=None):
"""Initialize a set of weighted particles including the first observation."""
# Propose an initial state.
if initial_state_proposal is None:
initial_state = initial_state_prior.sample(num_particles, seed=seed)
initial_log_weights = ps.zeros_like(
initial_state_prior.log_prob(initial_state))
else:
initial_state = initial_state_proposal.sample(num_particles, seed=seed)
initial_log_weights = (initial_state_prior.log_prob(initial_state) -
initial_state_proposal.log_prob(initial_state))
# Normalize the initial weights. If we used a proposal, the weights are
# normalized in expectation, but actually normalizing them reduces variance.
initial_log_weights = tf.nn.log_softmax(initial_log_weights, axis=0)
# Return particles weighted by the initial observation.
return smc_kernel.WeightedParticles(
particles=initial_state,
log_weights=initial_log_weights + _compute_observation_log_weights(
step=0,
particles=initial_state,
observations=observations,
observation_fn=observation_fn))
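# Small standalone illustration (numpy only, separate from the filter code) of
# the log-weight normalization performed above with `tf.nn.log_softmax`:
# subtracting the log-sum-exp over the particle axis yields the log of
# self-normalized importance weights, which then sum to one.
_example_log_w = np.array([-1.0, -2.0, -0.5])
_example_log_norm = _example_log_w - np.log(np.sum(np.exp(_example_log_w)))
assert np.isclose(np.sum(np.exp(_example_log_norm)), 1.0)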
def _particle_filter_propose_and_update_log_weights_fn(
observations,
transition_fn,
proposal_fn,
observation_fn,
num_transitions_per_observation=1):
"""Build a function specifying a particle filter update step."""
def propose_and_update_log_weights_fn(step, state, seed=None):
particles, log_weights = state.particles, state.log_weights
transition_dist = transition_fn(step, particles)
assertions = _assert_batch_shape_matches_weights(
distribution=transition_dist,
weights_shape=ps.shape(log_weights),
diststr='transition')
if proposal_fn:
proposal_dist = proposal_fn(step, particles)
assertions += _assert_batch_shape_matches_weights(
distribution=proposal_dist,
weights_shape=ps.shape(log_weights),
diststr='proposal')
proposed_particles = proposal_dist.sample(seed=seed)
log_weights += (transition_dist.log_prob(proposed_particles) -
proposal_dist.log_prob(proposed_particles))
# The normalizing constant E~q[p(x)/q(x)] is 1 in expectation,
# so we reduce variance by dividing it out. Intuitively: the marginal
# likelihood of a model with no observations is constant
# (equal to 1.), so the transition and proposal distributions shouldn't
# affect it.
log_weights = tf.nn.log_softmax(log_weights, axis=0)
else:
proposed_particles = transition_dist.sample(seed=seed)
with tf.control_dependencies(assertions):
return smc_kernel.WeightedParticles(
particles=proposed_particles,
log_weights=log_weights + _compute_observation_log_weights(
step + 1, proposed_particles, observations, observation_fn,
num_transitions_per_observation=num_transitions_per_observation))
return propose_and_update_log_weights_fn
def _compute_observation_log_weights(step,
particles,
observations,
observation_fn,
num_transitions_per_observation=1):
"""Computes particle importance weights from an observation step.
Args:
step: int `Tensor` current step.
particles: Nested structure of `Tensor`s, each of shape
`concat([[num_particles, b1, ..., bN], event_shape])`, where
`b1, ..., bN` are optional batch dimensions and `event_shape` may
differ across `Tensor`s.
observations: Nested structure of `Tensor`s, each of shape
`concat([[num_observations, b1, ..., bN], event_shape])`
where `b1, ..., bN` are optional batch dimensions and `event_shape` may
differ across `Tensor`s.
observation_fn: callable with signature
`observation_dist = observation_fn(step, particles)`, producing
a batch of distributions over the `observation` at the given `step`,
one for each particle.
num_transitions_per_observation: optional int `Tensor` number of times
to apply the transition model between successive observation steps.
Default value: `1`.
Returns:
log_weights: `Tensor` of shape `concat([num_particles, b1, ..., bN])`.
"""
with tf.name_scope('compute_observation_log_weights'):
step_has_observation = (
# The second of these conditions subsumes the first, but both are
# useful because the first can often be evaluated statically.
ps.equal(num_transitions_per_observation, 1) |
ps.equal(step % num_transitions_per_observation, 0))
observation_idx = step // num_transitions_per_observation
observation = tf.nest.map_structure(
lambda x, step=step: tf.gather(x, observation_idx), observations)
log_weights = observation_fn(step, particles).log_prob(observation)
return tf.where(step_has_observation,
log_weights,
tf.zeros_like(log_weights))
def reconstruct_trajectories(particles, parent_indices, name=None):
"""Reconstructs the ancestor trajectory that generated each final particle."""
with tf.name_scope(name or 'reconstruct_trajectories'):
# Walk backwards to compute the ancestor of each final particle at time t.
final_indices = smc_kernel._dummy_indices_like(parent_indices[-1]) # pylint: disable=protected-access
ancestor_indices = tf.scan(
fn=lambda ancestor, parent: mcmc_util.index_remapping_gather( # pylint: disable=g-long-lambda
parent, ancestor, axis=0),
elems=parent_indices[1:],
initializer=final_indices,
reverse=True)
ancestor_indices = tf.concat([ancestor_indices, [final_indices]], axis=0)
return tf.nest.map_structure(
lambda part: mcmc_util.index_remapping_gather( # pylint: disable=g-long-lambda
part, ancestor_indices, axis=1, indices_axis=1),
particles)
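# Conceptual sketch (numpy only, separate from the function above) of the
# backward ancestor chase that `reconstruct_trajectories` performs with
# `tf.scan`: starting from the final particle indices, repeatedly index into
# the previous step's parent indices. Toy values: 3 particles, 3 steps.
_toy_parents = np.array([[0, 1, 2],   # step 0 (never read by the backward pass)
                         [0, 0, 2],   # step 1: particle 1 was resampled from 0
                         [1, 1, 2]])  # step 2: particles 0 and 1 came from 1
_ancestors = np.arange(3)             # final-step particle indices
_lineage = [_ancestors]
for _parents in _toy_parents[:0:-1]:  # walk steps 2, 1 backwards
    _ancestors = _parents[_ancestors]
    _lineage.insert(0, _ancestors)
# _lineage[t][i] is the step-t ancestor of final particle i:
# [[0, 0, 2], [1, 1, 2], [0, 1, 2]]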
def _assert_batch_shape_matches_weights(distribution, weights_shape, diststr):
"""Checks that all parts of a distribution have the expected batch shape."""
shapes = [weights_shape] + tf.nest.flatten(distribution.batch_shape_tensor())
static_shapes = [tf.get_static_value(ps.convert_to_shape_tensor(s))
for s in shapes]
static_shapes_not_none = [s for s in static_shapes if s is not None]
static_shapes_match = all([
np.all(a == b) # Also need to check for rank mismatch (below).
for (a, b) in zip(static_shapes_not_none[1:],
static_shapes_not_none[:-1])])
# Build a separate list of static ranks, since rank is often static even when
# shape is not.
ranks = [ps.rank_from_shape(s) for s in shapes]
static_ranks = [int(r) for r in ranks if not tf.is_tensor(r)]
static_ranks_match = all([a == b for (a, b) in zip(static_ranks[1:],
static_ranks[:-1])])
msg = (
"The {diststr} distribution's batch shape does not match the particle "
"weights; a correct {diststr} distribution must return an independent "
"log-density for each particle. You may be "
"creating a joint distribution in which some parts do not depend on the "
"previous particles, and/or you are creating an autobatched joint "
"distribution without setting `batch_ndims`.".format(
diststr=diststr))
if not (static_ranks_match and static_shapes_match):
raise ValueError(msg + ' ' +
'Weights have shape {}, but the distribution has batch '
'shape {}.'.format(
weights_shape, distribution.batch_shape))
assertions = []
if distribution.validate_args and any([s is None for s in static_shapes]):
assertions = [assert_util.assert_equal(a, b, message=msg)
for a, b in zip(shapes[1:], shapes[:-1])]
return assertions
| 47.039519
| 116
| 0.697666
|
753897d01619da9ccabc579df1bca9c3e88b485b
| 4,284
|
py
|
Python
|
plugin.video.plexodus/resources/lib/modules/playlist.py
|
MR-Unknown-Cm/addons
|
8df1ebe58c95620bb02a05dbae7bf37954915cbd
|
[
"Apache-2.0"
] | 1
|
2020-03-03T10:01:21.000Z
|
2020-03-03T10:01:21.000Z
|
plugin.video.plexodus/resources/lib/modules/playlist.py
|
MR-Unknown-Cm/addons
|
8df1ebe58c95620bb02a05dbae7bf37954915cbd
|
[
"Apache-2.0"
] | null | null | null |
plugin.video.plexodus/resources/lib/modules/playlist.py
|
MR-Unknown-Cm/addons
|
8df1ebe58c95620bb02a05dbae7bf37954915cbd
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
plexOdus Add-on
"""
import json, xbmcgui, xbmc
from resources.lib.modules import control, cleantitle
from resources.lib.extensions import tools
Id = xbmc.PLAYLIST_VIDEO
videoplaylist = 10028
notification = True
refresh = True
def playlistManager(name = None, url = None, meta = None, art = None):
try:
items = []
items += [(control.lang(32065).encode('utf-8'), 'playlistAdd')]
items += [(control.lang(35518).encode('utf-8'), 'playlistRemove')]
items += [(control.lang(35517).encode('utf-8'), 'playlistShow')]
items += [(control.lang(35516).encode('utf-8'), 'playlistClear')]
control.hide()
select = control.selectDialog([i[0] for i in items], heading = control.addonInfo('name') + ' - ' + control.lang(35522).encode('utf-8'))
if select == -1:
return
if select >= 0:
if select == 0:
control.busy()
playlistAdd(name, url, meta, art)
control.hide()
elif select == 1:
control.busy()
playlistRemove(name)
control.hide()
elif select == 2:
control.busy()
playlistShow()
control.hide()
elif select == 3:
control.busy()
playlistClear()
control.hide()
except:
import traceback
traceback.print_exc()
control.hide()
def playlist():
return xbmc.PlayList(Id)
def playlistShow():
if len(playListItems()) > 0:
control.closeAll()
videoplaylistID = 10028
control.execute('ActivateWindow(%d)' % videoplaylistID)
else:
if notification:
control.notification(title = 35522, message = 'Playlist is empty', icon = 'INFO', sound = False)
def playlistClear():
playlist().clear()
if notification:
control.notification(title = 35522, message = 35521, icon = 'INFO', sound = False)
def playListItems():
rpc = '{"jsonrpc": "2.0", "method": "Playlist.GetItems", "params": {"playlistid" : %s}, "id": 1 }' % Id
result = control.jsonrpc(rpc)
limits =json.loads(result)['result']['limits']
total = limits['total']
if int(total) <= 0: return []
result = unicode(result, 'utf-8', errors = 'ignore')
result = json.loads(result)['result']['items']
# xbmc.log('line 87 result = %s' % result, 2)
# label = cleantitle(i['label'])
try: return [i['label'].encode('utf-8') for i in result]
except: return []
def position(label):
try: return playListItems().index(label)
except: return -1
def playlistAdd(name, url, meta, art):
# if not name is None: name.encode('utf-8')
labelPosition = position(label = name)
if labelPosition >= 0:
return control.notification(title = 35522, message = 'Title already in playlist', icon = 'INFO', sound = False)
# if url is None:
# control.queueItem()
# else:
if isinstance(meta, basestring): meta = json.loads(meta)
if isinstance(art, basestring): art = json.loads(art)
xbmc.log('line 106 name = %s' % name, 2)
item = control.item(label=name)
item.setArt(art)
item.setProperty('IsPlayable', 'true')
item.setInfo(type='video', infoLabels=control.metadataClean(meta))
video_streaminfo = {'codec': 'h264'}
item.addStreamInfo('video', video_streaminfo)
cm = []
item.addContextMenuItems(cm)
playlist().add(url=url, listitem=item)
if notification:
control.notification(title = 35522, message = str(name) + ' Added to playlist', icon = 'INFO', sound = False)
def playlistRemove(name):
labelPosition = position(label=name)
if labelPosition >= 0:
rpc = '{"jsonrpc": "2.0", "method": "Playlist.Remove", "params": {"playlistid": %s, "position": %s}, "id": 1 }' % (Id, labelPosition)
control.jsonrpc(rpc)
if notification:
control.notification(title = 35522, message = str(name) + ' Removed from playlist', icon = 'INFO', sound = False)
if labelPosition == -1:
if notification:
control.notification(title = 35522, message = 'Not found in playlist', icon = 'INFO', sound = False)
# control.refresh()
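# --- Illustrative sketch (separate from the addon code above) ----------------
# The module drives Kodi's playlist through JSON-RPC strings like the ones
# built in playListItems() and playlistRemove(). The payloads below can be
# constructed without Kodi; in the addon they are handed to control.jsonrpc(),
# which presumably wraps xbmc.executeJSONRPC(). The playlist id value assumes
# xbmc.PLAYLIST_VIDEO == 1, and the position is a placeholder.
import json as _json

example_playlist_id = 1
example_get_items = _json.dumps({
    "jsonrpc": "2.0", "method": "Playlist.GetItems",
    "params": {"playlistid": example_playlist_id}, "id": 1})
example_remove = _json.dumps({
    "jsonrpc": "2.0", "method": "Playlist.Remove",
    "params": {"playlistid": example_playlist_id, "position": 0}, "id": 1})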
| 31.970149
| 143
| 0.592204
|
d5346075f5f5660543c137e0424570efc730d600
| 4,745
|
py
|
Python
|
bigml/api_handlers/topicdistributionhandler.py
|
pertinkoira/python
|
c486060f7f7c79ef9f48ced567f118ac7aae3f84
|
[
"Apache-2.0"
] | null | null | null |
bigml/api_handlers/topicdistributionhandler.py
|
pertinkoira/python
|
c486060f7f7c79ef9f48ced567f118ac7aae3f84
|
[
"Apache-2.0"
] | 3
|
2022-03-29T17:54:19.000Z
|
2022-03-29T17:54:42.000Z
|
bigml/api_handlers/topicdistributionhandler.py
|
pertinkoira/python
|
c486060f7f7c79ef9f48ced567f118ac7aae3f84
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Copyright 2016-2022 BigML
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Base class for topicdistributions' REST calls
https://bigml.com/api/topic_distributions
"""
try:
import simplejson as json
except ImportError:
import json
from bigml.api_handlers.resourcehandler import ResourceHandlerMixin
from bigml.api_handlers.resourcehandler import check_resource_type, \
get_resource_type, check_resource, get_topic_model_id
from bigml.constants import TOPIC_MODEL_PATH, TOPIC_DISTRIBUTION_PATH, \
IMAGE_FIELDS_FILTER, SPECIFIC_EXCLUDES
class TopicDistributionHandlerMixin(ResourceHandlerMixin):
"""This class is used by the BigML class as
a mixin that provides the REST calls models. It should not
be instantiated independently.
"""
def __init__(self):
"""Initializes the TopicDistributionHandler. This class is intended to
be used as a mixin on ResourceHandler, that inherits its
attributes and basic method from BigMLConnection, and must not be
instantiated independently.
"""
self.topic_distribution_url = self.url + TOPIC_DISTRIBUTION_PATH
def create_topic_distribution(self, topic_model, input_data=None,
args=None, wait_time=3, retries=10):
"""Creates a new topic distribution.
"""
resource_type = get_resource_type(topic_model)
if resource_type != TOPIC_MODEL_PATH:
raise Exception("A topic model resource id is needed"
" to create a prediction. %s found." %
resource_type)
topic_model_id = get_topic_model_id(topic_model)
if topic_model_id is None:
raise Exception("Failed to detect a correct topic model structure"
" in %s." % topic_model)
if isinstance(topic_model, dict) and \
topic_model.get("resource") is not None:
# retrieving fields info from model structure
model_info = topic_model
else:
image_fields_filter = IMAGE_FIELDS_FILTER + "," + \
",".join(SPECIFIC_EXCLUDES[resource_type])
model_info = check_resource(topic_model_id,
                                        query_string=image_fields_filter,
wait_time=wait_time,
retries=retries,
raise_on_error=True,
api=self)
if input_data is None:
input_data = {}
create_args = {}
if args is not None:
create_args.update(args)
create_args.update({
"input_data": self.prepare_image_fields(model_info, input_data),
"topicmodel": topic_model_id})
body = json.dumps(create_args)
return self._create(self.topic_distribution_url, body,
verify=self.verify_prediction)
def get_topic_distribution(self, topic_distribution, query_string=''):
"""Retrieves a topic distribution.
"""
check_resource_type(topic_distribution, TOPIC_DISTRIBUTION_PATH,
message="A topic distribution id is needed.")
return self.get_resource(topic_distribution, query_string=query_string)
def list_topic_distributions(self, query_string=''):
"""Lists all your topic distributions.
"""
return self._list(self.topic_distribution_url, query_string)
def update_topic_distribution(self, topic_distribution, changes):
"""Updates a topic distribution.
"""
check_resource_type(topic_distribution, TOPIC_DISTRIBUTION_PATH,
message="A topic distribution id is needed.")
return self.update_resource(topic_distribution, changes)
def delete_topic_distribution(self, topic_distribution):
"""Deletes a topic distribution.
"""
check_resource_type(topic_distribution, TOPIC_DISTRIBUTION_PATH,
message="A topic distribution id is needed.")
return self.delete_resource(topic_distribution)
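# Hypothetical usage sketch (not part of the mixin above): the handler is
# consumed through `bigml.api.BigML`, so creating and fetching a topic
# distribution looks roughly like this. Valid BigML credentials are required;
# the topic model id and the input field name are placeholders.
if __name__ == "__main__":  # pragma: no cover - illustrative only
    from bigml.api import BigML

    api = BigML()  # reads BIGML_USERNAME / BIGML_API_KEY from the environment
    example_distribution = api.create_topic_distribution(
        "topicmodel/000000000000000000000000",
        input_data={"text": "a sample document to score"})
    example_distribution = api.get_topic_distribution(example_distribution)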
| 38.266129
| 79
| 0.6451
|
c8a9d13c5cc17aeb1876cd7ab9891d7c1c89b509
| 70,339
|
py
|
Python
|
packages/python/plotly/plotly/graph_objs/volume/_colorbar.py
|
reservoirinvest/plotly.py
|
73651942b51587086da1eee53f67995f28d68ee3
|
[
"MIT"
] | 76
|
2020-07-06T14:44:05.000Z
|
2022-02-14T15:30:21.000Z
|
packages/python/plotly/plotly/graph_objs/volume/_colorbar.py
|
reservoirinvest/plotly.py
|
73651942b51587086da1eee53f67995f28d68ee3
|
[
"MIT"
] | 11
|
2020-08-09T02:30:14.000Z
|
2022-03-12T00:50:14.000Z
|
packages/python/plotly/plotly/graph_objs/volume/_colorbar.py
|
reservoirinvest/plotly.py
|
73651942b51587086da1eee53f67995f28d68ee3
|
[
"MIT"
] | 11
|
2020-07-12T16:18:07.000Z
|
2022-02-05T16:48:35.000Z
|
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class ColorBar(_BaseTraceHierarchyType):
# class properties
# --------------------
_parent_path_str = "volume"
_path_str = "volume.colorbar"
_valid_props = {
"bgcolor",
"bordercolor",
"borderwidth",
"dtick",
"exponentformat",
"len",
"lenmode",
"minexponent",
"nticks",
"outlinecolor",
"outlinewidth",
"separatethousands",
"showexponent",
"showticklabels",
"showtickprefix",
"showticksuffix",
"thickness",
"thicknessmode",
"tick0",
"tickangle",
"tickcolor",
"tickfont",
"tickformat",
"tickformatstopdefaults",
"tickformatstops",
"ticklen",
"tickmode",
"tickprefix",
"ticks",
"ticksuffix",
"ticktext",
"ticktextsrc",
"tickvals",
"tickvalssrc",
"tickwidth",
"title",
"titlefont",
"titleside",
"x",
"xanchor",
"xpad",
"y",
"yanchor",
"ypad",
}
# bgcolor
# -------
@property
def bgcolor(self):
"""
Sets the color of padded area.
The 'bgcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["bgcolor"]
@bgcolor.setter
def bgcolor(self, val):
self["bgcolor"] = val
# bordercolor
# -----------
@property
def bordercolor(self):
"""
Sets the axis line color.
The 'bordercolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["bordercolor"]
@bordercolor.setter
def bordercolor(self, val):
self["bordercolor"] = val
# borderwidth
# -----------
@property
def borderwidth(self):
"""
Sets the width (in px) or the border enclosing this color bar.
The 'borderwidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["borderwidth"]
@borderwidth.setter
def borderwidth(self, val):
self["borderwidth"] = val
# dtick
# -----
@property
def dtick(self):
"""
Sets the step in-between ticks on this axis. Use with `tick0`.
Must be a positive number, or special strings available to
"log" and "date" axes. If the axis `type` is "log", then ticks
are set every 10^(n*dtick) where n is the tick number. For
example, to set a tick mark at 1, 10, 100, 1000, ... set dtick
to 1. To set tick marks at 1, 100, 10000, ... set dtick to 2.
To set tick marks at 1, 5, 25, 125, 625, 3125, ... set dtick to
log_10(5), or 0.69897000433. "log" has several special values;
"L<f>", where `f` is a positive number, gives ticks linearly
spaced in value (but not position). For example `tick0` = 0.1,
`dtick` = "L0.5" will put ticks at 0.1, 0.6, 1.1, 1.6 etc. To
show powers of 10 plus small digits between, use "D1" (all
digits) or "D2" (only 2 and 5). `tick0` is ignored for "D1" and
"D2". If the axis `type` is "date", then you must convert the
time to milliseconds. For example, to set the interval between
ticks to one day, set `dtick` to 86400000.0. "date" also has
special values "M<n>" gives ticks spaced by a number of months.
`n` must be a positive integer. To set ticks on the 15th of
every third month, set `tick0` to "2000-01-15" and `dtick` to
"M3". To set ticks every 4 years, set `dtick` to "M48"
The 'dtick' property accepts values of any type
Returns
-------
Any
"""
return self["dtick"]
@dtick.setter
def dtick(self, val):
self["dtick"] = val
# exponentformat
# --------------
@property
def exponentformat(self):
"""
Determines a formatting rule for the tick exponents. For
example, consider the number 1,000,000,000. If "none", it
appears as 1,000,000,000. If "e", 1e+9. If "E", 1E+9. If
"power", 1x10^9 (with 9 in a super script). If "SI", 1G. If
"B", 1B.
The 'exponentformat' property is an enumeration that may be specified as:
- One of the following enumeration values:
['none', 'e', 'E', 'power', 'SI', 'B']
Returns
-------
Any
"""
return self["exponentformat"]
@exponentformat.setter
def exponentformat(self, val):
self["exponentformat"] = val
# len
# ---
@property
def len(self):
"""
        Sets the length of the color bar. This measure excludes the
padding of both ends. That is, the color bar length is this
length minus the padding on both ends.
The 'len' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["len"]
@len.setter
def len(self, val):
self["len"] = val
# lenmode
# -------
@property
def lenmode(self):
"""
Determines whether this color bar's length (i.e. the measure in
the color variation direction) is set in units of plot
"fraction" or in *pixels. Use `len` to set the value.
The 'lenmode' property is an enumeration that may be specified as:
- One of the following enumeration values:
['fraction', 'pixels']
Returns
-------
Any
"""
return self["lenmode"]
@lenmode.setter
def lenmode(self, val):
self["lenmode"] = val
# minexponent
# -----------
@property
def minexponent(self):
"""
Hide SI prefix for 10^n if |n| is below this number. This only
has an effect when `tickformat` is "SI" or "B".
The 'minexponent' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["minexponent"]
@minexponent.setter
def minexponent(self, val):
self["minexponent"] = val
# nticks
# ------
@property
def nticks(self):
"""
Specifies the maximum number of ticks for the particular axis.
The actual number of ticks will be chosen automatically to be
less than or equal to `nticks`. Has an effect only if
`tickmode` is set to "auto".
The 'nticks' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [0, 9223372036854775807]
Returns
-------
int
"""
return self["nticks"]
@nticks.setter
def nticks(self, val):
self["nticks"] = val
# outlinecolor
# ------------
@property
def outlinecolor(self):
"""
Sets the axis line color.
The 'outlinecolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["outlinecolor"]
@outlinecolor.setter
def outlinecolor(self, val):
self["outlinecolor"] = val
# outlinewidth
# ------------
@property
def outlinewidth(self):
"""
Sets the width (in px) of the axis line.
The 'outlinewidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["outlinewidth"]
@outlinewidth.setter
def outlinewidth(self, val):
self["outlinewidth"] = val
# separatethousands
# -----------------
@property
def separatethousands(self):
"""
If "true", even 4-digit integers are separated
The 'separatethousands' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["separatethousands"]
@separatethousands.setter
def separatethousands(self, val):
self["separatethousands"] = val
# showexponent
# ------------
@property
def showexponent(self):
"""
If "all", all exponents are shown besides their significands.
If "first", only the exponent of the first tick is shown. If
"last", only the exponent of the last tick is shown. If "none",
no exponents appear.
The 'showexponent' property is an enumeration that may be specified as:
- One of the following enumeration values:
['all', 'first', 'last', 'none']
Returns
-------
Any
"""
return self["showexponent"]
@showexponent.setter
def showexponent(self, val):
self["showexponent"] = val
# showticklabels
# --------------
@property
def showticklabels(self):
"""
Determines whether or not the tick labels are drawn.
The 'showticklabels' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["showticklabels"]
@showticklabels.setter
def showticklabels(self, val):
self["showticklabels"] = val
# showtickprefix
# --------------
@property
def showtickprefix(self):
"""
If "all", all tick labels are displayed with a prefix. If
"first", only the first tick is displayed with a prefix. If
"last", only the last tick is displayed with a suffix. If
"none", tick prefixes are hidden.
The 'showtickprefix' property is an enumeration that may be specified as:
- One of the following enumeration values:
['all', 'first', 'last', 'none']
Returns
-------
Any
"""
return self["showtickprefix"]
@showtickprefix.setter
def showtickprefix(self, val):
self["showtickprefix"] = val
# showticksuffix
# --------------
@property
def showticksuffix(self):
"""
Same as `showtickprefix` but for tick suffixes.
The 'showticksuffix' property is an enumeration that may be specified as:
- One of the following enumeration values:
['all', 'first', 'last', 'none']
Returns
-------
Any
"""
return self["showticksuffix"]
@showticksuffix.setter
def showticksuffix(self, val):
self["showticksuffix"] = val
# thickness
# ---------
@property
def thickness(self):
"""
        Sets the thickness of the color bar. This measure excludes the
size of the padding, ticks and labels.
The 'thickness' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["thickness"]
@thickness.setter
def thickness(self, val):
self["thickness"] = val
# thicknessmode
# -------------
@property
def thicknessmode(self):
"""
Determines whether this color bar's thickness (i.e. the measure
in the constant color direction) is set in units of plot
"fraction" or in "pixels". Use `thickness` to set the value.
The 'thicknessmode' property is an enumeration that may be specified as:
- One of the following enumeration values:
['fraction', 'pixels']
Returns
-------
Any
"""
return self["thicknessmode"]
@thicknessmode.setter
def thicknessmode(self, val):
self["thicknessmode"] = val
# tick0
# -----
@property
def tick0(self):
"""
Sets the placement of the first tick on this axis. Use with
`dtick`. If the axis `type` is "log", then you must take the
log of your starting tick (e.g. to set the starting tick to
100, set the `tick0` to 2) except when `dtick`=*L<f>* (see
`dtick` for more info). If the axis `type` is "date", it should
be a date string, like date data. If the axis `type` is
"category", it should be a number, using the scale where each
category is assigned a serial number from zero in the order it
appears.
The 'tick0' property accepts values of any type
Returns
-------
Any
"""
return self["tick0"]
@tick0.setter
def tick0(self, val):
self["tick0"] = val
# tickangle
# ---------
@property
def tickangle(self):
"""
Sets the angle of the tick labels with respect to the
horizontal. For example, a `tickangle` of -90 draws the tick
labels vertically.
The 'tickangle' property is a angle (in degrees) that may be
specified as a number between -180 and 180. Numeric values outside this
range are converted to the equivalent value
(e.g. 270 is converted to -90).
Returns
-------
int|float
"""
return self["tickangle"]
@tickangle.setter
def tickangle(self, val):
self["tickangle"] = val
# tickcolor
# ---------
@property
def tickcolor(self):
"""
Sets the tick color.
The 'tickcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["tickcolor"]
@tickcolor.setter
def tickcolor(self, val):
self["tickcolor"] = val
# tickfont
# --------
@property
def tickfont(self):
"""
Sets the color bar's tick label font
The 'tickfont' property is an instance of Tickfont
that may be specified as:
- An instance of :class:`plotly.graph_objs.volume.colorbar.Tickfont`
- A dict of string/value properties that will be passed
to the Tickfont constructor
Supported dict properties:
color
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The Chart Studio Cloud (at
https://chart-studio.plotly.com or on-premise)
generates images on a server, where only a
select number of fonts are installed and
supported. These include "Arial", "Balto",
"Courier New", "Droid Sans",, "Droid Serif",
"Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
size
Returns
-------
plotly.graph_objs.volume.colorbar.Tickfont
"""
return self["tickfont"]
@tickfont.setter
def tickfont(self, val):
self["tickfont"] = val
# tickformat
# ----------
@property
def tickformat(self):
"""
Sets the tick label formatting rule using d3 formatting mini-
languages which are very similar to those in Python. For
numbers, see: https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format And for dates
see: https://github.com/d3/d3-time-format#locale_format We add
one item to d3's date formatter: "%{n}f" for fractional seconds
with n digits. For example, *2016-10-13 09:15:23.456* with
tickformat "%H~%M~%S.%2f" would display "09~15~23.46"
The 'tickformat' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["tickformat"]
@tickformat.setter
def tickformat(self, val):
self["tickformat"] = val
# tickformatstops
# ---------------
@property
def tickformatstops(self):
"""
The 'tickformatstops' property is a tuple of instances of
Tickformatstop that may be specified as:
- A list or tuple of instances of plotly.graph_objs.volume.colorbar.Tickformatstop
- A list or tuple of dicts of string/value properties that
will be passed to the Tickformatstop constructor
Supported dict properties:
dtickrange
range [*min*, *max*], where "min", "max" -
dtick values which describe some zoom level, it
is possible to omit "min" or "max" value by
passing "null"
enabled
Determines whether or not this stop is used. If
`false`, this stop is ignored even within its
`dtickrange`.
name
When used in a template, named items are
created in the output figure in addition to any
items the figure already has in this array. You
can modify these items in the output figure by
making your own item with `templateitemname`
matching this `name` alongside your
modifications (including `visible: false` or
`enabled: false` to hide it). Has no effect
outside of a template.
templateitemname
Used to refer to a named item in this array in
the template. Named items from the template
will be created even without a matching item in
the input figure, but you can modify one by
making an item with `templateitemname` matching
its `name`, alongside your modifications
(including `visible: false` or `enabled: false`
to hide it). If there is no template or no
matching item, this item will be hidden unless
you explicitly show it with `visible: true`.
value
string - dtickformat for described zoom level,
the same as "tickformat"
Returns
-------
tuple[plotly.graph_objs.volume.colorbar.Tickformatstop]
"""
return self["tickformatstops"]
@tickformatstops.setter
def tickformatstops(self, val):
self["tickformatstops"] = val
# tickformatstopdefaults
# ----------------------
@property
def tickformatstopdefaults(self):
"""
When used in a template (as
layout.template.data.volume.colorbar.tickformatstopdefaults),
sets the default property values to use for elements of
volume.colorbar.tickformatstops
The 'tickformatstopdefaults' property is an instance of Tickformatstop
that may be specified as:
- An instance of :class:`plotly.graph_objs.volume.colorbar.Tickformatstop`
- A dict of string/value properties that will be passed
to the Tickformatstop constructor
Supported dict properties:
Returns
-------
plotly.graph_objs.volume.colorbar.Tickformatstop
"""
return self["tickformatstopdefaults"]
@tickformatstopdefaults.setter
def tickformatstopdefaults(self, val):
self["tickformatstopdefaults"] = val
# ticklen
# -------
@property
def ticklen(self):
"""
Sets the tick length (in px).
The 'ticklen' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["ticklen"]
@ticklen.setter
def ticklen(self, val):
self["ticklen"] = val
# tickmode
# --------
@property
def tickmode(self):
"""
Sets the tick mode for this axis. If "auto", the number of
ticks is set via `nticks`. If "linear", the placement of the
ticks is determined by a starting position `tick0` and a tick
step `dtick` ("linear" is the default value if `tick0` and
`dtick` are provided). If "array", the placement of the ticks
is set via `tickvals` and the tick text is `ticktext`. ("array"
is the default value if `tickvals` is provided).
The 'tickmode' property is an enumeration that may be specified as:
- One of the following enumeration values:
['auto', 'linear', 'array']
Returns
-------
Any
"""
return self["tickmode"]
@tickmode.setter
def tickmode(self, val):
self["tickmode"] = val
# tickprefix
# ----------
@property
def tickprefix(self):
"""
Sets a tick label prefix.
The 'tickprefix' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["tickprefix"]
@tickprefix.setter
def tickprefix(self, val):
self["tickprefix"] = val
# ticks
# -----
@property
def ticks(self):
"""
Determines whether ticks are drawn or not. If "", this axis'
ticks are not drawn. If "outside" ("inside"), this axis' are
drawn outside (inside) the axis lines.
The 'ticks' property is an enumeration that may be specified as:
- One of the following enumeration values:
['outside', 'inside', '']
Returns
-------
Any
"""
return self["ticks"]
@ticks.setter
def ticks(self, val):
self["ticks"] = val
# ticksuffix
# ----------
@property
def ticksuffix(self):
"""
Sets a tick label suffix.
The 'ticksuffix' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["ticksuffix"]
@ticksuffix.setter
def ticksuffix(self, val):
self["ticksuffix"] = val
# ticktext
# --------
@property
def ticktext(self):
"""
Sets the text displayed at the ticks position via `tickvals`.
Only has an effect if `tickmode` is set to "array". Used with
`tickvals`.
The 'ticktext' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["ticktext"]
@ticktext.setter
def ticktext(self, val):
self["ticktext"] = val
# ticktextsrc
# -----------
@property
def ticktextsrc(self):
"""
        Sets the source reference on Chart Studio Cloud for ticktext.
The 'ticktextsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["ticktextsrc"]
@ticktextsrc.setter
def ticktextsrc(self, val):
self["ticktextsrc"] = val
# tickvals
# --------
@property
def tickvals(self):
"""
Sets the values at which ticks on this axis appear. Only has an
effect if `tickmode` is set to "array". Used with `ticktext`.
The 'tickvals' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["tickvals"]
@tickvals.setter
def tickvals(self, val):
self["tickvals"] = val
# tickvalssrc
# -----------
@property
def tickvalssrc(self):
"""
        Sets the source reference on Chart Studio Cloud for tickvals.
The 'tickvalssrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["tickvalssrc"]
@tickvalssrc.setter
def tickvalssrc(self, val):
self["tickvalssrc"] = val
# tickwidth
# ---------
@property
def tickwidth(self):
"""
Sets the tick width (in px).
The 'tickwidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["tickwidth"]
@tickwidth.setter
def tickwidth(self, val):
self["tickwidth"] = val
# title
# -----
@property
def title(self):
"""
The 'title' property is an instance of Title
that may be specified as:
- An instance of :class:`plotly.graph_objs.volume.colorbar.Title`
- A dict of string/value properties that will be passed
to the Title constructor
Supported dict properties:
font
Sets this color bar's title font. Note that the
title's font used to be set by the now
deprecated `titlefont` attribute.
side
Determines the location of color bar's title
with respect to the color bar. Note that the
title's location used to be set by the now
deprecated `titleside` attribute.
text
Sets the title of the color bar. Note that
before the existence of `title.text`, the
title's contents used to be defined as the
`title` attribute itself. This behavior has
been deprecated.
Returns
-------
plotly.graph_objs.volume.colorbar.Title
"""
return self["title"]
@title.setter
def title(self, val):
self["title"] = val
# titlefont
# ---------
@property
def titlefont(self):
"""
Deprecated: Please use volume.colorbar.title.font instead. Sets
this color bar's title font. Note that the title's font used to
be set by the now deprecated `titlefont` attribute.
The 'font' property is an instance of Font
that may be specified as:
- An instance of :class:`plotly.graph_objs.volume.colorbar.title.Font`
- A dict of string/value properties that will be passed
to the Font constructor
Supported dict properties:
color
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The Chart Studio Cloud (at
https://chart-studio.plotly.com or on-premise)
generates images on a server, where only a
select number of fonts are installed and
supported. These include "Arial", "Balto",
"Courier New", "Droid Sans",, "Droid Serif",
"Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
size
Returns
-------
"""
return self["titlefont"]
@titlefont.setter
def titlefont(self, val):
self["titlefont"] = val
# titleside
# ---------
@property
def titleside(self):
"""
Deprecated: Please use volume.colorbar.title.side instead.
Determines the location of color bar's title with respect to
the color bar. Note that the title's location used to be set by
the now deprecated `titleside` attribute.
The 'side' property is an enumeration that may be specified as:
- One of the following enumeration values:
['right', 'top', 'bottom']
Returns
-------
"""
return self["titleside"]
@titleside.setter
def titleside(self, val):
self["titleside"] = val
# x
# -
@property
def x(self):
"""
Sets the x position of the color bar (in plot fraction).
The 'x' property is a number and may be specified as:
- An int or float in the interval [-2, 3]
Returns
-------
int|float
"""
return self["x"]
@x.setter
def x(self, val):
self["x"] = val
# xanchor
# -------
@property
def xanchor(self):
"""
Sets this color bar's horizontal position anchor. This anchor
binds the `x` position to the "left", "center" or "right" of
the color bar.
The 'xanchor' property is an enumeration that may be specified as:
- One of the following enumeration values:
['left', 'center', 'right']
Returns
-------
Any
"""
return self["xanchor"]
@xanchor.setter
def xanchor(self, val):
self["xanchor"] = val
# xpad
# ----
@property
def xpad(self):
"""
Sets the amount of padding (in px) along the x direction.
The 'xpad' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["xpad"]
@xpad.setter
def xpad(self, val):
self["xpad"] = val
# y
# -
@property
def y(self):
"""
Sets the y position of the color bar (in plot fraction).
The 'y' property is a number and may be specified as:
- An int or float in the interval [-2, 3]
Returns
-------
int|float
"""
return self["y"]
@y.setter
def y(self, val):
self["y"] = val
# yanchor
# -------
@property
def yanchor(self):
"""
        Sets this color bar's vertical position anchor. This anchor
binds the `y` position to the "top", "middle" or "bottom" of
the color bar.
The 'yanchor' property is an enumeration that may be specified as:
- One of the following enumeration values:
['top', 'middle', 'bottom']
Returns
-------
Any
"""
return self["yanchor"]
@yanchor.setter
def yanchor(self, val):
self["yanchor"] = val
# ypad
# ----
@property
def ypad(self):
"""
Sets the amount of padding (in px) along the y direction.
The 'ypad' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["ypad"]
@ypad.setter
def ypad(self, val):
self["ypad"] = val
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
bgcolor
Sets the color of padded area.
bordercolor
Sets the axis line color.
borderwidth
            Sets the width (in px) of the border enclosing this
color bar.
dtick
Sets the step in-between ticks on this axis. Use with
`tick0`. Must be a positive number, or special strings
available to "log" and "date" axes. If the axis `type`
is "log", then ticks are set every 10^(n*dtick) where n
is the tick number. For example, to set a tick mark at
1, 10, 100, 1000, ... set dtick to 1. To set tick marks
at 1, 100, 10000, ... set dtick to 2. To set tick marks
at 1, 5, 25, 125, 625, 3125, ... set dtick to
log_10(5), or 0.69897000433. "log" has several special
values; "L<f>", where `f` is a positive number, gives
ticks linearly spaced in value (but not position). For
example `tick0` = 0.1, `dtick` = "L0.5" will put ticks
at 0.1, 0.6, 1.1, 1.6 etc. To show powers of 10 plus
small digits between, use "D1" (all digits) or "D2"
(only 2 and 5). `tick0` is ignored for "D1" and "D2".
If the axis `type` is "date", then you must convert the
time to milliseconds. For example, to set the interval
between ticks to one day, set `dtick` to 86400000.0.
"date" also has special values "M<n>" gives ticks
spaced by a number of months. `n` must be a positive
integer. To set ticks on the 15th of every third month,
set `tick0` to "2000-01-15" and `dtick` to "M3". To set
ticks every 4 years, set `dtick` to "M48"
exponentformat
Determines a formatting rule for the tick exponents.
For example, consider the number 1,000,000,000. If
"none", it appears as 1,000,000,000. If "e", 1e+9. If
"E", 1E+9. If "power", 1x10^9 (with 9 in a super
script). If "SI", 1G. If "B", 1B.
len
            Sets the length of the color bar. This measure excludes
the padding of both ends. That is, the color bar length
is this length minus the padding on both ends.
lenmode
Determines whether this color bar's length (i.e. the
measure in the color variation direction) is set in
units of plot "fraction" or in *pixels. Use `len` to
set the value.
minexponent
Hide SI prefix for 10^n if |n| is below this number.
This only has an effect when `tickformat` is "SI" or
"B".
nticks
Specifies the maximum number of ticks for the
particular axis. The actual number of ticks will be
chosen automatically to be less than or equal to
`nticks`. Has an effect only if `tickmode` is set to
"auto".
outlinecolor
Sets the axis line color.
outlinewidth
Sets the width (in px) of the axis line.
separatethousands
If "true", even 4-digit integers are separated
showexponent
If "all", all exponents are shown besides their
significands. If "first", only the exponent of the
first tick is shown. If "last", only the exponent of
the last tick is shown. If "none", no exponents appear.
showticklabels
Determines whether or not the tick labels are drawn.
showtickprefix
If "all", all tick labels are displayed with a prefix.
If "first", only the first tick is displayed with a
prefix. If "last", only the last tick is displayed with
a suffix. If "none", tick prefixes are hidden.
showticksuffix
Same as `showtickprefix` but for tick suffixes.
thickness
            Sets the thickness of the color bar. This measure
excludes the size of the padding, ticks and labels.
thicknessmode
Determines whether this color bar's thickness (i.e. the
measure in the constant color direction) is set in
units of plot "fraction" or in "pixels". Use
`thickness` to set the value.
tick0
Sets the placement of the first tick on this axis. Use
with `dtick`. If the axis `type` is "log", then you
must take the log of your starting tick (e.g. to set
the starting tick to 100, set the `tick0` to 2) except
when `dtick`=*L<f>* (see `dtick` for more info). If the
axis `type` is "date", it should be a date string, like
date data. If the axis `type` is "category", it should
be a number, using the scale where each category is
assigned a serial number from zero in the order it
appears.
tickangle
Sets the angle of the tick labels with respect to the
horizontal. For example, a `tickangle` of -90 draws the
tick labels vertically.
tickcolor
Sets the tick color.
tickfont
Sets the color bar's tick label font
tickformat
Sets the tick label formatting rule using d3 formatting
mini-languages which are very similar to those in
Python. For numbers, see:
https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format And for
dates see: https://github.com/d3/d3-time-
format#locale_format We add one item to d3's date
formatter: "%{n}f" for fractional seconds with n
digits. For example, *2016-10-13 09:15:23.456* with
tickformat "%H~%M~%S.%2f" would display "09~15~23.46"
tickformatstops
A tuple of :class:`plotly.graph_objects.volume.colorbar
.Tickformatstop` instances or dicts with compatible
properties
tickformatstopdefaults
When used in a template (as layout.template.data.volume
.colorbar.tickformatstopdefaults), sets the default
property values to use for elements of
volume.colorbar.tickformatstops
ticklen
Sets the tick length (in px).
tickmode
Sets the tick mode for this axis. If "auto", the number
of ticks is set via `nticks`. If "linear", the
placement of the ticks is determined by a starting
position `tick0` and a tick step `dtick` ("linear" is
the default value if `tick0` and `dtick` are provided).
If "array", the placement of the ticks is set via
`tickvals` and the tick text is `ticktext`. ("array" is
the default value if `tickvals` is provided).
tickprefix
Sets a tick label prefix.
ticks
Determines whether ticks are drawn or not. If "", this
axis' ticks are not drawn. If "outside" ("inside"),
            this axis' ticks are drawn outside (inside) the axis lines.
ticksuffix
Sets a tick label suffix.
ticktext
Sets the text displayed at the ticks position via
`tickvals`. Only has an effect if `tickmode` is set to
"array". Used with `tickvals`.
ticktextsrc
Sets the source reference on Chart Studio Cloud for
            ticktext.
tickvals
Sets the values at which ticks on this axis appear.
Only has an effect if `tickmode` is set to "array".
Used with `ticktext`.
tickvalssrc
Sets the source reference on Chart Studio Cloud for
            tickvals.
tickwidth
Sets the tick width (in px).
title
:class:`plotly.graph_objects.volume.colorbar.Title`
instance or dict with compatible properties
titlefont
Deprecated: Please use volume.colorbar.title.font
instead. Sets this color bar's title font. Note that
the title's font used to be set by the now deprecated
`titlefont` attribute.
titleside
Deprecated: Please use volume.colorbar.title.side
instead. Determines the location of color bar's title
with respect to the color bar. Note that the title's
location used to be set by the now deprecated
`titleside` attribute.
x
Sets the x position of the color bar (in plot
fraction).
xanchor
Sets this color bar's horizontal position anchor. This
anchor binds the `x` position to the "left", "center"
or "right" of the color bar.
xpad
Sets the amount of padding (in px) along the x
direction.
y
Sets the y position of the color bar (in plot
fraction).
yanchor
            Sets this color bar's vertical position anchor. This
anchor binds the `y` position to the "top", "middle" or
"bottom" of the color bar.
ypad
Sets the amount of padding (in px) along the y
direction.
"""
_mapped_properties = {
"titlefont": ("title", "font"),
"titleside": ("title", "side"),
}
def __init__(
self,
arg=None,
bgcolor=None,
bordercolor=None,
borderwidth=None,
dtick=None,
exponentformat=None,
len=None,
lenmode=None,
minexponent=None,
nticks=None,
outlinecolor=None,
outlinewidth=None,
separatethousands=None,
showexponent=None,
showticklabels=None,
showtickprefix=None,
showticksuffix=None,
thickness=None,
thicknessmode=None,
tick0=None,
tickangle=None,
tickcolor=None,
tickfont=None,
tickformat=None,
tickformatstops=None,
tickformatstopdefaults=None,
ticklen=None,
tickmode=None,
tickprefix=None,
ticks=None,
ticksuffix=None,
ticktext=None,
ticktextsrc=None,
tickvals=None,
tickvalssrc=None,
tickwidth=None,
title=None,
titlefont=None,
titleside=None,
x=None,
xanchor=None,
xpad=None,
y=None,
yanchor=None,
ypad=None,
**kwargs
):
"""
Construct a new ColorBar object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.volume.ColorBar`
bgcolor
Sets the color of padded area.
bordercolor
Sets the axis line color.
borderwidth
            Sets the width (in px) of the border enclosing this
color bar.
dtick
Sets the step in-between ticks on this axis. Use with
`tick0`. Must be a positive number, or special strings
available to "log" and "date" axes. If the axis `type`
is "log", then ticks are set every 10^(n*dtick) where n
is the tick number. For example, to set a tick mark at
1, 10, 100, 1000, ... set dtick to 1. To set tick marks
at 1, 100, 10000, ... set dtick to 2. To set tick marks
at 1, 5, 25, 125, 625, 3125, ... set dtick to
log_10(5), or 0.69897000433. "log" has several special
values; "L<f>", where `f` is a positive number, gives
ticks linearly spaced in value (but not position). For
example `tick0` = 0.1, `dtick` = "L0.5" will put ticks
at 0.1, 0.6, 1.1, 1.6 etc. To show powers of 10 plus
small digits between, use "D1" (all digits) or "D2"
(only 2 and 5). `tick0` is ignored for "D1" and "D2".
If the axis `type` is "date", then you must convert the
time to milliseconds. For example, to set the interval
between ticks to one day, set `dtick` to 86400000.0.
"date" also has special values "M<n>" gives ticks
spaced by a number of months. `n` must be a positive
integer. To set ticks on the 15th of every third month,
set `tick0` to "2000-01-15" and `dtick` to "M3". To set
ticks every 4 years, set `dtick` to "M48"
exponentformat
Determines a formatting rule for the tick exponents.
For example, consider the number 1,000,000,000. If
"none", it appears as 1,000,000,000. If "e", 1e+9. If
"E", 1E+9. If "power", 1x10^9 (with 9 in a super
script). If "SI", 1G. If "B", 1B.
len
            Sets the length of the color bar. This measure excludes
the padding of both ends. That is, the color bar length
is this length minus the padding on both ends.
lenmode
Determines whether this color bar's length (i.e. the
measure in the color variation direction) is set in
units of plot "fraction" or in *pixels. Use `len` to
set the value.
minexponent
Hide SI prefix for 10^n if |n| is below this number.
This only has an effect when `tickformat` is "SI" or
"B".
nticks
Specifies the maximum number of ticks for the
particular axis. The actual number of ticks will be
chosen automatically to be less than or equal to
`nticks`. Has an effect only if `tickmode` is set to
"auto".
outlinecolor
Sets the axis line color.
outlinewidth
Sets the width (in px) of the axis line.
separatethousands
If "true", even 4-digit integers are separated
showexponent
If "all", all exponents are shown besides their
significands. If "first", only the exponent of the
first tick is shown. If "last", only the exponent of
the last tick is shown. If "none", no exponents appear.
showticklabels
Determines whether or not the tick labels are drawn.
showtickprefix
If "all", all tick labels are displayed with a prefix.
If "first", only the first tick is displayed with a
prefix. If "last", only the last tick is displayed with
a suffix. If "none", tick prefixes are hidden.
showticksuffix
Same as `showtickprefix` but for tick suffixes.
thickness
            Sets the thickness of the color bar. This measure
excludes the size of the padding, ticks and labels.
thicknessmode
Determines whether this color bar's thickness (i.e. the
measure in the constant color direction) is set in
units of plot "fraction" or in "pixels". Use
`thickness` to set the value.
tick0
Sets the placement of the first tick on this axis. Use
with `dtick`. If the axis `type` is "log", then you
must take the log of your starting tick (e.g. to set
the starting tick to 100, set the `tick0` to 2) except
when `dtick`=*L<f>* (see `dtick` for more info). If the
axis `type` is "date", it should be a date string, like
date data. If the axis `type` is "category", it should
be a number, using the scale where each category is
assigned a serial number from zero in the order it
appears.
tickangle
Sets the angle of the tick labels with respect to the
horizontal. For example, a `tickangle` of -90 draws the
tick labels vertically.
tickcolor
Sets the tick color.
tickfont
Sets the color bar's tick label font
tickformat
Sets the tick label formatting rule using d3 formatting
mini-languages which are very similar to those in
Python. For numbers, see:
https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format And for
dates see: https://github.com/d3/d3-time-
format#locale_format We add one item to d3's date
formatter: "%{n}f" for fractional seconds with n
digits. For example, *2016-10-13 09:15:23.456* with
tickformat "%H~%M~%S.%2f" would display "09~15~23.46"
tickformatstops
A tuple of :class:`plotly.graph_objects.volume.colorbar
.Tickformatstop` instances or dicts with compatible
properties
tickformatstopdefaults
When used in a template (as layout.template.data.volume
.colorbar.tickformatstopdefaults), sets the default
property values to use for elements of
volume.colorbar.tickformatstops
ticklen
Sets the tick length (in px).
tickmode
Sets the tick mode for this axis. If "auto", the number
of ticks is set via `nticks`. If "linear", the
placement of the ticks is determined by a starting
position `tick0` and a tick step `dtick` ("linear" is
the default value if `tick0` and `dtick` are provided).
If "array", the placement of the ticks is set via
`tickvals` and the tick text is `ticktext`. ("array" is
the default value if `tickvals` is provided).
tickprefix
Sets a tick label prefix.
ticks
Determines whether ticks are drawn or not. If "", this
axis' ticks are not drawn. If "outside" ("inside"),
            this axis' ticks are drawn outside (inside) the axis lines.
ticksuffix
Sets a tick label suffix.
ticktext
Sets the text displayed at the ticks position via
`tickvals`. Only has an effect if `tickmode` is set to
"array". Used with `tickvals`.
ticktextsrc
Sets the source reference on Chart Studio Cloud for
            ticktext.
tickvals
Sets the values at which ticks on this axis appear.
Only has an effect if `tickmode` is set to "array".
Used with `ticktext`.
tickvalssrc
Sets the source reference on Chart Studio Cloud for
            tickvals.
tickwidth
Sets the tick width (in px).
title
:class:`plotly.graph_objects.volume.colorbar.Title`
instance or dict with compatible properties
titlefont
Deprecated: Please use volume.colorbar.title.font
instead. Sets this color bar's title font. Note that
the title's font used to be set by the now deprecated
`titlefont` attribute.
titleside
Deprecated: Please use volume.colorbar.title.side
instead. Determines the location of color bar's title
with respect to the color bar. Note that the title's
location used to be set by the now deprecated
`titleside` attribute.
x
Sets the x position of the color bar (in plot
fraction).
xanchor
Sets this color bar's horizontal position anchor. This
anchor binds the `x` position to the "left", "center"
or "right" of the color bar.
xpad
Sets the amount of padding (in px) along the x
direction.
y
Sets the y position of the color bar (in plot
fraction).
yanchor
            Sets this color bar's vertical position anchor. This
anchor binds the `y` position to the "top", "middle" or
"bottom" of the color bar.
ypad
Sets the amount of padding (in px) along the y
direction.
Returns
-------
ColorBar
"""
super(ColorBar, self).__init__("colorbar")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.volume.ColorBar
constructor must be a dict or
an instance of :class:`plotly.graph_objs.volume.ColorBar`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("bgcolor", None)
_v = bgcolor if bgcolor is not None else _v
if _v is not None:
self["bgcolor"] = _v
_v = arg.pop("bordercolor", None)
_v = bordercolor if bordercolor is not None else _v
if _v is not None:
self["bordercolor"] = _v
_v = arg.pop("borderwidth", None)
_v = borderwidth if borderwidth is not None else _v
if _v is not None:
self["borderwidth"] = _v
_v = arg.pop("dtick", None)
_v = dtick if dtick is not None else _v
if _v is not None:
self["dtick"] = _v
_v = arg.pop("exponentformat", None)
_v = exponentformat if exponentformat is not None else _v
if _v is not None:
self["exponentformat"] = _v
_v = arg.pop("len", None)
_v = len if len is not None else _v
if _v is not None:
self["len"] = _v
_v = arg.pop("lenmode", None)
_v = lenmode if lenmode is not None else _v
if _v is not None:
self["lenmode"] = _v
_v = arg.pop("minexponent", None)
_v = minexponent if minexponent is not None else _v
if _v is not None:
self["minexponent"] = _v
_v = arg.pop("nticks", None)
_v = nticks if nticks is not None else _v
if _v is not None:
self["nticks"] = _v
_v = arg.pop("outlinecolor", None)
_v = outlinecolor if outlinecolor is not None else _v
if _v is not None:
self["outlinecolor"] = _v
_v = arg.pop("outlinewidth", None)
_v = outlinewidth if outlinewidth is not None else _v
if _v is not None:
self["outlinewidth"] = _v
_v = arg.pop("separatethousands", None)
_v = separatethousands if separatethousands is not None else _v
if _v is not None:
self["separatethousands"] = _v
_v = arg.pop("showexponent", None)
_v = showexponent if showexponent is not None else _v
if _v is not None:
self["showexponent"] = _v
_v = arg.pop("showticklabels", None)
_v = showticklabels if showticklabels is not None else _v
if _v is not None:
self["showticklabels"] = _v
_v = arg.pop("showtickprefix", None)
_v = showtickprefix if showtickprefix is not None else _v
if _v is not None:
self["showtickprefix"] = _v
_v = arg.pop("showticksuffix", None)
_v = showticksuffix if showticksuffix is not None else _v
if _v is not None:
self["showticksuffix"] = _v
_v = arg.pop("thickness", None)
_v = thickness if thickness is not None else _v
if _v is not None:
self["thickness"] = _v
_v = arg.pop("thicknessmode", None)
_v = thicknessmode if thicknessmode is not None else _v
if _v is not None:
self["thicknessmode"] = _v
_v = arg.pop("tick0", None)
_v = tick0 if tick0 is not None else _v
if _v is not None:
self["tick0"] = _v
_v = arg.pop("tickangle", None)
_v = tickangle if tickangle is not None else _v
if _v is not None:
self["tickangle"] = _v
_v = arg.pop("tickcolor", None)
_v = tickcolor if tickcolor is not None else _v
if _v is not None:
self["tickcolor"] = _v
_v = arg.pop("tickfont", None)
_v = tickfont if tickfont is not None else _v
if _v is not None:
self["tickfont"] = _v
_v = arg.pop("tickformat", None)
_v = tickformat if tickformat is not None else _v
if _v is not None:
self["tickformat"] = _v
_v = arg.pop("tickformatstops", None)
_v = tickformatstops if tickformatstops is not None else _v
if _v is not None:
self["tickformatstops"] = _v
_v = arg.pop("tickformatstopdefaults", None)
_v = tickformatstopdefaults if tickformatstopdefaults is not None else _v
if _v is not None:
self["tickformatstopdefaults"] = _v
_v = arg.pop("ticklen", None)
_v = ticklen if ticklen is not None else _v
if _v is not None:
self["ticklen"] = _v
_v = arg.pop("tickmode", None)
_v = tickmode if tickmode is not None else _v
if _v is not None:
self["tickmode"] = _v
_v = arg.pop("tickprefix", None)
_v = tickprefix if tickprefix is not None else _v
if _v is not None:
self["tickprefix"] = _v
_v = arg.pop("ticks", None)
_v = ticks if ticks is not None else _v
if _v is not None:
self["ticks"] = _v
_v = arg.pop("ticksuffix", None)
_v = ticksuffix if ticksuffix is not None else _v
if _v is not None:
self["ticksuffix"] = _v
_v = arg.pop("ticktext", None)
_v = ticktext if ticktext is not None else _v
if _v is not None:
self["ticktext"] = _v
_v = arg.pop("ticktextsrc", None)
_v = ticktextsrc if ticktextsrc is not None else _v
if _v is not None:
self["ticktextsrc"] = _v
_v = arg.pop("tickvals", None)
_v = tickvals if tickvals is not None else _v
if _v is not None:
self["tickvals"] = _v
_v = arg.pop("tickvalssrc", None)
_v = tickvalssrc if tickvalssrc is not None else _v
if _v is not None:
self["tickvalssrc"] = _v
_v = arg.pop("tickwidth", None)
_v = tickwidth if tickwidth is not None else _v
if _v is not None:
self["tickwidth"] = _v
_v = arg.pop("title", None)
_v = title if title is not None else _v
if _v is not None:
self["title"] = _v
_v = arg.pop("titlefont", None)
_v = titlefont if titlefont is not None else _v
if _v is not None:
self["titlefont"] = _v
_v = arg.pop("titleside", None)
_v = titleside if titleside is not None else _v
if _v is not None:
self["titleside"] = _v
_v = arg.pop("x", None)
_v = x if x is not None else _v
if _v is not None:
self["x"] = _v
_v = arg.pop("xanchor", None)
_v = xanchor if xanchor is not None else _v
if _v is not None:
self["xanchor"] = _v
_v = arg.pop("xpad", None)
_v = xpad if xpad is not None else _v
if _v is not None:
self["xpad"] = _v
_v = arg.pop("y", None)
_v = y if y is not None else _v
if _v is not None:
self["y"] = _v
_v = arg.pop("yanchor", None)
_v = yanchor if yanchor is not None else _v
if _v is not None:
self["yanchor"] = _v
_v = arg.pop("ypad", None)
_v = ypad if ypad is not None else _v
if _v is not None:
self["ypad"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
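if __name__ == "__main__":
    # Editor's illustrative sketch, not part of the generated plotly source:
    # a minimal, hedged example of constructing the ColorBar defined above and
    # combining the tick-related settings documented in its docstrings. All
    # concrete values (title text, tick positions, labels) are assumptions.
    demo_colorbar = ColorBar(
        title=dict(text="density", side="right"),  # preferred over `titleside`
        tickmode="array",        # place ticks exactly at `tickvals`
        tickvals=[0, 0.5, 1.0],
        ticktext=["low", "mid", "high"],
        ticks="outside",
        ticklen=6,
        thickness=20,
    )
    # In practice this object is usually passed as the `colorbar` argument of
    # plotly.graph_objects.Volume rather than instantiated directly.
    print(demo_colorbar.to_plotly_json())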
| 35.632725
| 92
| 0.556604
|
bd5fa11c7fdd37a3aed65e592b6886abeeaf73b2
| 11,444
|
py
|
Python
|
flink-ai-flow/examples/python_examples/sklearn_batch_train_stream_predict/python_codes/batch_train_stream_predict_executor.py
|
shanshanpt/flink-ai-extended
|
c9f4a980ac229188a2bc09558952f7e0085bda70
|
[
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 1
|
2021-06-03T05:37:21.000Z
|
2021-06-03T05:37:21.000Z
|
flink-ai-flow/examples/python_examples/sklearn_batch_train_stream_predict/python_codes/batch_train_stream_predict_executor.py
|
sentimentist/flink-ai-extended
|
689d000f2db8919fd80e0725a1609918ca4a26f4
|
[
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null |
flink-ai-flow/examples/python_examples/sklearn_batch_train_stream_predict/python_codes/batch_train_stream_predict_executor.py
|
sentimentist/flink-ai-extended
|
689d000f2db8919fd80e0725a1609918ca4a26f4
|
[
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import os
import shutil
import threading
import time
from typing import List
import numpy as np
from joblib import dump, load
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_score
from sklearn.preprocessing import StandardScaler
from sklearn.utils import check_random_state
import ai_flow as af
from streamz import Stream
from ai_flow.model_center.entity.model_version_stage import ModelVersionStage
from python_ai_flow import FunctionContext, Executor, ExampleExecutor
from ai_flow.common.path_util import get_file_dir
from notification_service.base_notification import EventWatcher
def preprocess_data(x_data, y_data=None):
random_state = check_random_state(0)
permutation = random_state.permutation(x_data.shape[0])
if y_data is None:
return x_data[permutation]
else:
return x_data[permutation], y_data[permutation]
class ExampleReader(ExampleExecutor):
def execute(self, function_context: FunctionContext, input_list: List) -> List:
with np.load(function_context.node_spec.example_meta.batch_uri) as f:
x_train, y_train = f['x_train'], f['y_train']
return [[x_train, y_train]]
class ExampleTransformer(Executor):
def execute(self, function_context: FunctionContext, input_list: List) -> List:
x_train, y_train = preprocess_data(input_list[0][0], input_list[0][1])
x_train = x_train.reshape((x_train.shape[0], -1))
res = [[StandardScaler().fit_transform(x_train), y_train]]
return res
class ModelTrainer(Executor):
def execute(self, function_context: FunctionContext, input_list: List) -> List:
# https://scikit-learn.org/stable/auto_examples/linear_model/plot_sparse_logistic_regression_mnist.html
clf = LogisticRegression(C=50. / 5000, penalty='l1', solver='saga', tol=1)
x_train, y_train = input_list[0][0], input_list[0][1]
clf.fit(x_train, y_train)
model_path = get_file_dir(__file__) + '/saved_model'
if not os.path.exists(model_path):
os.makedirs(model_path)
model_timestamp = time.strftime("%Y%m%d%H%M%S", time.localtime())
model_path = model_path + '/' + model_timestamp
dump(clf, model_path)
af.register_model_version(model=function_context.node_spec.output_model, model_path=model_path)
print("Done for {}".format(self.__class__.__name__))
return []
class ValidateExampleReader(ExampleExecutor):
def execute(self, function_context: FunctionContext, input_list: List) -> List:
with np.load(function_context.node_spec.example_meta.batch_uri) as f:
x_test, y_test = f['x_test'], f['y_test']
return [[x_test, y_test]]
class ValidateTransformer(Executor):
def execute(self, function_context: FunctionContext, input_list: List) -> List:
x_test, y_test = preprocess_data(input_list[0][0], input_list[0][1])
x_test = x_test.reshape((x_test.shape[0], -1))
return [[StandardScaler().fit_transform(x_test), y_test]]
class ModelValidator(Executor):
def __init__(self, artifact_name):
super().__init__()
self.artifact_name = artifact_name
self.model_name = None
self.model_path = None
self.model_version = None
self.model_meta = None
def setup(self, function_context: FunctionContext):
self.model_name = function_context.node_spec.model.name
self.model_meta = af.get_latest_generated_model_version(self.model_name)
self.model_path = self.model_meta.model_path
self.model_version = self.model_meta.version
def execute(self, function_context: FunctionContext, input_list: List) -> List:
deployed_model_version = af.get_deployed_model_version(model_name=self.model_name)
x_validate, y_validate = input_list[0][0], input_list[0][1]
clf = load(self.model_path)
scores = cross_val_score(clf, x_validate, y_validate, scoring='precision_macro')
batch_uri = af.get_artifact_by_name(self.artifact_name).batch_uri
if deployed_model_version is None:
with open(batch_uri, 'a') as f:
f.write('generated model version[{}] scores: {}\n'.format(self.model_version, np.mean(scores)))
af.update_model_version(model_name=self.model_name,
model_version=self.model_version,
current_stage=ModelVersionStage.VALIDATED)
else:
deployed_clf = load(deployed_model_version.model_path)
deployed_scores = cross_val_score(deployed_clf, x_validate, y_validate, scoring='precision_macro')
f = open(batch_uri, 'a')
f.write('current model version[{}] scores: {}\n'.format(deployed_model_version.version,
np.mean(deployed_scores)))
f.write('new generated model version[{}] scores: {}\n'.format(self.model_version, np.mean(scores)))
if np.mean(scores) > np.mean(deployed_scores):
                # Mark the latest generated model version as validated
af.update_model_version(model_name=self.model_name,
model_version=self.model_version,
current_stage=ModelVersionStage.VALIDATED)
f.write('new generated model version[{}] pass validation.\n'.format(self.model_version))
else:
f.write('new generated model version[{}] fail validation.\n'.format(self.model_version))
f.close()
return []
class ModelPusher(Executor):
def __init__(self, artifact):
super().__init__()
self.artifact = artifact
def execute(self, function_context: FunctionContext, input_list: List) -> List:
model_name = function_context.node_spec.model.name
validated_model = af.get_latest_validated_model_version(model_name)
# Deprecate deployed model
deployed_model_version = af.get_deployed_model_version(model_name)
if deployed_model_version is not None:
af.update_model_version(model_name=model_name,
model_version=deployed_model_version.version,
current_stage=ModelVersionStage.DEPRECATED)
af.update_model_version(model_name=model_name,
model_version=validated_model.version,
current_stage=ModelVersionStage.DEPLOYED)
af.send_event(key='START_PREDICTION', value=validated_model.version)
print(validated_model.version)
# Copy deployed model to deploy_model_dir
deployed_model_dir = af.get_artifact_by_name(self.artifact).batch_uri
if not os.path.exists(deployed_model_dir):
os.makedirs(deployed_model_dir)
for file in os.listdir(deployed_model_dir):
file_path = os.path.join(deployed_model_dir, file)
if os.path.isfile(file_path):
os.remove(file_path)
elif os.path.isdir(file_path):
shutil.rmtree(file_path, True)
deployed_model_version = af.get_deployed_model_version(model_name)
shutil.copy(deployed_model_version.model_path, deployed_model_dir)
return []
class ExamplePredictThread(threading.Thread):
def __init__(self, stream_uri):
super().__init__()
self.stream_uri = stream_uri
self.stream = Stream()
def run(self) -> None:
for i in range(0, 8):
with np.load(self.stream_uri) as f:
x_test = f['x_test']
self.stream.emit(x_test)
print("### {} {}".format(self.__class__.__name__, "generate data flow"))
time.sleep(50)
class PredictExampleReader(ExampleExecutor):
def __init__(self):
super().__init__()
self.thread = None
def setup(self, function_context: FunctionContext):
stream_uri = function_context.node_spec.example_meta.stream_uri
self.thread = ExamplePredictThread(stream_uri)
self.thread.start()
def execute(self, function_context: FunctionContext, input_list: List) -> List:
return [self.thread.stream]
class PredictTransformer(Executor):
def execute(self, function_context: FunctionContext, input_list: List) -> List:
def transform(df):
x_test = preprocess_data(df, None)
x_test = x_test.reshape((x_test.shape[0], -1))
return StandardScaler().fit_transform(x_test)
return [input_list[0].map(transform)]
class PredictWatcher(EventWatcher):
def __init__(self):
super().__init__()
self.model_version = None
def process(self, notifications):
for notification in notifications:
self.model_version = notification.value
class ModelPredictor(Executor):
def __init__(self):
super().__init__()
self.model_name = None
self.model_version = None
self.watcher = PredictWatcher()
def setup(self, function_context: FunctionContext):
        # In this class, we show the usage of the start_listen_event method, which makes it possible to react to
        # events sent from elsewhere (here, the 'START_PREDICTION' event sent by ModelPusher). Users can also refer
        # to the `stream train stream predict` example to directly use the provided API to get the model version.
af.start_listen_event(key='START_PREDICTION', watcher=self.watcher)
self.model_name = function_context.node_spec.model.name
print("### {} setup done for {}".format(self.__class__.__name__, function_context.node_spec.model.name))
def execute(self, function_context: FunctionContext, input_list: List) -> List:
while self.watcher.model_version is None:
time.sleep(2)
print("### {} ".format(self.watcher.model_version))
def predict(df):
x_test = df
model_meta = af.get_deployed_model_version(self.model_name)
model_path = model_meta.model_path
clf = load(model_path)
return model_meta.version, clf.predict(x_test)
return [input_list[0].map(predict)]
class ExampleWriter(ExampleExecutor):
def execute(self, function_context: FunctionContext, input_list: List) -> List:
def write_example(df):
with open(function_context.node_spec.example_meta.stream_uri, 'a') as f:
f.write('model version[{}] predict: {}\n'.format(df[0], df[1]))
return df
def sink(df):
pass
input_list[0].map(write_example).sink(sink)
return []
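if __name__ == "__main__":
    # Editor's illustrative, standalone sketch (not part of the original
    # example): the predict-side executors above chain streamz stages with
    # Stream.map(...).sink(...) and push batches in via emit(). The toy
    # pipeline below reproduces that pattern without ai_flow, training an
    # in-memory LogisticRegression instead of loading a deployed model from
    # the model center. All data and names here are assumptions; it reuses the
    # module-level imports (np, LogisticRegression, Stream) declared above.
    rng = np.random.RandomState(0)
    x_train = rng.rand(100, 4)
    y_train = (x_train[:, 0] > 0.5).astype(int)
    toy_clf = LogisticRegression().fit(x_train, y_train)

    source = Stream()
    collected = []
    source.map(lambda batch: toy_clf.predict(batch)).sink(collected.append)

    for _ in range(3):  # mimics ExamplePredictThread calling self.stream.emit(...)
        source.emit(rng.rand(5, 4))
    print(collected)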
| 40.871429
| 119
| 0.673366
|
c032bf2984693db1ee8a5f1362510868626f63fd
| 265
|
py
|
Python
|
custom_user/serializers.py
|
manoj-makkuboy/feed-ninja
|
339bac6f24f9bf968e5cd0d83eb71e36d7c525cd
|
[
"MIT"
] | null | null | null |
custom_user/serializers.py
|
manoj-makkuboy/feed-ninja
|
339bac6f24f9bf968e5cd0d83eb71e36d7c525cd
|
[
"MIT"
] | null | null | null |
custom_user/serializers.py
|
manoj-makkuboy/feed-ninja
|
339bac6f24f9bf968e5cd0d83eb71e36d7c525cd
|
[
"MIT"
] | null | null | null |
# from django.contrib.auth.models import User
from custom_user.models import CustomUser
from rest_framework.serializers import ModelSerializer
class CustomUserSerializer(ModelSerializer):
class Meta:
model = CustomUser
fields = ('location',)
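def _example_usage(user):
    """Editor's illustrative sketch, not part of the original module: how the
    serializer above is typically used. Assumes Django settings are configured
    and `user` is a saved CustomUser instance; only `location` is exposed by
    the Meta class above."""
    read = CustomUserSerializer(user)
    serialized = read.data  # e.g. {'location': 'Berlin'}
    write = CustomUserSerializer(data={'location': 'Berlin'})
    if write.is_valid():
        # Creates a new CustomUser, assuming `location` is the only required field.
        write.save()
    return serialized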
| 24.090909
| 54
| 0.762264
|
ee24fc69b8ef9a82a9723220f471d7146cceaedd
| 9,270
|
py
|
Python
|
maml/datasets.py
|
nmi-lab/pytorch-maml
|
86f09531ac1618619be97acebbedaca14a3d09ac
|
[
"MIT"
] | null | null | null |
maml/datasets.py
|
nmi-lab/pytorch-maml
|
86f09531ac1618619be97acebbedaca14a3d09ac
|
[
"MIT"
] | null | null | null |
maml/datasets.py
|
nmi-lab/pytorch-maml
|
86f09531ac1618619be97acebbedaca14a3d09ac
|
[
"MIT"
] | null | null | null |
import torch.nn.functional as F
from collections import namedtuple
from torchmeta.datasets import Omniglot, MiniImagenet
from torchmeta.toy import Sinusoid
from torchmeta.transforms import ClassSplitter, Categorical, Rotation
from torchvision.transforms import ToTensor, Resize, Compose
from maml.model import ModelConvOmniglot, ModelConvMiniImagenet, ModelMLPSinusoid, ModelConvDoubleNMNIST, ModelDECOLLE
from maml.utils import ToTensor1D
from torchmeta.utils.data import CombinationMetaDataset
Benchmark = namedtuple('Benchmark', 'meta_train_dataset meta_val_dataset '
'meta_test_dataset model loss_function')
def get_benchmark_by_name(name,
folder,
num_ways,
num_shots,
num_shots_test,
hidden_size=None):
dataset_transform = ClassSplitter(shuffle=True,
num_train_per_class=num_shots,
num_test_per_class=num_shots_test)
if name == 'sinusoid':
transform = ToTensor1D()
meta_train_dataset = Sinusoid(num_shots + num_shots_test,
num_tasks=1000000,
transform=transform,
target_transform=transform,
dataset_transform=dataset_transform)
meta_val_dataset = Sinusoid(num_shots + num_shots_test,
num_tasks=1000000,
transform=transform,
target_transform=transform,
dataset_transform=dataset_transform)
meta_test_dataset = Sinusoid(num_shots + num_shots_test,
num_tasks=1000000,
transform=transform,
target_transform=transform,
dataset_transform=dataset_transform)
model = ModelMLPSinusoid(hidden_sizes=[40, 40])
loss_function = F.mse_loss
elif name == 'omniglot':
class_augmentations = [Rotation([90, 180, 270])]
transform = Compose([Resize(28), ToTensor()])
meta_train_dataset = Omniglot(folder,
transform=transform,
target_transform=Categorical(num_ways),
num_classes_per_task=num_ways,
meta_train=True,
class_augmentations=class_augmentations,
dataset_transform=dataset_transform,
download=True)
meta_val_dataset = Omniglot(folder,
transform=transform,
target_transform=Categorical(num_ways),
num_classes_per_task=num_ways,
meta_val=True,
class_augmentations=class_augmentations,
dataset_transform=dataset_transform)
meta_test_dataset = Omniglot(folder,
transform=transform,
target_transform=Categorical(num_ways),
num_classes_per_task=num_ways,
meta_test=True,
dataset_transform=dataset_transform)
model = ModelConvOmniglot(num_ways, hidden_size=hidden_size)
loss_function = F.cross_entropy
elif name == 'miniimagenet':
transform = Compose([Resize(84), ToTensor()])
meta_train_dataset = MiniImagenet(folder,
transform=transform,
target_transform=Categorical(num_ways),
num_classes_per_task=num_ways,
meta_train=True,
dataset_transform=dataset_transform,
download=True)
meta_val_dataset = MiniImagenet(folder,
transform=transform,
target_transform=Categorical(num_ways),
num_classes_per_task=num_ways,
meta_val=True,
dataset_transform=dataset_transform)
meta_test_dataset = MiniImagenet(folder,
transform=transform,
target_transform=Categorical(num_ways),
num_classes_per_task=num_ways,
meta_test=True,
dataset_transform=dataset_transform)
model = ModelConvMiniImagenet(num_ways, hidden_size=hidden_size)
loss_function = F.cross_entropy
elif name == 'doublenmnist':
from torchneuromorphic.doublenmnist_torchmeta.doublenmnist_dataloaders import DoubleNMNIST,Compose,ClassNMNISTDataset,CropDims,Downsample,ToCountFrame,ToTensor,ToEventSum,Repeat,toOneHot
from torchneuromorphic.utils import plot_frames_imshow
from matplotlib import pyplot as plt
from torchmeta.utils.data import CombinationMetaDataset
root = 'data/nmnist/n_mnist.hdf5'
chunk_size = 300
ds = 2
dt = 1000
transform = None
target_transform = None
size = [2, 32//ds, 32//ds]
transform = Compose([
CropDims(low_crop=[0,0], high_crop=[32,32], dims=[2,3]),
Downsample(factor=[dt,1,ds,ds]),
ToEventSum(T = chunk_size, size = size),
ToTensor()])
if target_transform is None:
target_transform = Compose([Repeat(chunk_size), toOneHot(num_ways)])
loss_function = F.cross_entropy
meta_train_dataset = ClassSplitter(DoubleNMNIST(root = root, meta_train=True, transform = transform, target_transform = target_transform, chunk_size=chunk_size, num_classes_per_task=num_ways), num_train_per_class = num_shots, num_test_per_class = num_shots_test)
meta_val_dataset = ClassSplitter(DoubleNMNIST(root = root, meta_val=True, transform = transform, target_transform = target_transform, chunk_size=chunk_size, num_classes_per_task=num_ways), num_train_per_class = num_shots, num_test_per_class = num_shots_test)
meta_test_dataset = ClassSplitter(DoubleNMNIST(root = root, meta_test=True, transform = transform, target_transform = target_transform, chunk_size=chunk_size, num_classes_per_task=num_ways), num_train_per_class = num_shots, num_test_per_class = num_shots_test)
model = ModelConvDoubleNMNIST(num_ways, hidden_size=hidden_size)
elif name == 'doublenmnistsequence':
from torchneuromorphic.doublenmnist_torchmeta.doublenmnist_dataloaders import DoubleNMNIST,Compose,ClassNMNISTDataset,CropDims,Downsample,ToCountFrame,ToTensor,ToEventSum,Repeat,toOneHot
from torchneuromorphic.utils import plot_frames_imshow
from matplotlib import pyplot as plt
from torchmeta.utils.data import CombinationMetaDataset
root = 'data/nmnist/n_mnist.hdf5'
chunk_size = 300
ds = 2
dt = 1000
transform = None
target_transform = None
size = [2, 32//ds, 32//ds]
transform = Compose([
CropDims(low_crop=[0,0], high_crop=[32,32], dims=[2,3]),
Downsample(factor=[dt,1,ds,ds]),
ToCountFrame(T = chunk_size, size = size),
ToTensor()])
if target_transform is None:
target_transform = Compose([Repeat(chunk_size), toOneHot(num_ways)])
loss_function = F.cross_entropy
meta_train_dataset = ClassSplitter(DoubleNMNIST(root = root, meta_train=True, transform = transform, target_transform = target_transform, chunk_size=chunk_size, num_classes_per_task=num_ways), num_train_per_class = num_shots, num_test_per_class = num_shots_test)
meta_val_dataset = ClassSplitter(DoubleNMNIST(root = root, meta_val=True, transform = transform, target_transform = target_transform, chunk_size=chunk_size, num_classes_per_task=num_ways), num_train_per_class = num_shots, num_test_per_class = num_shots_test)
meta_test_dataset = ClassSplitter(DoubleNMNIST(root = root, meta_test=True, transform = transform, target_transform = target_transform, chunk_size=chunk_size, num_classes_per_task=num_ways), num_train_per_class = num_shots, num_test_per_class = num_shots_test)
model = ModelDECOLLE(num_ways)
else:
raise NotImplementedError('Unknown dataset `{0}`.'.format(name))
return Benchmark(meta_train_dataset=meta_train_dataset,
meta_val_dataset=meta_val_dataset,
meta_test_dataset=meta_test_dataset,
model=model,
loss_function=loss_function)
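def _example_usage():
    """Editor's illustrative sketch, not part of the original module: fetch a
    benchmark and wrap its meta-train split in torchmeta's BatchMetaDataLoader.
    The data folder, ways/shots and batch size below are assumptions."""
    from torchmeta.utils.data import BatchMetaDataLoader

    benchmark = get_benchmark_by_name('omniglot',
                                      folder='data',
                                      num_ways=5,
                                      num_shots=1,
                                      num_shots_test=15,
                                      hidden_size=64)
    dataloader = BatchMetaDataLoader(benchmark.meta_train_dataset,
                                     batch_size=16,
                                     shuffle=True,
                                     num_workers=0)
    for batch in dataloader:
        # Each batch is a dict with 'train' and 'test' (inputs, targets) pairs,
        # as produced by the ClassSplitter dataset_transform above.
        train_inputs, train_targets = batch['train']
        print(train_inputs.shape, train_targets.shape)
        break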
| 52.372881
| 271
| 0.591262
|
baccb6816ec40cfef7ee90ba3d603e581eadb797
| 86
|
py
|
Python
|
src/pyaid/tests/__init__.py
|
jddecarlo/pyaid
|
c9ff224ece4fd890be2355991309179ee8a6a5bd
|
[
"MIT"
] | null | null | null |
src/pyaid/tests/__init__.py
|
jddecarlo/pyaid
|
c9ff224ece4fd890be2355991309179ee8a6a5bd
|
[
"MIT"
] | null | null | null |
src/pyaid/tests/__init__.py
|
jddecarlo/pyaid
|
c9ff224ece4fd890be2355991309179ee8a6a5bd
|
[
"MIT"
] | null | null | null |
"""Initializes the pyaid.tests module. Add any necessary initialization code here."""
| 43
| 85
| 0.77907
|
45844f2688cd673b9ad019a8de32eb1bcf42d246
| 4,087
|
py
|
Python
|
param_teller/parameter_store.py
|
chanzuckerberg/param-teller
|
6c69bd619a80fbb1a1141be679a80c2c7db258b1
|
[
"MIT"
] | null | null | null |
param_teller/parameter_store.py
|
chanzuckerberg/param-teller
|
6c69bd619a80fbb1a1141be679a80c2c7db258b1
|
[
"MIT"
] | null | null | null |
param_teller/parameter_store.py
|
chanzuckerberg/param-teller
|
6c69bd619a80fbb1a1141be679a80c2c7db258b1
|
[
"MIT"
] | null | null | null |
import botocore
from boto3 import client
class ParameterStore(object):
"""
Retrieves parameters from AWS Parameter Store.
"""
def __init__(self, ssm_client=None, with_decryption=True):
# type: (botocore.client.SSM, bool) -> None
"""
Initialize new parameter store client.
:param ssm_client: Optional client provided by user. By default, it creates a new client using the default
session.
:param with_decryption: If true, parameter store will decrypt values.
"""
self._ssm_client = ssm_client or client('ssm')
self._with_decryption = with_decryption
def get_value(self, key):
# type: (str) -> str
"""
        Retrieve a single parameter from the store.
        :param key: Name of the parameter to retrieve.
        :return: The parameter value (decrypted when `with_decryption` is enabled).
"""
response = self._ssm_client.get_parameter(
Name=key,
WithDecryption=self._with_decryption
)
return response.get('Parameter', {}).get('Value')
def get_values_by_path(self, path):
# type: (str) -> dict
"""
        Retrieve all parameter values under a user-provided path.
        :param path: Path where the parameters are stored.
:return: Dictionary of parameter values indexed by parameter key.
"""
# In AWS, a leading path is not required, i.e. "/param" and "param" match the same key and are not unique
# However, boto3 and moto (corresponding mocking library) require a leading path:
# http://boto3.readthedocs.io/en/latest/reference/services/ssm.html#SSM.Client.get_parameters_by_path
# Therefore we are enforcing the leading '/'
path = path.strip()
if path and not path.startswith("/"):
path = "/{path}".format(path=path)
values = {}
extra_args = {}
while True:
response = self._ssm_client.get_parameters_by_path(
Path=path,
WithDecryption=self._with_decryption,
**extra_args
)
values.update({param['Name']: param['Value'] for param in response['Parameters']})
next_token = response.get('NextToken')
if not next_token:
break
extra_args['NextToken'] = next_token
return values
def get_values(self, *keys):
# type: (str) -> dict
"""
Retrieve parameter values by key names.
:param keys: keys to retrieve.
:return: Dictionary of parameter values indexed by parameter key (includes only found keys).
"""
if not keys:
return {}
values = {}
extra_args = {}
while True:
response = self._ssm_client.get_parameters(
Names=keys,
WithDecryption=self._with_decryption,
**extra_args
)
values.update({param['Name']: param['Value'] for param in response['Parameters']})
next_token = response.get('NextToken')
if not next_token:
break
extra_args['NextToken'] = next_token
return values
def get_values_by_prefix(self, prefix=''):
# type: (str) -> dict
"""
Retrieve all parameter values for keys that start with given prefix.
:param prefix: Key name prefix.
:return: Dictionary of parameter values indexed by parameter key.
"""
keys = []
filters = [{'Key': 'Name', 'Values': ['{prefix}'.format(prefix=prefix)]}] if prefix else []
extra_args = {}
while True:
response = self._ssm_client.describe_parameters(
Filters=filters,
**extra_args
)
keys += [param.get('Name') for param in response.get('Parameters', [])]
next_token = response.get('NextToken')
if not next_token:
break
extra_args['NextToken'] = next_token
values = {}
if keys:
values = self.get_values(*keys)
return values
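def _example_usage():
    """Editor's illustrative sketch, not part of the original module: typical
    calls against the store. The '/myapp' parameter names are assumptions and
    must already exist in AWS SSM; the caller needs the corresponding
    ssm:GetParameter*/ssm:DescribeParameters permissions."""
    store = ParameterStore(with_decryption=True)
    secret = store.get_value('/myapp/db_password')
    config_by_path = store.get_values_by_path('/myapp')
    selected = store.get_values('/myapp/db_password', '/myapp/db_user')
    prefixed = store.get_values_by_prefix('/myapp')
    return secret, config_by_path, selected, prefixed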
| 31.198473
| 114
| 0.569366
|
bf8d31d8575f5619aa685620ad89c32e5c52fca3
| 5,857
|
py
|
Python
|
nomen-fileren.py
|
urbanware-org/nomen
|
61febef80fe8ebc80e9a4b407e92510c64d58649
|
[
"MIT"
] | null | null | null |
nomen-fileren.py
|
urbanware-org/nomen
|
61febef80fe8ebc80e9a4b407e92510c64d58649
|
[
"MIT"
] | 1
|
2020-09-02T19:16:39.000Z
|
2020-09-02T19:16:39.000Z
|
nomen-fileren.py
|
urbanware-org/nomen
|
61febef80fe8ebc80e9a4b407e92510c64d58649
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ============================================================================
# Nomen - Multi-purpose rename tool
# File Renamer script
# Copyright (C) 2018 by Ralf Kilian
# Distributed under the MIT License (https://opensource.org/licenses/MIT)
#
# GitHub: https://github.com/urbanware-org/nomen
# GitLab: https://gitlab.com/urbanware-org/nomen
# ============================================================================
import os
import sys
def main():
from core import clap
from core import common
from core import fileren
try:
p = clap.Parser()
except Exception as e:
print("%s: error: %s" % (os.path.basename(sys.argv[0]), e))
sys.exit(1)
p.set_description("Rename the base name of files within a directory "
"and (if requested) in all of its sub-directories "
"based on the name of the directory where the files "
"are stored in and add a unique numeric ID.")
p.set_epilog("Further information and usage examples can be found "
"inside the documentation file for this script.")
# Required arguments
p.add_avalue("-d", "--directory", "directory that contains the files "
"to process", "directory", None, True)
p.add_predef("-m", "--rename-mode", "rename mode to use", "rename_mode",
["fill-gaps", "keep-order", "rename-new"], True)
# Optional arguments
p.add_switch("-c", "--case-sensitive", "do not ignore the case of the "
"given exclude or explicit pattern", "case", False, False)
p.add_switch(None, "--confirm", "skip the confirmation prompt and "
"instantly rename files", "confirm", True, False)
p.add_avalue(None, "--custom-name", "custom file name (instead of the "
"directory name where the files are stored in)",
"custom_name", None, False)
p.add_avalue(None, "--exclude", "pattern to exclude certain files "
"(case-insensitive, multiple patterns separated via "
"semicolon)", "exclude_pattern", None, False)
p.add_avalue(None, "--explicit", "explicit pattern to only process "
"certain files (case-insensitive, multiple patterns "
"separated via semicolon)", "explicit_pattern", None, False)
p.add_switch("-h", "--help", "print this help message and exit", None,
True, False)
p.add_switch(None, "--ignore-file-ext", "ignore file extensions when "
"numerating files", "ignore_file_ext", True, False)
p.add_switch(None, "--ignore-symlinks", "ignore symbolic links",
"ignore_symlinks", True, False)
p.add_predef("-o", "--order-by", "order files by last accessed, "
"created or modified date", "order_by",
["accessed", "created", "modified"], False)
p.add_avalue("-p", "--padding", "set a user-defined numeric padding "
"(if no user-defined padding value is given, it will be "
"set automatically based on the amount of files per "
"directory)", "padding", 0, False)
p.add_switch("-r", "--recursive", "process the given directory "
"recursively", "recursive", True, False)
p.add_switch(None, "--regex", "use regex syntax for the exclude or "
"explicit pattern instead of just asterisk wildcards and "
"semicolon separators (for details see the section "
"'Regular expression operations' inside the official "
"Python documentation)", "regex_syntax", True, False)
p.add_avalue("-s", "--separator", "use a user-defined character or "
"string as a separator between the directory name and the "
"unique numeric ID", "separator", " ", False)
p.add_avalue(None, "--simulate", "simulate the rename process and "
"write the details into a report file", "report_file", None,
False)
p.add_avalue(None, "--step", "steps between each numeric ID", "step", 1,
False)
p.add_switch(None, "--version", "print the version number and exit", None,
True, False)
if len(sys.argv) == 1:
p.error("At least one required argument is missing.")
elif ("-h" in sys.argv) or ("--help" in sys.argv):
p.print_help()
sys.exit(0)
elif "--version" in sys.argv:
print(fileren.get_version())
sys.exit(0)
args = p.parse_args()
if args.confirm and args.report_file is not None:
p.error("The confirm and the simulate argument cannot be given at "
"the same time.")
if args.exclude_pattern and args.explicit_pattern:
p.error("The exclude and the explicit pattern argument cannot be "
"given at the same time.")
try:
if not args.confirm and args.report_file is None:
if not common.confirm_notice():
sys.exit(0)
if args.exclude_pattern:
pattern = args.exclude_pattern
exclude = True
elif args.explicit_pattern:
exclude = False
pattern = args.explicit_pattern
else:
exclude = None
pattern = None
fileren.rename_files(args.directory, args.rename_mode, args.separator,
args.recursive, args.padding, exclude, pattern,
args.case, args.regex_syntax, args.report_file,
args.ignore_symlinks, args.ignore_file_ext,
args.custom_name, args.step, args.order_by)
except Exception as e:
p.error(e)
if __name__ == "__main__":
main()
# EOF
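# Editor's illustrative sketch, not part of the original script: two example
# invocations of the command-line interface defined above. The directory,
# patterns and report path are assumptions.
#
#   python3 nomen-fileren.py -d /data/photos -m keep-order -r -p 4 \
#       --separator "_" --exclude "*.tmp;*.bak" --confirm
#
#   # Simulate only: write the planned renames into a report file instead
#   python3 nomen-fileren.py -d /data/photos -m rename-new \
#       --simulate /tmp/nomen_report.txt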
| 44.371212
| 78
| 0.574868
|
da463d1b889b5ee9690553465f915ed8c0ef8d2c
| 1,166
|
py
|
Python
|
usbhid_dump.py
|
dale-taylor/py-usbhid-dump-stream
|
bfaf4f64d6fc0ecc8d18aa2549e43dc4e6b9f184
|
[
"MIT"
] | null | null | null |
usbhid_dump.py
|
dale-taylor/py-usbhid-dump-stream
|
bfaf4f64d6fc0ecc8d18aa2549e43dc4e6b9f184
|
[
"MIT"
] | null | null | null |
usbhid_dump.py
|
dale-taylor/py-usbhid-dump-stream
|
bfaf4f64d6fc0ecc8d18aa2549e43dc4e6b9f184
|
[
"MIT"
] | null | null | null |
import subprocess
import re
import shlex
def dump_stream(bus, devices, timeout, callback):
if len(devices) == 1:
dev = devices[0]
else:
dev = 0
cmd = "usbhid-dump -a {}:{} -es -t {}".format(bus, dev, timeout)
process = subprocess.Popen(shlex.split(cmd), stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)
last_device_id = None
last_rep_id = None
last_timestamp = None
while True:
        output = process.stdout.readline()
        # readline() returns bytes; an empty read plus a finished process means EOF
        if not output and process.poll() is not None:
            break
        if output:
            line = output.decode(errors="ignore").strip()
            # Check if line is a device ID and timestamp line
            m = re.search(r'\d{3}:(\d{3}):(\d{3}):STREAM\s+([0-9.]+)', line)
if m is not None:
last_device_id = m.group(1)
last_rep_id = m.group(2)
last_timestamp = m.group(3)
continue
# Check if line is a report payload
            m = re.search(r'((?:[0-9A-Fa-f][0-9A-Fa-f]\s?){1,8})', line)
if m is not None:
data = bytes.fromhex(m.group(1).replace(" ", ""))
# Call callback function if device was included in list
if last_device_id in devices:
callback(last_device_id, last_rep_id, last_timestamp, data)
rc = process.poll()
return rc
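# A minimal usage sketch (bus number, device address and timeout below are
# illustrative values only; the usbhid-dump tool must be installed):
def print_report(device_id, rep_id, timestamp, data):
    # data is the raw HID report payload as bytes
    print(device_id, rep_id, timestamp, data.hex())
if __name__ == "__main__":
    dump_stream(1, ["003"], 30, print_report)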
| 25.347826 | 96 | 0.64837 |
4e87358ea1aca3353a3a695ce5c21f61e2ba45c4 | 4,006 | py | Python |
clinicadl/clinicadl/tools/tsv/tsv_utils.py | yogeshmj/AD-DL | 76b9b564061581effe8f3698992bfea3ffb055fa | ["MIT"] | 112 | 2019-10-21T14:50:35.000Z | 2022-03-29T03:15:47.000Z |
clinicadl/clinicadl/tools/tsv/tsv_utils.py | yogeshmj/AD-DL | 76b9b564061581effe8f3698992bfea3ffb055fa | ["MIT"] | 136 | 2019-10-17T17:40:55.000Z | 2021-06-30T14:53:29.000Z |
clinicadl/clinicadl/tools/tsv/tsv_utils.py | yogeshmj/AD-DL | 76b9b564061581effe8f3698992bfea3ffb055fa | ["MIT"] | 49 | 2019-11-26T13:57:52.000Z | 2022-03-20T13:17:42.000Z |
# coding: utf8
from copy import copy
import numpy as np
import pandas as pd
def neighbour_session(session, session_list, neighbour):
if session not in session_list:
temp_list = session_list + [session]
temp_list.sort()
else:
temp_list = copy(session_list)
temp_list.sort()
index_session = temp_list.index(session)
if index_session + neighbour < 0 or index_session + neighbour >= len(temp_list):
return None
else:
if temp_list[index_session + neighbour] < 10:
return 'ses-M0' + str(temp_list[index_session + neighbour])
else:
return 'ses-M' + str(temp_list[index_session + neighbour])
def after_end_screening(session, session_list):
if session in session_list:
return False
else:
temp_list = session_list + [session]
temp_list.sort()
index_session = temp_list.index(session)
return index_session == len(temp_list) - 1
def last_session(session_list):
temp_list = copy(session_list)
temp_list.sort()
if temp_list[-1] < 10:
return 'ses-M0' + str(temp_list[-1])
else:
return 'ses-M' + str(temp_list[-1])
def complementary_list(total_list, sub_list):
result_list = []
for element in total_list:
if element not in sub_list:
result_list.append(element)
return result_list
def first_session(subject_df):
session_list = [int(session[5:]) for _, session in subject_df.index.values]
session_list.sort()
first_session = session_list[0]
if first_session < 10:
return 'ses-M0' + str(first_session)
else:
return 'ses-M' + str(first_session)
def next_session(subject_df, session_orig):
session_list = [int(session[5:]) for _, session in subject_df.index.values]
session_list.sort()
session_id_list = []
for session in session_list:
if session < 10:
session_id_list.append('ses-M0' + str(session))
else:
session_id_list.append('ses-M' + str(session))
index = session_id_list.index(session_orig)
if index < len(session_id_list) - 1:
return session_id_list[index + 1]
else:
raise ValueError('The argument session is the last session')
def baseline_df(diagnosis_df, diagnosis, set_index=True):
from copy import deepcopy
if set_index:
all_df = diagnosis_df.set_index(['participant_id', 'session_id'])
else:
all_df = deepcopy(diagnosis_df)
columns = ['participant_id', 'session_id', 'diagnosis']
result_df = pd.DataFrame()
for subject, subject_df in all_df.groupby(level=0):
first_session_id = first_session(subject_df)
data = np.array([subject, first_session_id, diagnosis]).reshape(1, 3)
subject_baseline_df = pd.DataFrame(data, columns=columns)
result_df = pd.concat([result_df, subject_baseline_df])
result_df.reset_index(inplace=True, drop=True)
return result_df
def chi2(x_test, x_train):
    # Chi-squared statistic: compare the observed F/M counts in x_test with the
    # counts expected from the proportion found in x_train.
p_expectedF = np.sum(x_train) / len(x_train)
p_expectedM = 1 - p_expectedF
expectedF = p_expectedF * len(x_test)
expectedM = p_expectedM * len(x_test)
observedF = np.sum(x_test)
observedM = len(x_test) - np.sum(x_test)
T = (expectedF - observedF) ** 2 / expectedF + (expectedM - observedM) ** 2 / expectedM
return T
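# A small worked example of the statistic above (toy 0/1 vectors, illustrative
# only): x_train gives an expected proportion of 0.5, so a test split with one
# "1" out of four yields T = (2 - 1)**2 / 2 + (2 - 3)**2 / 2 = 1.0.
_x_train_toy = np.array([1, 1, 0, 0])
_x_test_toy = np.array([1, 0, 0, 0])
assert np.isclose(chi2(_x_test_toy, _x_train_toy), 1.0)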
def add_demographics(df, demographics_df, diagnosis):
out_df = pd.DataFrame()
tmp_demo_df = copy(demographics_df)
tmp_demo_df.reset_index(inplace=True)
for idx in df.index.values:
participant = df.loc[idx, "participant_id"]
session = df.loc[idx, "session_id"]
row_df = tmp_demo_df[(tmp_demo_df.participant_id == participant) & (tmp_demo_df.session_id == session)]
out_df = pd.concat([out_df, row_df])
out_df.reset_index(inplace=True, drop=True)
out_df.diagnosis = [diagnosis] * len(out_df)
return out_df
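# A minimal usage sketch of baseline_df (participant/session IDs below are
# hypothetical, using the "ses-MXX" convention assumed by the helpers above):
if __name__ == "__main__":
    toy_df = pd.DataFrame({
        "participant_id": ["sub-01", "sub-01", "sub-02"],
        "session_id": ["ses-M12", "ses-M00", "ses-M06"],
        "diagnosis": ["CN", "CN", "AD"],
    })
    # Keeps only the earliest session per subject and relabels the diagnosis column.
    print(baseline_df(toy_df, "CN"))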
| 31.793651 | 111 | 0.65976 |
47948ff300d1ad508eb1cb083fbee1afef8e3d4a | 28,184 | py | Python |
python_modules/dagster/dagster_tests/core_tests/config_types_tests/test_config_type_system.py | withshubh/dagster | ff4a0db53e126f44097a337eecef54988cc718ef | ["Apache-2.0"] | null | null | null |
python_modules/dagster/dagster_tests/core_tests/config_types_tests/test_config_type_system.py | withshubh/dagster | ff4a0db53e126f44097a337eecef54988cc718ef | ["Apache-2.0"] | null | null | null |
python_modules/dagster/dagster_tests/core_tests/config_types_tests/test_config_type_system.py | withshubh/dagster | ff4a0db53e126f44097a337eecef54988cc718ef | ["Apache-2.0"] | null | null | null |
import re
import typing
import pytest
from dagster import (
Any,
DagsterInvalidConfigDefinitionError,
DagsterInvalidConfigError,
DagsterInvalidDefinitionError,
Field,
Float,
Int,
List,
ModeDefinition,
Noneable,
Permissive,
PipelineDefinition,
ResourceDefinition,
Set,
String,
Tuple,
composite_solid,
execute_pipeline,
execute_solid,
pipeline,
solid,
)
from dagster.config.errors import DagsterEvaluationErrorReason
from dagster.config.field_utils import convert_potential_field
from dagster.config.validate import process_config, validate_config
def test_noop_config():
assert Field(Any)
def test_int_field():
config_field = convert_potential_field({"int_field": Int})
assert validate_config(config_field.config_type, {"int_field": 1}).value == {"int_field": 1}
def test_float_field():
config_field = convert_potential_field({"float_field": Float})
assert validate_config(config_field.config_type, {"float_field": 1.0}).value == {
"float_field": 1.0
}
assert process_config(config_field.config_type, {"float_field": 1.0}).value == {
"float_field": 1.0
}
assert validate_config(config_field.config_type, {"float_field": 1}).value == {"float_field": 1}
assert process_config(config_field.config_type, {"float_field": 1}).value == {
"float_field": 1.0
}
def assert_config_value_success(config_type, config_value, expected):
result = process_config(config_type, config_value)
assert result.success
assert result.value == expected
def assert_eval_failure(config_type, value):
assert not validate_config(config_type, value).success
def test_int_fails():
config_field = convert_potential_field({"int_field": Int})
assert_eval_failure(config_field.config_type, {"int_field": "fjkdj"})
assert_eval_failure(config_field.config_type, {"int_field": True})
def test_default_arg():
config_field = convert_potential_field(
{"int_field": Field(Int, default_value=2, is_required=False)}
)
assert_config_value_success(config_field.config_type, {}, {"int_field": 2})
def test_default_float_arg():
config_field = convert_potential_field(
{"float_field": Field(Float, default_value=2.0, is_required=False)}
)
assert_config_value_success(config_field.config_type, {}, {"float_field": 2.0})
config_field = convert_potential_field(
{"float_field": Field(Float, default_value=2, is_required=False)}
)
assert_config_value_success(config_field.config_type, {}, {"float_field": 2})
def _single_required_string_config_dict():
return convert_potential_field({"string_field": String})
def _multiple_required_fields_config_dict():
return convert_potential_field({"field_one": String, "field_two": String})
def _single_optional_string_config_dict():
return convert_potential_field({"optional_field": Field(String, is_required=False)})
def _single_optional_string_field_config_dict_with_default():
optional_field_def = Field(String, is_required=False, default_value="some_default")
return convert_potential_field({"optional_field": optional_field_def})
def _mixed_required_optional_string_config_dict_with_default():
return convert_potential_field(
{
"optional_arg": Field(String, is_required=False, default_value="some_default"),
"required_arg": Field(String, is_required=True),
"optional_arg_no_default": Field(String, is_required=False),
}
)
def _multiple_required_fields_config_permissive_dict():
return Field(Permissive({"field_one": Field(String), "field_two": Field(String)}))
def _validate(config_field, value):
res = process_config(config_field.config_type, value)
assert res.success, res.errors[0].message
return res.value
def test_single_required_string_field_config_type():
assert _validate(_single_required_string_config_dict(), {"string_field": "value"}) == {
"string_field": "value"
}
with pytest.raises(
AssertionError,
match='Missing required config entry "string_field" at the root.',
):
_validate(_single_required_string_config_dict(), {})
with pytest.raises(AssertionError):
_validate(_single_required_string_config_dict(), {"extra": "yup"})
with pytest.raises(AssertionError):
_validate(_single_required_string_config_dict(), {"string_field": "yupup", "extra": "yup"})
with pytest.raises(AssertionError):
_validate(_single_required_string_config_dict(), {"string_field": 1})
def test_undefined_field_error():
with pytest.raises(
AssertionError,
match=(
'Received unexpected config entry "extra" at the root. Expected: "{ string_field: '
'String }".'
),
):
_validate(
_single_required_string_config_dict(), {"string_field": "value", "extra": "extra"}
)
def test_multiple_required_fields_passing():
assert (
_validate(
_multiple_required_fields_config_dict(),
{"field_one": "value_one", "field_two": "value_two"},
)
== {"field_one": "value_one", "field_two": "value_two"}
)
def test_multiple_required_fields_failing():
with pytest.raises(AssertionError):
_validate(_multiple_required_fields_config_dict(), {})
with pytest.raises(AssertionError):
_validate(_multiple_required_fields_config_dict(), {"field_one": "yup"})
with pytest.raises(AssertionError):
_validate(_multiple_required_fields_config_dict(), {"field_one": "yup", "extra": "yup"})
with pytest.raises(AssertionError):
_validate(
_multiple_required_fields_config_dict(),
{"field_one": "yup", "field_two": "yup", "extra": "should_not_exist"},
)
with pytest.raises(AssertionError):
_validate(
_multiple_required_fields_config_dict(), {"field_one": "value_one", "field_two": 2}
)
def test_single_optional_field_passing():
assert _validate(_single_optional_string_config_dict(), {"optional_field": "value"}) == {
"optional_field": "value"
}
assert _validate(_single_optional_string_config_dict(), {}) == {}
with pytest.raises(AssertionError):
assert _validate(_single_optional_string_config_dict(), {"optional_field": None}) == {
"optional_field": None
}
def test_single_optional_field_failing():
with pytest.raises(AssertionError):
_validate(_single_optional_string_config_dict(), {"optional_field": 1})
with pytest.raises(AssertionError):
_validate(_single_optional_string_config_dict(), {"dlkjfalksdjflksaj": 1})
def test_single_optional_field_passing_with_default():
assert _validate(_single_optional_string_field_config_dict_with_default(), {}) == {
"optional_field": "some_default"
}
assert _validate(
_single_optional_string_field_config_dict_with_default(), {"optional_field": "override"}
) == {"optional_field": "override"}
def test_permissive_multiple_required_fields_passing():
assert _validate(
_multiple_required_fields_config_permissive_dict(),
{
"field_one": "value_one",
"field_two": "value_two",
"previously_unspecified": "should_exist",
},
) == {
"field_one": "value_one",
"field_two": "value_two",
"previously_unspecified": "should_exist",
}
def test_permissive_multiple_required_fields_nested_passing():
assert _validate(
_multiple_required_fields_config_permissive_dict(),
{
"field_one": "value_one",
"field_two": "value_two",
"previously_unspecified": {"nested": "value", "with_int": 2},
},
) == {
"field_one": "value_one",
"field_two": "value_two",
"previously_unspecified": {"nested": "value", "with_int": 2},
}
def test_permissive_multiple_required_fields_failing():
with pytest.raises(AssertionError):
_validate(_multiple_required_fields_config_permissive_dict(), {})
with pytest.raises(AssertionError):
_validate(_multiple_required_fields_config_permissive_dict(), {"field_one": "yup"})
with pytest.raises(AssertionError):
_validate(
_multiple_required_fields_config_permissive_dict(),
{"field_one": "value_one", "field_two": 2},
)
def test_mixed_args_passing():
assert (
_validate(
_mixed_required_optional_string_config_dict_with_default(),
{"optional_arg": "value_one", "required_arg": "value_two"},
)
== {"optional_arg": "value_one", "required_arg": "value_two"}
)
assert _validate(
_mixed_required_optional_string_config_dict_with_default(), {"required_arg": "value_two"}
) == {"optional_arg": "some_default", "required_arg": "value_two"}
assert _validate(
_mixed_required_optional_string_config_dict_with_default(),
{"required_arg": "value_two", "optional_arg_no_default": "value_three"},
) == {
"optional_arg": "some_default",
"required_arg": "value_two",
"optional_arg_no_default": "value_three",
}
def _single_nested_config():
return convert_potential_field({"nested": {"int_field": Int}})
def _nested_optional_config_with_default():
return convert_potential_field(
{"nested": {"int_field": Field(Int, is_required=False, default_value=3)}}
)
def _nested_optional_config_with_no_default():
return convert_potential_field({"nested": {"int_field": Field(Int, is_required=False)}})
def test_single_nested_config():
assert _validate(_single_nested_config(), {"nested": {"int_field": 2}}) == {
"nested": {"int_field": 2}
}
def test_single_nested_config_undefined_errors():
with pytest.raises(
AssertionError,
match='Value at path root:nested must be dict. Expected: "{ int_field: Int }".',
):
_validate(_single_nested_config(), {"nested": "dkjfdk"})
with pytest.raises(
AssertionError,
match='Invalid scalar at path root:nested:int_field. Value "dkjfdk" of type .* is not valid for expected type "Int".',
):
_validate(_single_nested_config(), {"nested": {"int_field": "dkjfdk"}})
with pytest.raises(
AssertionError,
match=(
'Received unexpected config entry "not_a_field" at path root:nested. Expected: '
'"{ int_field: Int }".'
),
):
_validate(_single_nested_config(), {"nested": {"int_field": 2, "not_a_field": 1}})
with pytest.raises(
AssertionError,
match="Invalid scalar at path root:nested:int_field. Value \"{'too_nested': 'dkjfdk'}\" of type .* is not valid for expected type \"Int\".",
):
_validate(_single_nested_config(), {"nested": {"int_field": {"too_nested": "dkjfdk"}}})
def test_nested_optional_with_default():
assert _validate(_nested_optional_config_with_default(), {"nested": {"int_field": 2}}) == {
"nested": {"int_field": 2}
}
assert _validate(_nested_optional_config_with_default(), {"nested": {}}) == {
"nested": {"int_field": 3}
}
def test_nested_optional_with_no_default():
assert _validate(_nested_optional_config_with_no_default(), {"nested": {"int_field": 2}}) == {
"nested": {"int_field": 2}
}
assert _validate(_nested_optional_config_with_no_default(), {"nested": {}}) == {"nested": {}}
def test_config_defaults():
@solid(config_schema={"sum": Int})
def two(_context):
assert _context.solid_config["sum"] == 6
return _context.solid_config["sum"]
@solid(config_schema={"sum": Int})
def one(_context, prev_sum):
assert prev_sum == 6
return prev_sum + _context.solid_config["sum"]
# addition_composite_solid
def addition_composite_solid_config_fn(config):
child_config = {"config": {"sum": config["a"] + config["b"] + config["c"]}}
return {"one": child_config, "two": child_config}
@composite_solid(
config_fn=addition_composite_solid_config_fn,
config_schema={
"a": Field(Int, is_required=False, default_value=1),
"b": Field(Int, is_required=False, default_value=2),
"c": Int,
},
)
def addition_composite_solid():
return one(two())
@pipeline
def addition_pipeline():
addition_composite_solid()
result = execute_pipeline(
addition_pipeline, {"solids": {"addition_composite_solid": {"config": {"c": 3}}}}
)
assert result.success
def test_config_with_and_without_config():
@solid(config_schema={"prefix": Field(str, is_required=False, default_value="_")})
def prefix_value(context, v):
return "{prefix}{v}".format(prefix=context.solid_config["prefix"], v=v)
@composite_solid(
config_fn=lambda cfg: {"prefix_value": {"config": {"prefix": cfg["prefix"]}}},
config_schema={"prefix": Field(str, is_required=False, default_value="_id_")},
)
def prefix_id(val):
return prefix_value(val)
@solid
def print_value(_, v):
return str(v)
@pipeline
def config_issue_pipeline():
v = prefix_id()
print_value(v)
result = execute_pipeline(
config_issue_pipeline,
{
"solids": {
"prefix_id": {
"config": {"prefix": "_customprefix_"},
"inputs": {"val": {"value": "12345"}},
}
}
},
)
assert result.success
assert result.result_for_solid("print_value").output_value() == "_customprefix_12345"
result_using_default = execute_pipeline(
config_issue_pipeline,
{"solids": {"prefix_id": {"config": {}, "inputs": {"val": {"value": "12345"}}}}},
)
assert result_using_default.success
assert result_using_default.result_for_solid("print_value").output_value() == "_id_12345"
def test_build_optionality():
optional_test_type = convert_potential_field(
{
"required": {"value": String},
"optional": {"value": Field(String, is_required=False)},
}
).config_type
assert optional_test_type.fields["required"].is_required
assert optional_test_type.fields["optional"].is_required is False
def test_wrong_solid_name():
@solid(name="some_solid", input_defs=[], output_defs=[], config_schema=Int)
def some_solid(_):
return None
@pipeline(name="pipeline_wrong_solid_name")
def pipeline_def():
some_solid()
env_config = {"solids": {"another_name": {"config": {}}}}
with pytest.raises(DagsterInvalidConfigError) as pe_info:
execute_pipeline(pipeline_def, env_config)
pe = pe_info.value
assert 'Received unexpected config entry "another_name" at path root:solids' in str(pe)
def fail_me():
assert False
def dummy_resource(config_schema=None):
return ResourceDefinition(lambda _: None, config_schema=config_schema)
def test_wrong_resources():
pipeline_def = PipelineDefinition(
name="pipeline_test_multiple_context",
mode_defs=[
ModeDefinition(
resource_defs={"resource_one": dummy_resource(), "resource_two": dummy_resource()}
)
],
solid_defs=[],
)
with pytest.raises(
DagsterInvalidConfigError,
match='Received unexpected config entry "nope" at path root:resources',
):
execute_pipeline(pipeline_def, {"resources": {"nope": {}}})
def test_solid_list_config():
value = [1, 2]
called = {}
@solid(name="solid_list_config", input_defs=[], output_defs=[], config_schema=[int])
def solid_list_config(context):
assert context.solid_config == value
called["yup"] = True
@pipeline(name="solid_list_config_pipeline")
def pipeline_def():
solid_list_config()
result = execute_pipeline(
pipeline_def, run_config={"solids": {"solid_list_config": {"config": value}}}
)
assert result.success
assert called["yup"]
def test_two_list_types():
@solid(
input_defs=[],
config_schema={"list_one": [int], "list_two": [int]},
)
def two_list_type(context):
return context.solid_config
assert (
execute_solid(
two_list_type,
run_config={
"solids": {"two_list_type": {"config": {"list_one": [1], "list_two": [2]}}}
},
).output_value()
== {"list_one": [1], "list_two": [2]}
)
@solid(
input_defs=[],
config_schema={"list_one": [Int], "list_two": [Int]},
)
def two_list_type_condensed_syntax(context):
return context.solid_config
assert (
execute_solid(
two_list_type_condensed_syntax,
run_config={
"solids": {
"two_list_type_condensed_syntax": {"config": {"list_one": [1], "list_two": [2]}}
}
},
).output_value()
== {"list_one": [1], "list_two": [2]}
)
@solid(
input_defs=[],
config_schema={"list_one": [int], "list_two": [int]},
)
def two_list_type_condensed_syntax_primitives(context):
return context.solid_config
assert (
execute_solid(
two_list_type_condensed_syntax_primitives,
run_config={
"solids": {
"two_list_type_condensed_syntax_primitives": {
"config": {"list_one": [1], "list_two": [2]}
}
}
},
).output_value()
== {"list_one": [1], "list_two": [2]}
)
def test_multilevel_default_handling():
@solid(config_schema=Field(Int, is_required=False, default_value=234))
def has_default_value(context):
assert context.solid_config == 234
pipeline_def = PipelineDefinition(
name="multilevel_default_handling", solid_defs=[has_default_value]
)
assert execute_pipeline(pipeline_def).success
assert execute_pipeline(pipeline_def, run_config=None).success
assert execute_pipeline(pipeline_def, run_config={}).success
assert execute_pipeline(pipeline_def, run_config={"solids": {}}).success
assert execute_pipeline(pipeline_def, run_config={"solids": {"has_default_value": {}}}).success
assert execute_pipeline(
pipeline_def, run_config={"solids": {"has_default_value": {"config": 234}}}
).success
def test_no_env_missing_required_error_handling():
@solid(config_schema=Int)
def required_int_solid(_context):
pass
pipeline_def = PipelineDefinition(
name="no_env_missing_required_error", solid_defs=[required_int_solid]
)
with pytest.raises(DagsterInvalidConfigError) as pe_info:
execute_pipeline(pipeline_def)
assert isinstance(pe_info.value, DagsterInvalidConfigError)
pe = pe_info.value
assert len(pe.errors) == 1
mfe = pe.errors[0]
assert mfe.reason == DagsterEvaluationErrorReason.MISSING_REQUIRED_FIELD
assert len(pe.errors) == 1
assert pe.errors[0].message == 'Missing required config entry "solids" at the root.'
def test_root_extra_field():
@solid(config_schema=Int)
def required_int_solid(_context):
pass
@pipeline
def pipeline_def():
required_int_solid()
with pytest.raises(DagsterInvalidConfigError) as pe_info:
execute_pipeline(
pipeline_def,
run_config={"solids": {"required_int_solid": {"config": 948594}}, "nope": None},
)
pe = pe_info.value
assert len(pe.errors) == 1
fnd = pe.errors[0]
assert fnd.reason == DagsterEvaluationErrorReason.FIELD_NOT_DEFINED
assert 'Received unexpected config entry "nope"' in pe.message
def test_deeper_path():
@solid(config_schema=Int)
def required_int_solid(_context):
pass
@pipeline
def pipeline_def():
required_int_solid()
with pytest.raises(DagsterInvalidConfigError) as pe_info:
execute_pipeline(
pipeline_def, run_config={"solids": {"required_int_solid": {"config": "asdf"}}}
)
pe = pe_info.value
assert len(pe.errors) == 1
rtm = pe.errors[0]
assert rtm.reason == DagsterEvaluationErrorReason.RUNTIME_TYPE_MISMATCH
def test_working_list_path():
called = {}
@solid(config_schema=[int])
def required_list_int_solid(context):
assert context.solid_config == [1, 2]
called["yup"] = True
@pipeline
def pipeline_def():
required_list_int_solid()
result = execute_pipeline(
pipeline_def, run_config={"solids": {"required_list_int_solid": {"config": [1, 2]}}}
)
assert result.success
assert called["yup"]
def test_item_error_list_path():
called = {}
@solid(config_schema=[int])
def required_list_int_solid(context):
assert context.solid_config == [1, 2]
called["yup"] = True
@pipeline
def pipeline_def():
required_list_int_solid()
with pytest.raises(DagsterInvalidConfigError) as pe_info:
execute_pipeline(
pipeline_def,
run_config={"solids": {"required_list_int_solid": {"config": [1, "nope"]}}},
)
pe = pe_info.value
assert len(pe.errors) == 1
rtm = pe.errors[0]
assert rtm.reason == DagsterEvaluationErrorReason.RUNTIME_TYPE_MISMATCH
assert "Invalid scalar at path root:solids:required_list_int_solid:config[1]" in str(pe)
def test_list_in_config_error():
error_msg = (
"Cannot use List in the context of config. "
"Please use a python list (e.g. [int]) or dagster.Array (e.g. Array(int)) instead."
)
with pytest.raises(DagsterInvalidDefinitionError, match=re.escape(error_msg)):
@solid(config_schema=List[int])
def _no_runtime_list_in_config(_):
pass
def test_required_resource_not_given():
@pipeline(
name="required_resource_not_given",
mode_defs=[ModeDefinition(resource_defs={"required": dummy_resource(Int)})],
)
def pipeline_def():
pass
with pytest.raises(DagsterInvalidConfigError) as not_none_pe_info:
execute_pipeline(pipeline_def, run_config={"resources": None})
assert len(not_none_pe_info.value.errors) == 1
assert (
"Value at path root:resources must not be None." in not_none_pe_info.value.errors[0].message
)
with pytest.raises(DagsterInvalidConfigError) as pe_info:
execute_pipeline(pipeline_def, run_config={"resources": {}})
pe = pe_info.value
error = pe.errors[0]
assert error.reason == DagsterEvaluationErrorReason.MISSING_REQUIRED_FIELD
assert error.message == 'Missing required config entry "required" at path root:resources.'
def test_multilevel_good_error_handling_solids():
@solid(config_schema=Int)
def good_error_handling(_context):
pass
@pipeline
def pipeline_def():
good_error_handling()
with pytest.raises(DagsterInvalidConfigError) as not_none_pe_info:
execute_pipeline(pipeline_def, run_config={"solids": None})
assert len(not_none_pe_info.value.errors) == 1
assert "Value at path root:solids must not be None." in not_none_pe_info.value.errors[0].message
with pytest.raises(DagsterInvalidConfigError) as missing_field_pe_info:
execute_pipeline(pipeline_def, run_config={"solids": {}})
assert len(missing_field_pe_info.value.errors) == 1
assert missing_field_pe_info.value.errors[0].message == (
"""Missing required config entry "good_error_handling" at path root:solids."""
)
def test_multilevel_good_error_handling_solid_name_solids():
@solid(config_schema=Int)
def good_error_handling(_context):
pass
@pipeline
def pipeline_def():
good_error_handling()
with pytest.raises(DagsterInvalidConfigError) as pe_info:
execute_pipeline(pipeline_def, run_config={"solids": {"good_error_handling": {}}})
assert len(pe_info.value.errors) == 1
assert pe_info.value.errors[0].message == (
"""Missing required config entry "config" at path root:solids:good_error_handling."""
)
def test_multilevel_good_error_handling_config_solids_name_solids():
@solid(config_schema=Noneable(int))
def good_error_handling(_context):
pass
@pipeline
def pipeline_def():
good_error_handling()
execute_pipeline(pipeline_def, run_config={"solids": {"good_error_handling": {"config": None}}})
def test_invalid_default_values():
with pytest.raises(
DagsterInvalidConfigError,
match='Value "3" of type .* is not valid for expected type "Int"',
):
@solid(config_schema=Field(Int, default_value="3"))
def _solid(_):
pass
def test_typing_types_into_config():
match_str = re.escape(
"You have passed in typing.List to the config system. "
"Types from the typing module in python are not allowed "
"in the config system. You must use types that are imported "
"from dagster or primitive types such as bool, int, etc."
)
with pytest.raises(DagsterInvalidDefinitionError, match=match_str):
@solid(config_schema=Field(typing.List))
def _solid(_):
pass
with pytest.raises(DagsterInvalidDefinitionError, match=match_str):
@solid(config_schema=typing.List)
def _solid(_):
pass
match_str = re.escape(
"You have passed in typing.List[int] to the config system. Types "
"from the typing module in python are not allowed in the config system. "
"You must use types that are imported from dagster or primitive types "
"such as bool, int, etc."
)
with pytest.raises(DagsterInvalidDefinitionError, match=match_str):
@solid(config_schema=Field(typing.List[int]))
def _solid(_):
pass
with pytest.raises(DagsterInvalidDefinitionError, match=match_str):
@solid(config_schema=typing.List[int])
def _solid(_):
pass
for ttype in [
typing.Optional[int],
typing.Set,
typing.Set[int],
typing.Dict,
typing.Dict[int, str],
typing.Tuple,
typing.Tuple[int, int],
]:
with pytest.raises(DagsterInvalidDefinitionError):
@solid(config_schema=Field(ttype))
def _solid(_):
pass
def test_no_set_in_config_system():
set_error_msg = re.escape("Cannot use Set in the context of a config field.")
with pytest.raises(DagsterInvalidDefinitionError, match=set_error_msg):
@solid(config_schema=Field(Set))
def _bare_open_set(_):
pass
with pytest.raises(DagsterInvalidDefinitionError, match=set_error_msg):
@solid(config_schema=Set)
def _bare_open_set(_):
pass
with pytest.raises(DagsterInvalidDefinitionError, match=set_error_msg):
@solid(config_schema=Field(Set[int]))
def _bare_closed_set(_):
pass
with pytest.raises(DagsterInvalidDefinitionError, match=set_error_msg):
@solid(config_schema=Set[int])
def _bare_closed_set(_):
pass
def test_no_tuple_in_config_system():
tuple_error_msg = re.escape("Cannot use Tuple in the context of a config field.")
with pytest.raises(DagsterInvalidDefinitionError, match=tuple_error_msg):
@solid(config_schema=Field(Tuple))
def _bare_open_tuple(_):
pass
with pytest.raises(DagsterInvalidDefinitionError, match=tuple_error_msg):
@solid(config_schema=Field(Tuple[int]))
def _bare_closed_set(_):
pass
def test_field_is_none():
with pytest.raises(DagsterInvalidConfigDefinitionError) as exc_info:
@solid(config_schema={"none_field": None})
def _none_is_bad(_):
pass
assert "Fields cannot be None" in str(exc_info.value)
| 30.634783 | 148 | 0.662255 |
293d5187ca5b3531f4e4f0804e974028c28b7bb1 | 955 | py | Python |
src/SemBrain/Drivers/BaseServo.py | trkelch/sem | 58abdbbaa8ac5f6b4e57b5f5ee890b3258203573 | ["Unlicense"] | null | null | null |
src/SemBrain/Drivers/BaseServo.py | trkelch/sem | 58abdbbaa8ac5f6b4e57b5f5ee890b3258203573 | ["Unlicense"] | null | null | null |
src/SemBrain/Drivers/BaseServo.py | trkelch/sem | 58abdbbaa8ac5f6b4e57b5f5ee890b3258203573 | ["Unlicense"] | null | null | null |
from abc import ABCMeta, abstractmethod
import logging
class IServo(metaclass=ABCMeta):
    @classmethod
    def version(cls): return "1.0"
@abstractmethod
def show(self): raise NotImplementedError
@abstractmethod
def ConfigServo(self, frequency, leftPulseWidth, rightPulseWidth): raise NotImplementedError
@abstractmethod
def SetAngle(self, degree): raise NotImplementedError
class BaseServo(IServo):
"""Base Servo Class"""
log = logging.getLogger('BaseServo')
def __init__(self):
BaseServo.log.info("BaseServo.init")
def show(self):
BaseServo.log.info("BaseServo.show")
def ConfigServo(self, frequency, leftPulseWidth, rightPulseWidth):
BaseServo.log.info("BaseServo.ConfigServo {0} {1} {2}"
.format(frequency, leftPulseWidth, rightPulseWidth))
def SetAngle(self, degree):
BaseServo.log.info("BaseServo.SetAngle {0}".format(degree))
| 34.107143 | 96 | 0.696335 |
6ca071756ecdb751b7d087674977b5a6a3cdb119 | 22,683 | py | Python |
allennlp/common/params.py | MSLars/allennlp | 2cdb8742c8c8c3c38ace4bdfadbdc750a1aa2475 | ["Apache-2.0"] | 1 | 2022-01-06T02:06:23.000Z | 2022-01-06T02:06:23.000Z |
allennlp/common/params.py | MSLars/allennlp | 2cdb8742c8c8c3c38ace4bdfadbdc750a1aa2475 | ["Apache-2.0"] | 52 | 2020-11-11T13:08:25.000Z | 2021-12-16T13:04:30.000Z |
allennlp/common/params.py | MSLars/allennlp | 2cdb8742c8c8c3c38ace4bdfadbdc750a1aa2475 | ["Apache-2.0"] | null | null | null |
import copy
from itertools import chain
import json
import logging
import os
import zlib
from collections import OrderedDict
from collections.abc import MutableMapping
from os import PathLike
from typing import Any, Dict, List, Union, Optional, TypeVar, Iterable, Set
# _jsonnet doesn't work on Windows, so we have to use fakes.
try:
from _jsonnet import evaluate_file, evaluate_snippet
except ImportError:
def evaluate_file(filename: str, **_kwargs) -> str:
logger.warning(
f"error loading _jsonnet (this is expected on Windows), treating {filename} as plain json"
)
with open(filename, "r") as evaluation_file:
return evaluation_file.read()
def evaluate_snippet(_filename: str, expr: str, **_kwargs) -> str:
logger.warning(
"error loading _jsonnet (this is expected on Windows), treating snippet as plain json"
)
return expr
from allennlp.common.checks import ConfigurationError
from allennlp.common.file_utils import cached_path
logger = logging.getLogger(__name__)
def infer_and_cast(value: Any):
"""
In some cases we'll be feeding params dicts to functions we don't own;
for example, PyTorch optimizers. In that case we can't use `pop_int`
or similar to force casts (which means you can't specify `int` parameters
using environment variables). This function takes something that looks JSON-like
and recursively casts things that look like (bool, int, float) to (bool, int, float).
"""
if isinstance(value, (int, float, bool)):
# Already one of our desired types, so leave as is.
return value
elif isinstance(value, list):
# Recursively call on each list element.
return [infer_and_cast(item) for item in value]
elif isinstance(value, dict):
# Recursively call on each dict value.
return {key: infer_and_cast(item) for key, item in value.items()}
elif isinstance(value, str):
# If it looks like a bool, make it a bool.
if value.lower() == "true":
return True
elif value.lower() == "false":
return False
else:
# See if it could be an int.
try:
return int(value)
except ValueError:
pass
# See if it could be a float.
try:
return float(value)
except ValueError:
# Just return it as a string.
return value
else:
raise ValueError(f"cannot infer type of {value}")
def _is_encodable(value: str) -> bool:
"""
We need to filter out environment variables that can't
be unicode-encoded to avoid a "surrogates not allowed"
error in jsonnet.
"""
# Idiomatically you'd like to not check the != b""
# but mypy doesn't like that.
return (value == "") or (value.encode("utf-8", "ignore") != b"")
def _environment_variables() -> Dict[str, str]:
"""
Wraps `os.environ` to filter out non-encodable values.
"""
return {key: value for key, value in os.environ.items() if _is_encodable(value)}
T = TypeVar("T", dict, list)
def with_overrides(original: T, overrides_dict: Dict[str, Any], prefix: str = "") -> T:
merged: T
keys: Union[Iterable[str], Iterable[int]]
if isinstance(original, list):
merged = [None] * len(original)
keys = range(len(original))
elif isinstance(original, dict):
merged = {}
keys = chain(
original.keys(), (k for k in overrides_dict if "." not in k and k not in original)
)
else:
if prefix:
raise ValueError(
f"overrides for '{prefix[:-1]}.*' expected list or dict in original, "
f"found {type(original)} instead"
)
else:
raise ValueError(f"expected list or dict, found {type(original)} instead")
used_override_keys: Set[str] = set()
for key in keys:
if str(key) in overrides_dict:
merged[key] = copy.deepcopy(overrides_dict[str(key)])
used_override_keys.add(str(key))
else:
overrides_subdict = {}
for o_key in overrides_dict:
if o_key.startswith(f"{key}."):
overrides_subdict[o_key[len(f"{key}.") :]] = overrides_dict[o_key]
used_override_keys.add(o_key)
if overrides_subdict:
merged[key] = with_overrides(
original[key], overrides_subdict, prefix=prefix + f"{key}."
)
else:
merged[key] = copy.deepcopy(original[key])
unused_override_keys = [prefix + key for key in set(overrides_dict.keys()) - used_override_keys]
if unused_override_keys:
raise ValueError(f"overrides dict contains unused keys: {unused_override_keys}")
return merged
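# A small illustration of with_overrides (hypothetical config and keys): dotted
# override keys reach into nested dicts, while plain keys replace values outright.
assert with_overrides({"model": {"type": "lstm", "dim": 5}}, {"model.dim": 10}) == {
    "model": {"type": "lstm", "dim": 10}
}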
def parse_overrides(
serialized_overrides: str, ext_vars: Optional[Dict[str, Any]] = None
) -> Dict[str, Any]:
if serialized_overrides:
ext_vars = {**_environment_variables(), **(ext_vars or {})}
return json.loads(evaluate_snippet("", serialized_overrides, ext_vars=ext_vars))
else:
return {}
def _is_dict_free(obj: Any) -> bool:
"""
    Returns False if obj is a dict, or if it is a list containing an element that is not dict-free.
"""
if isinstance(obj, dict):
return False
elif isinstance(obj, list):
return all(_is_dict_free(item) for item in obj)
else:
return True
class Params(MutableMapping):
"""
Represents a parameter dictionary with a history, and contains other functionality around
parameter passing and validation for AllenNLP.
There are currently two benefits of a `Params` object over a plain dictionary for parameter
passing:
1. We handle a few kinds of parameter validation, including making sure that parameters
representing discrete choices actually have acceptable values, and making sure no extra
parameters are passed.
2. We log all parameter reads, including default values. This gives a more complete
specification of the actual parameters used than is given in a JSON file, because
those may not specify what default values were used, whereas this will log them.
!!! Consumption
The convention for using a `Params` object in AllenNLP is that you will consume the parameters
as you read them, so that there are none left when you've read everything you expect. This
lets us easily validate that you didn't pass in any `extra` parameters, just by making sure
that the parameter dictionary is empty. You should do this when you're done handling
parameters, by calling `Params.assert_empty`.
"""
# This allows us to check for the presence of "None" as a default argument,
# which we require because we make a distinction between passing a value of "None"
# and passing no value to the default parameter of "pop".
DEFAULT = object()
def __init__(self, params: Dict[str, Any], history: str = "") -> None:
self.params = _replace_none(params)
self.history = history
def pop(self, key: str, default: Any = DEFAULT, keep_as_dict: bool = False) -> Any:
"""
Performs the functionality associated with dict.pop(key), along with checking for
returned dictionaries, replacing them with Param objects with an updated history
(unless keep_as_dict is True, in which case we leave them as dictionaries).
If `key` is not present in the dictionary, and no default was specified, we raise a
`ConfigurationError`, instead of the typical `KeyError`.
"""
if default is self.DEFAULT:
try:
value = self.params.pop(key)
except KeyError:
msg = f'key "{key}" is required'
if self.history:
msg += f' at location "{self.history}"'
raise ConfigurationError(msg)
else:
value = self.params.pop(key, default)
if keep_as_dict or _is_dict_free(value):
logger.info(f"{self.history}{key} = {value}")
return value
else:
return self._check_is_dict(key, value)
def pop_int(self, key: str, default: Any = DEFAULT) -> Optional[int]:
"""
Performs a pop and coerces to an int.
"""
value = self.pop(key, default)
if value is None:
return None
else:
return int(value)
def pop_float(self, key: str, default: Any = DEFAULT) -> Optional[float]:
"""
Performs a pop and coerces to a float.
"""
value = self.pop(key, default)
if value is None:
return None
else:
return float(value)
def pop_bool(self, key: str, default: Any = DEFAULT) -> Optional[bool]:
"""
Performs a pop and coerces to a bool.
"""
value = self.pop(key, default)
if value is None:
return None
elif isinstance(value, bool):
return value
elif value == "true":
return True
elif value == "false":
return False
else:
raise ValueError("Cannot convert variable to bool: " + value)
def get(self, key: str, default: Any = DEFAULT):
"""
Performs the functionality associated with dict.get(key) but also checks for returned
dicts and returns a Params object in their place with an updated history.
"""
default = None if default is self.DEFAULT else default
value = self.params.get(key, default)
return self._check_is_dict(key, value)
def pop_choice(
self,
key: str,
choices: List[Any],
default_to_first_choice: bool = False,
allow_class_names: bool = True,
) -> Any:
"""
Gets the value of `key` in the `params` dictionary, ensuring that the value is one of
the given choices. Note that this `pops` the key from params, modifying the dictionary,
consistent with how parameters are processed in this codebase.
# Parameters
key: `str`
Key to get the value from in the param dictionary
choices: `List[Any]`
A list of valid options for values corresponding to `key`. For example, if you're
specifying the type of encoder to use for some part of your model, the choices might be
the list of encoder classes we know about and can instantiate. If the value we find in
the param dictionary is not in `choices`, we raise a `ConfigurationError`, because
the user specified an invalid value in their parameter file.
default_to_first_choice: `bool`, optional (default = `False`)
If this is `True`, we allow the `key` to not be present in the parameter
            dictionary. If the key is not present, we will return the first choice in the
            `choices` list as the value. If this is `False`, we raise a
`ConfigurationError`, because specifying the `key` is required (e.g., you `have` to
specify your model class when running an experiment, but you can feel free to use
default settings for encoders if you want).
allow_class_names: `bool`, optional (default = `True`)
If this is `True`, then we allow unknown choices that look like fully-qualified class names.
This is to allow e.g. specifying a model type as my_library.my_model.MyModel
and importing it on the fly. Our check for "looks like" is extremely lenient
and consists of checking that the value contains a '.'.
"""
default = choices[0] if default_to_first_choice else self.DEFAULT
value = self.pop(key, default)
ok_because_class_name = allow_class_names and "." in value
if value not in choices and not ok_because_class_name:
key_str = self.history + key
message = (
f"{value} not in acceptable choices for {key_str}: {choices}. "
"You should either use the --include-package flag to make sure the correct module "
"is loaded, or use a fully qualified class name in your config file like "
"""{"model": "my_module.models.MyModel"} to have it imported automatically."""
)
raise ConfigurationError(message)
return value
def as_dict(self, quiet: bool = False, infer_type_and_cast: bool = False):
"""
Sometimes we need to just represent the parameters as a dict, for instance when we pass
them to PyTorch code.
# Parameters
quiet: `bool`, optional (default = `False`)
Whether to log the parameters before returning them as a dict.
infer_type_and_cast: `bool`, optional (default = `False`)
If True, we infer types and cast (e.g. things that look like floats to floats).
"""
if infer_type_and_cast:
params_as_dict = infer_and_cast(self.params)
else:
params_as_dict = self.params
if quiet:
return params_as_dict
def log_recursively(parameters, history):
for key, value in parameters.items():
if isinstance(value, dict):
new_local_history = history + key + "."
log_recursively(value, new_local_history)
else:
logger.info(f"{history}{key} = {value}")
log_recursively(self.params, self.history)
return params_as_dict
def as_flat_dict(self) -> Dict[str, Any]:
"""
Returns the parameters of a flat dictionary from keys to values.
Nested structure is collapsed with periods.
"""
flat_params = {}
def recurse(parameters, path):
for key, value in parameters.items():
newpath = path + [key]
if isinstance(value, dict):
recurse(value, newpath)
else:
flat_params[".".join(newpath)] = value
recurse(self.params, [])
return flat_params
def duplicate(self) -> "Params":
"""
Uses `copy.deepcopy()` to create a duplicate (but fully distinct)
copy of these Params.
"""
return copy.deepcopy(self)
def assert_empty(self, class_name: str):
"""
Raises a `ConfigurationError` if `self.params` is not empty. We take `class_name` as
an argument so that the error message gives some idea of where an error happened, if there
was one. `class_name` should be the name of the `calling` class, the one that got extra
parameters (if there are any).
"""
if self.params:
raise ConfigurationError(
"Extra parameters passed to {}: {}".format(class_name, self.params)
)
def __getitem__(self, key):
if key in self.params:
return self._check_is_dict(key, self.params[key])
else:
raise KeyError(str(key))
def __setitem__(self, key, value):
self.params[key] = value
def __delitem__(self, key):
del self.params[key]
def __iter__(self):
return iter(self.params)
def __len__(self):
return len(self.params)
def _check_is_dict(self, new_history, value):
if isinstance(value, dict):
new_history = self.history + new_history + "."
return Params(value, history=new_history)
if isinstance(value, list):
value = [self._check_is_dict(f"{new_history}.{i}", v) for i, v in enumerate(value)]
return value
@classmethod
def from_file(
cls,
params_file: Union[str, PathLike],
params_overrides: Union[str, Dict[str, Any]] = "",
ext_vars: dict = None,
) -> "Params":
"""
Load a `Params` object from a configuration file.
# Parameters
params_file: `str`
The path to the configuration file to load.
params_overrides: `Union[str, Dict[str, Any]]`, optional (default = `""`)
A dict of overrides that can be applied to final object.
e.g. `{"model.embedding_dim": 10}` will change the value of "embedding_dim"
within the "model" object of the config to 10. If you wanted to override the entire
"model" object of the config, you could do `{"model": {"type": "other_type", ...}}`.
ext_vars: `dict`, optional
Our config files are Jsonnet, which allows specifying external variables
for later substitution. Typically we substitute these using environment
variables; however, you can also specify them here, in which case they
take priority over environment variables.
e.g. {"HOME_DIR": "/Users/allennlp/home"}
"""
if ext_vars is None:
ext_vars = {}
# redirect to cache, if necessary
params_file = cached_path(params_file)
ext_vars = {**_environment_variables(), **ext_vars}
file_dict = json.loads(evaluate_file(params_file, ext_vars=ext_vars))
if isinstance(params_overrides, dict):
params_overrides = json.dumps(params_overrides)
overrides_dict = parse_overrides(params_overrides, ext_vars=ext_vars)
if overrides_dict:
param_dict = with_overrides(file_dict, overrides_dict)
else:
param_dict = file_dict
return cls(param_dict)
def to_file(self, params_file: str, preference_orders: List[List[str]] = None) -> None:
with open(params_file, "w") as handle:
json.dump(self.as_ordered_dict(preference_orders), handle, indent=4)
def as_ordered_dict(self, preference_orders: List[List[str]] = None) -> OrderedDict:
"""
Returns Ordered Dict of Params from list of partial order preferences.
# Parameters
preference_orders: `List[List[str]]`, optional
`preference_orders` is list of partial preference orders. ["A", "B", "C"] means
"A" > "B" > "C". For multiple preference_orders first will be considered first.
Keys not found, will have last but alphabetical preference. Default Preferences:
`[["dataset_reader", "iterator", "model", "train_data_path", "validation_data_path",
"test_data_path", "trainer", "vocabulary"], ["type"]]`
"""
params_dict = self.as_dict(quiet=True)
if not preference_orders:
preference_orders = []
preference_orders.append(
[
"dataset_reader",
"iterator",
"model",
"train_data_path",
"validation_data_path",
"test_data_path",
"trainer",
"vocabulary",
]
)
preference_orders.append(["type"])
def order_func(key):
# Makes a tuple to use for ordering. The tuple is an index into each of the `preference_orders`,
# followed by the key itself. This gives us integer sorting if you have a key in one of the
# `preference_orders`, followed by alphabetical ordering if not.
order_tuple = [
order.index(key) if key in order else len(order) for order in preference_orders
]
return order_tuple + [key]
def order_dict(dictionary, order_func):
# Recursively orders dictionary according to scoring order_func
result = OrderedDict()
for key, val in sorted(dictionary.items(), key=lambda item: order_func(item[0])):
result[key] = order_dict(val, order_func) if isinstance(val, dict) else val
return result
return order_dict(params_dict, order_func)
def get_hash(self) -> str:
"""
Returns a hash code representing the current state of this `Params` object. We don't
want to implement `__hash__` because that has deeper python implications (and this is a
mutable object), but this will give you a representation of the current state.
We use `zlib.adler32` instead of Python's builtin `hash` because the random seed for the
latter is reset on each new program invocation, as discussed here:
https://stackoverflow.com/questions/27954892/deterministic-hashing-in-python-3.
"""
dumped = json.dumps(self.params, sort_keys=True)
hashed = zlib.adler32(dumped.encode())
return str(hashed)
def __str__(self) -> str:
return f"{self.history}Params({self.params})"
def pop_choice(
params: Dict[str, Any],
key: str,
choices: List[Any],
default_to_first_choice: bool = False,
history: str = "?.",
allow_class_names: bool = True,
) -> Any:
"""
Performs the same function as `Params.pop_choice`, but is required in order to deal with
places that the Params object is not welcome, such as inside Keras layers. See the docstring
of that method for more detail on how this function works.
This method adds a `history` parameter, in the off-chance that you know it, so that we can
reproduce `Params.pop_choice` exactly. We default to using "?." if you don't know the
history, so you'll have to fix that in the log if you want to actually recover the logged
parameters.
"""
value = Params(params, history).pop_choice(
key, choices, default_to_first_choice, allow_class_names=allow_class_names
)
return value
def _replace_none(params: Any) -> Any:
if params == "None":
return None
elif isinstance(params, dict):
for key, value in params.items():
params[key] = _replace_none(value)
return params
elif isinstance(params, list):
return [_replace_none(value) for value in params]
return params
def remove_keys_from_params(params: Params, keys: List[str] = ["pretrained_file", "initializer"]):
if isinstance(params, Params): # The model could possibly be a string, for example.
param_keys = params.keys()
for key in keys:
if key in param_keys:
del params[key]
for value in params.values():
if isinstance(value, Params):
remove_keys_from_params(value, keys)
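# A minimal consumption sketch for Params (hypothetical keys): pop everything you
# expect, then call assert_empty to catch typos or leftover configuration.
if __name__ == "__main__":
    _demo_params = Params({"type": "demo", "hidden_dim": "64"})
    _kind = _demo_params.pop("type")
    _dim = _demo_params.pop_int("hidden_dim")
    _demo_params.assert_empty("demo")
    print(_kind, _dim)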
| 38.445763 | 109 | 0.617599 |
e0f0c0d4ff604017b9a83358f11c6d6159aa5f06 | 7,728 | py | Python |
tensorflow/contrib/tensor_forest/python/tensor_forest_test.py | PaulWang1905/tensorflow | ebf12d22b4801fb8dab5034cc94562bf7cc33fa0 | ["Apache-2.0"] | 848 | 2019-12-03T00:16:17.000Z | 2022-03-31T22:53:17.000Z |
tensorflow/contrib/tensor_forest/python/tensor_forest_test.py | PaulWang1905/tensorflow | ebf12d22b4801fb8dab5034cc94562bf7cc33fa0 | ["Apache-2.0"] | 656 | 2019-12-03T00:48:46.000Z | 2022-03-31T18:41:54.000Z |
tensorflow/contrib/tensor_forest/python/tensor_forest_test.py | PaulWang1905/tensorflow | ebf12d22b4801fb8dab5034cc94562bf7cc33fa0 | ["Apache-2.0"] | 506 | 2019-12-03T00:46:26.000Z | 2022-03-30T10:34:56.000Z |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.contrib.tensor_forest.ops.tensor_forest."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from google.protobuf.json_format import ParseDict
from tensorflow.contrib.decision_trees.proto import generic_tree_model_pb2 as _tree_proto
from tensorflow.contrib.tensor_forest.python import tensor_forest
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops import resources
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
class TensorForestTest(test_util.TensorFlowTestCase):
def testForestHParams(self):
hparams = tensor_forest.ForestHParams(
num_classes=2,
num_trees=100,
max_nodes=1000,
split_after_samples=25,
num_features=60).fill()
self.assertEquals(2, hparams.num_classes)
self.assertEquals(3, hparams.num_output_columns)
self.assertEquals(10, hparams.num_splits_to_consider)
# Default value of valid_leaf_threshold
self.assertEquals(1, hparams.valid_leaf_threshold)
self.assertEquals(0, hparams.base_random_seed)
def testForestHParamsBigTree(self):
hparams = tensor_forest.ForestHParams(
num_classes=2,
num_trees=100,
max_nodes=1000000,
split_after_samples=25,
num_features=1000).fill()
self.assertEquals(31, hparams.num_splits_to_consider)
def testForestHParamsStringParams(self):
hparams = tensor_forest.ForestHParams(
num_classes=2,
num_trees=100,
max_nodes=1000000,
split_after_samples="25",
num_splits_to_consider="1000000",
num_features=1000).fill()
self.assertEquals("1000000", hparams.num_splits_to_consider)
def testTrainingConstructionClassification(self):
input_data = [[-1., 0.], [-1., 2.], # node 1
[1., 0.], [1., -2.]] # node 2
input_labels = [0, 1, 2, 3]
params = tensor_forest.ForestHParams(
num_classes=4,
num_features=2,
num_trees=10,
max_nodes=1000,
split_after_samples=25).fill()
graph_builder = tensor_forest.RandomForestGraphs(params)
graph = graph_builder.training_graph(input_data, input_labels)
self.assertTrue(isinstance(graph, ops.Operation))
def testTrainingConstructionRegression(self):
input_data = [[-1., 0.], [-1., 2.], # node 1
[1., 0.], [1., -2.]] # node 2
input_labels = [0, 1, 2, 3]
params = tensor_forest.ForestHParams(
num_classes=4,
num_features=2,
num_trees=10,
max_nodes=1000,
split_after_samples=25,
regression=True).fill()
graph_builder = tensor_forest.RandomForestGraphs(params)
graph = graph_builder.training_graph(input_data, input_labels)
self.assertTrue(isinstance(graph, ops.Operation))
def testInferenceConstruction(self):
input_data = [[-1., 0.], [-1., 2.], # node 1
[1., 0.], [1., -2.]] # node 2
params = tensor_forest.ForestHParams(
num_classes=4,
num_features=2,
num_trees=10,
max_nodes=1000,
split_after_samples=25).fill()
graph_builder = tensor_forest.RandomForestGraphs(params)
probs, paths, var = graph_builder.inference_graph(input_data)
self.assertTrue(isinstance(probs, ops.Tensor))
self.assertTrue(isinstance(paths, ops.Tensor))
self.assertTrue(isinstance(var, ops.Tensor))
def testInfrenceFromRestoredModel(self):
input_data = [[-1., 0.], [-1., 2.], # node 1
[1., 0.], [1., -2.]] # node 2
expected_prediction = [[0.0, 1.0], [0.0, 1.0],
[0.0, 1.0], [0.0, 1.0]]
hparams = tensor_forest.ForestHParams(
num_classes=2,
num_features=2,
num_trees=1,
max_nodes=1000,
split_after_samples=25).fill()
tree_weight = {'decisionTree':
{'nodes':
[{'binaryNode':
{'rightChildId': 2,
'leftChildId': 1,
'inequalityLeftChildTest':
{'featureId': {'id': '0'},
'threshold': {'floatValue': 0}}}},
{'leaf': {'vector':
{'value': [{'floatValue': 0.0},
{'floatValue': 1.0}]}},
'nodeId': 1},
{'leaf': {'vector':
{'value': [{'floatValue': 0.0},
{'floatValue': 1.0}]}},
'nodeId': 2}]}}
restored_tree_param = ParseDict(tree_weight,
_tree_proto.Model()).SerializeToString()
graph_builder = tensor_forest.RandomForestGraphs(hparams,
[restored_tree_param])
probs, paths, var = graph_builder.inference_graph(input_data)
self.assertTrue(isinstance(probs, ops.Tensor))
self.assertTrue(isinstance(paths, ops.Tensor))
self.assertTrue(isinstance(var, ops.Tensor))
with self.cached_session():
variables.global_variables_initializer().run()
resources.initialize_resources(resources.shared_resources()).run()
self.assertEquals(probs.eval().shape, (4, 2))
self.assertEquals(probs.eval().tolist(), expected_prediction)
def testTrainingConstructionClassificationSparse(self):
input_data = sparse_tensor.SparseTensor(
indices=[[0, 0], [0, 3], [1, 0], [1, 7], [2, 1], [3, 9]],
values=[-1.0, 0.0, -1., 2., 1., -2.0],
dense_shape=[4, 10])
input_labels = [0, 1, 2, 3]
params = tensor_forest.ForestHParams(
num_classes=4,
num_features=10,
num_trees=10,
max_nodes=1000,
split_after_samples=25).fill()
graph_builder = tensor_forest.RandomForestGraphs(params)
graph = graph_builder.training_graph(input_data, input_labels)
self.assertTrue(isinstance(graph, ops.Operation))
def testInferenceConstructionSparse(self):
input_data = sparse_tensor.SparseTensor(
indices=[[0, 0], [0, 3],
[1, 0], [1, 7],
[2, 1],
[3, 9]],
values=[-1.0, 0.0,
-1., 2.,
1.,
-2.0],
dense_shape=[4, 10])
params = tensor_forest.ForestHParams(
num_classes=4,
num_features=10,
num_trees=10,
max_nodes=1000,
regression=True,
split_after_samples=25).fill()
graph_builder = tensor_forest.RandomForestGraphs(params)
probs, paths, var = graph_builder.inference_graph(input_data)
self.assertTrue(isinstance(probs, ops.Tensor))
self.assertTrue(isinstance(paths, ops.Tensor))
self.assertTrue(isinstance(var, ops.Tensor))
if __name__ == "__main__":
googletest.main()
| 37.697561 | 89 | 0.616977 |
257b0f9cba0b970b5fd2fee1b9b3979e40721da6 | 626 | py | Python |
python-sdk/tutorials/automl-with-azureml/forecasting-bike-share/metrics_helper.py | 0mza987/azureml-examples | 2abb872f1278d4b4e65587e033f38a058512b2e3 | ["MIT"] | 331 | 2020-08-26T15:54:23.000Z | 2022-03-31T17:10:58.000Z |
python-sdk/tutorials/automl-with-azureml/forecasting-bike-share/metrics_helper.py | 0mza987/azureml-examples | 2abb872f1278d4b4e65587e033f38a058512b2e3 | ["MIT"] | 262 | 2020-08-25T23:17:17.000Z | 2022-03-31T00:25:32.000Z |
python-sdk/tutorials/automl-with-azureml/forecasting-bike-share/metrics_helper.py | 0mza987/azureml-examples | 2abb872f1278d4b4e65587e033f38a058512b2e3 | ["MIT"] | 307 | 2020-09-04T01:02:11.000Z | 2022-03-31T16:46:48.000Z |
import pandas as pd
import numpy as np
def APE(actual, pred):
"""
Calculate absolute percentage error.
Returns a vector of APE values with the same length as actual/pred.
"""
return 100 * np.abs((actual - pred) / actual)
def MAPE(actual, pred):
"""
Calculate mean absolute percentage error.
Remove NaN entries and values where actual is close to zero before averaging.
"""
not_na = ~(np.isnan(actual) | np.isnan(pred))
not_zero = ~np.isclose(actual, 0.0)
actual_safe = actual[not_na & not_zero]
pred_safe = pred[not_na & not_zero]
return np.mean(APE(actual_safe, pred_safe))
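# A small usage sketch for the helpers above (hedged addition, not part of the
# original module): it shows how MAPE masks NaNs and near-zero actuals before
# averaging. The numbers are illustrative only.
if __name__ == "__main__":
    actual = np.array([100.0, 0.0, np.nan, 50.0])
    pred = np.array([110.0, 5.0, 1.0, 45.0])
    # Only the first and last pairs survive the masking, so the result is
    # (10% + 10%) / 2 = 10.0.
    print(MAPE(actual, pred))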
| 27.217391
| 68
| 0.642173
|
db6b98144f71b269141231c40edea8921bec4937
| 8,880
|
py
|
Python
|
sweetie_bot_flexbe_behaviors/src/sweetie_bot_flexbe_behaviors/galacon2018presentation2_sm.py
|
sweetie-bot-project/sweetie_bot_flexbe_behaviors
|
d8511564bb9d6125838b4373263fb68a8b858d70
|
[
"BSD-3-Clause"
] | null | null | null |
sweetie_bot_flexbe_behaviors/src/sweetie_bot_flexbe_behaviors/galacon2018presentation2_sm.py
|
sweetie-bot-project/sweetie_bot_flexbe_behaviors
|
d8511564bb9d6125838b4373263fb68a8b858d70
|
[
"BSD-3-Clause"
] | null | null | null |
sweetie_bot_flexbe_behaviors/src/sweetie_bot_flexbe_behaviors/galacon2018presentation2_sm.py
|
sweetie-bot-project/sweetie_bot_flexbe_behaviors
|
d8511564bb9d6125838b4373263fb68a8b858d70
|
[
"BSD-3-Clause"
] | 1
|
2019-12-23T05:06:26.000Z
|
2019-12-23T05:06:26.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###########################################################
# WARNING: Generated code! #
# ************************** #
# Manual changes may get lost if file is generated again. #
# Only code inside the [MANUAL] tags will be kept. #
###########################################################
from flexbe_core import Behavior, Autonomy, OperatableStateMachine, ConcurrencyContainer, PriorityContainer, Logger
from flexbe_states.operator_decision_state import OperatorDecisionState
from sweetie_bot_flexbe_states.compound_action_state import CompoundAction
from sweetie_bot_flexbe_states.wait_for_message_state import WaitForMessageState
from sweetie_bot_flexbe_behaviors.watchpresentaion_sm import WatchPresentaionSM
# Additional imports can be added inside the following tags
# [MANUAL_IMPORT]
# [/MANUAL_IMPORT]
'''
Created on Mon Jul 16 2018
@author: mutronics
'''
class Galacon2018Presentation2SM(Behavior):
'''
Second part of Galacon 2018 Presentation: Sweetie acts as a moderator.
'''
def __init__(self):
super(Galacon2018Presentation2SM, self).__init__()
self.name = 'Galacon2018Presentation2'
# parameters of this behavior
# references to used behaviors
self.add_behavior(WatchPresentaionSM, 'WatchPresentaion')
self.add_behavior(WatchPresentaionSM, 'WatchPresentaion_2')
self.add_behavior(WatchPresentaionSM, 'WatchPresentaion_3')
self.add_behavior(WatchPresentaionSM, 'WatchPresentaion_4')
# Additional initialization code can be added inside the following tags
# [MANUAL_INIT]
# set maximal and minimal head rotation angle for WatchPresentation
max2 = 0.2
min2 = -0.7
self.contains['WatchPresentaion'].max2 = max2
self.contains['WatchPresentaion_2'].max2 = max2
self.contains['WatchPresentaion_3'].max2 = max2
self.contains['WatchPresentaion_4'].max2 = max2
self.contains['WatchPresentaion'].min2 = min2
self.contains['WatchPresentaion_2'].min2 = min2
self.contains['WatchPresentaion_3'].min2 = min2
self.contains['WatchPresentaion_4'].min2 = min2
# [/MANUAL_INIT]
# Behavior comments:
def create(self):
joy_topic = '/hmi/joystick'
# x:410 y:173, x:436 y:319
_state_machine = OperatableStateMachine(outcomes=['finished', 'failed'])
_state_machine.userdata.head_pose_joints = [ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
_state_machine.userdata.rand_head_config = {'min2356':[-0.5,0.1,-1.0,-1.0], 'max2356':[0.5,0.5,1.0,1.0]}
# Additional creation code can be added inside the following tags
# [MANUAL_CREATE]
# [/MANUAL_CREATE]
with _state_machine:
# x:322 y:22
OperatableStateMachine.add('SelectPart',
OperatorDecisionState(outcomes=['history', 'construction', 'electronics', 'software', 'ending'], hint='Select the part of presentation.', suggestion='history'),
transitions={'history': 'WatchPresentaion', 'construction': 'WatchPresentaion_2', 'electronics': 'WatchPresentaion_3', 'software': 'WatchPresentaion_4', 'ending': 'ImHere'},
autonomy={'history': Autonomy.Full, 'construction': Autonomy.Full, 'electronics': Autonomy.Full, 'software': Autonomy.Full, 'ending': Autonomy.Full})
# x:580 y:313
OperatableStateMachine.add('NewBody',
CompoundAction(t1=[0,0.0], type1='voice/play_wav', cmd1='thank_you_mutronics_then_renha_and_shiron_will_tell_about_my_future_body_proto3', t2=[0,0.0], type2='motion/joint_trajectory', cmd2='look_on_printer_fast', t3=[0,1.0], type3=None, cmd3='', t4=[0,0.0], type4=None, cmd4=''),
transitions={'success': 'WatchPresentaion_3', 'failure': 'failed'},
autonomy={'success': Autonomy.Off, 'failure': Autonomy.Off})
# x:174 y:592
OperatableStateMachine.add('Continue',
CompoundAction(t1=[0,0.0], type1='voice/play_wav', cmd1='but_we_have_to_continue_the_next_topic_is_my_electronic', t2=[0,0.0], type2='motion/joint_trajectory', cmd2='head_node', t3=[0,0.0], type3=None, cmd3='', t4=[0,0.0], type4=None, cmd4=''),
transitions={'success': 'WatchPresentaion_4', 'failure': 'failed'},
autonomy={'success': Autonomy.Off, 'failure': Autonomy.Off})
# x:44 y:382
OperatableStateMachine.add('Software',
CompoundAction(t1=[0,0.0], type1='voice/play_wav', cmd1='thank_you_zuviel_i_really_need_the_new_and_more_powerful_on_board_computer', t2=[0,0.0], type2='motion/joint_trajectory', cmd2='look_on_printer', t3=[0,0.0], type3=None, cmd3='', t4=[0,0.0], type4=None, cmd4=''),
transitions={'success': 'finished', 'failure': 'failed'},
autonomy={'success': Autonomy.Off, 'failure': Autonomy.Off})
# x:36 y:163
OperatableStateMachine.add('WaitKey13',
WaitForMessageState(topic=joy_topic, condition=lambda x: x.buttons[12], buffered=False, clear=False),
transitions={'received': 'TurnOff', 'unavailable': 'failed'},
autonomy={'received': Autonomy.Off, 'unavailable': Autonomy.Off},
remapping={'message': 'message'})
# x:31 y:243
OperatableStateMachine.add('ImHere',
CompoundAction(t1=[0,0.0], type1='motion/joint_trajectory', cmd1='look_on_hoof', t2=[0,6.0], type2='voice/play_wav', cmd2='hello_im_here_again_but_youre_monster_anyway', t3=[0,0.0], type3=None, cmd3='', t4=[0,0.0], type4=None, cmd4=''),
transitions={'success': 'WaitKey13', 'failure': 'failed'},
autonomy={'success': Autonomy.Off, 'failure': Autonomy.Off})
# x:37 y:83
OperatableStateMachine.add('TurnOff',
CompoundAction(t1=[0,0.0], type1='voice/play_wav', cmd1='thank_you_for_your_questions_and_attention_our_presentation_terminates_here', t2=[0,0.0], type2='motion/joint_trajectory', cmd2='bow_begin', t3=[0,3.0], type3='motion/joint_trajectory', cmd3='bow_end', t4=[0,4.5], type4='motion/joint_trajectory', cmd4='prance'),
transitions={'success': 'finished', 'failure': 'failed'},
autonomy={'success': Autonomy.Off, 'failure': Autonomy.Off})
# x:391 y:593
OperatableStateMachine.add('Interesting',
CompoundAction(t1=[0,0.0], type1='voice/play_wav', cmd1='its_very_interesting_i_cant_wait_when_my_data_can_be_transferred_inside_my_new_body', t2=[0,0.0], type2='motion/joint_trajectory', cmd2='head_lean_forward_begin', t3=[0,1.0], type3='motion/joint_trajectory', cmd3='head_suprised', t4=[0,2.5], type4='motion/joint_trajectory', cmd4='head_lean_forward_end'),
transitions={'success': 'Continue', 'failure': 'failed'},
autonomy={'success': Autonomy.Off, 'failure': Autonomy.Off})
# x:561 y:20
OperatableStateMachine.add('WatchPresentaion',
self.use_behavior(WatchPresentaionSM, 'WatchPresentaion'),
transitions={'finished': 'Chances', 'failed': 'failed'},
autonomy={'finished': Autonomy.Inherit, 'failed': Autonomy.Inherit},
remapping={'head_pose_joints': 'head_pose_joints', 'rand_head_config': 'rand_head_config'})
# x:557 y:211
OperatableStateMachine.add('WatchPresentaion_2',
self.use_behavior(WatchPresentaionSM, 'WatchPresentaion_2'),
transitions={'finished': 'NewBody', 'failed': 'failed'},
autonomy={'finished': Autonomy.Inherit, 'failed': Autonomy.Inherit},
remapping={'head_pose_joints': 'head_pose_joints', 'rand_head_config': 'rand_head_config'})
# x:549 y:436
OperatableStateMachine.add('WatchPresentaion_3',
self.use_behavior(WatchPresentaionSM, 'WatchPresentaion_3'),
transitions={'finished': 'Interesting', 'failed': 'failed'},
autonomy={'finished': Autonomy.Inherit, 'failed': Autonomy.Inherit},
remapping={'head_pose_joints': 'head_pose_joints', 'rand_head_config': 'rand_head_config'})
# x:49 y:468
OperatableStateMachine.add('WatchPresentaion_4',
self.use_behavior(WatchPresentaionSM, 'WatchPresentaion_4'),
transitions={'finished': 'Software', 'failed': 'failed'},
autonomy={'finished': Autonomy.Inherit, 'failed': Autonomy.Inherit},
remapping={'head_pose_joints': 'head_pose_joints', 'rand_head_config': 'rand_head_config'})
# x:581 y:118
OperatableStateMachine.add('Chances',
CompoundAction(t1=[0,0.0], type1='voice/play_wav', cmd1='the_chances_of_success_are_100_percent', t2=[0,0.0], type2='motion/joint_trajectory', cmd2='little_shake_fast', t3=[0,0.0], type3=None, cmd3='', t4=[0,0.0], type4=None, cmd4=''),
transitions={'success': 'WatchPresentaion_2', 'failure': 'failed'},
autonomy={'success': Autonomy.Off, 'failure': Autonomy.Off})
return _state_machine
# Private functions can be added inside the following tags
# [MANUAL_FUNC]
# [/MANUAL_FUNC]
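# Hedged sketch (not generated code): the [MANUAL_INIT] block above shows the
# FlexBE pattern of overriding parameters of contained behaviors through
# self.contains[...]. The helper below restates that pattern generically; the
# attribute names 'max2'/'min2' come from this file, everything else is
# illustrative.
def _set_contained_head_limits(behavior, names, max2, min2):
    # Apply the same head-rotation limits to every listed contained behavior
    # (e.g. the four 'WatchPresentaion*' instances added in __init__ above).
    for name in names:
        behavior.contains[name].max2 = max2
        behavior.contains[name].min2 = min2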
| 51.929825
| 372
| 0.67714
|
807f029f898a434881f2d09adaac2250fc7dd2b5
| 22,383
|
py
|
Python
|
views/pathview/strand/xoveritem.py
|
turalaksel/cadnano2.1
|
5e5b0e9085755c2e810346d3e580994bf4fad174
|
[
"MIT"
] | 1
|
2019-08-20T01:41:18.000Z
|
2019-08-20T01:41:18.000Z
|
views/pathview/strand/xoveritem.py
|
turalaksel/cadnano2.1
|
5e5b0e9085755c2e810346d3e580994bf4fad174
|
[
"MIT"
] | null | null | null |
views/pathview/strand/xoveritem.py
|
turalaksel/cadnano2.1
|
5e5b0e9085755c2e810346d3e580994bf4fad174
|
[
"MIT"
] | null | null | null |
"""
xoveritem.py
Created by Nick on 2011-05-25.
"""
from views import styles
import util
# import Qt stuff into the module namespace with PySide, PyQt4 independence
util.qtWrapImport('QtCore', globals(), ['QPointF', 'QRectF', 'Qt', 'QEvent'])
util.qtWrapImport('QtGui', globals(), ['QBrush',
'QFont',
'QPen',
'QPolygonF',
'QPainterPath',
'QColor',
'QFontMetrics'])
util.qtWrapImport('QtWidgets', globals(), ['QGraphicsItem',
'QGraphicsSimpleTextItem',
'QGraphicsRectItem',
'QGraphicsPathItem'])
_baseWidth = styles.PATH_BASE_WIDTH
_toHelixNumFont = styles.XOVER_LABEL_FONT
# precalculate the height of a number font. Assumes a fixed font
# and that only numbers will be used for labels
_fm = QFontMetrics(_toHelixNumFont)
_enabbrush = QBrush(Qt.SolidPattern) # Also for the helix number label
_nobrush = QBrush(Qt.NoBrush)
# _rect = QRectF(0, 0, baseWidth, baseWidth)
_xScale = styles.PATH_XOVER_LINE_SCALE_X # control point x constant
_yScale = styles.PATH_XOVER_LINE_SCALE_Y # control point y constant
_rect = QRectF(0, 0, _baseWidth, _baseWidth)
class XoverNode3(QGraphicsRectItem):
"""
This is a QGraphicsRectItem to allow actions and also a
QGraphicsSimpleTextItem to allow a label to be drawn
"""
def __init__(self, virtualHelixItem, xoverItem, strand3p, idx):
super(XoverNode3, self).__init__(virtualHelixItem)
self._vhi = virtualHelixItem
self._xoverItem = xoverItem
self._idx = idx
self._isOnTop = virtualHelixItem.isStrandOnTop(strand3p)
self._isDrawn5to3 = strand3p.strandSet().isDrawn5to3()
self._strandType = strand3p.strandSet().strandType()
self.setPartnerVirtualHelix(strand3p)
self.setPen(QPen(Qt.NoPen))
self._label = None
self.setPen(QPen(Qt.NoPen))
self.setBrush(_nobrush)
self.setRect(_rect)
self.setZValue(styles.ZXOVERITEM)
# end def
### EVENT HANDLERS ###
def mousePressEvent(self, event):
"""
Parses a mousePressEvent to extract strandSet and base index,
forwarding them to the appropriate tool method as necessary.
"""
self.scene().views()[0].addToPressList(self)
self._vhi.setActive(self._idx)
xoi = self._xoverItem
toolMethodName = str(xoi.activeTool()) + "MousePress"
if hasattr(xoi, toolMethodName):
getattr(xoi, toolMethodName)()
# end def
def customMouseRelease(self, event):
pass
# end def
def virtualHelix(self):
return self._vhi.virtualHelix()
# end def
def strandType(self):
return self._strandType
# end def
def refreshXover(self):
self._xoverItem.refreshXover()
# end def
def setPartnerVirtualHelix(self,strand):
if strand.connection5p():
self._partnerVirtualHelix = strand.connection5p().virtualHelix()
else:
self._partnerVirtualHelix = None
# end def
def idx(self):
return self._idx
# end def
def setIdx(self, idx):
self._idx = idx
# end def
def virtualHelixItem(self):
return self._vhi
# end def
def point(self):
return self._vhi.upperLeftCornerOfBaseType(self._idx, self._strandType)
# end def
def floatPoint(self):
pt = self.pos()
return pt.x(), pt.y()
# end def
def isOnTop(self):
return self._isOnTop
# end def
def isDrawn5to3(self):
return self._isDrawn5to3
# end def
def updatePositionAndAppearance(self):
"""
Sets position by asking the VirtualHelixItem
Sets appearance by choosing among pre-defined painterpaths (from
normalstrandgraphicsitem) depending on drawing direction.
"""
self.setPos(*self.point())
# We can only expose a 5' end. But on which side?
isLeft = self._isDrawn5to3
self._updateLabel(isLeft)
# end def
def updateConnectivity(self):
isLeft = self._isDrawn5to3
self._updateLabel(isLeft)
# end def
def remove(self):
"""
Clean up this joint
"""
scene = self.scene()
if scene:
scene.removeItem(self._label)
self._label = None
scene.removeItem(self)
# end def
def _updateLabel(self, isLeft):
"""
Called by updatePositionAndAppearance during init, or later by
updateConnectivity. Updates drawing and position of the label.
"""
lbl = self._label
if self._idx != None:
if lbl == None:
bw = _baseWidth
num = self._partnerVirtualHelix.number()
tBR = _fm.tightBoundingRect(str(num))
halfLabelH = tBR.height()/2.0
halfLabelW = tBR.width()/2.0
# determine x and y positions
labelX = bw/2.0 - halfLabelW
if self._isOnTop:
labelY = -0.25*halfLabelH - 0.5 - 0.5*bw
else:
labelY = 2*halfLabelH + 0.5 + 0.5*bw
# adjust x for left vs right
labelXoffset = 0.25*bw if isLeft else -0.25*bw
labelX += labelXoffset
# adjust x for numeral 1
if num == 1: labelX -= halfLabelW/2.0
# create text item
lbl = QGraphicsSimpleTextItem(str(num), self)
lbl.setPos(labelX, labelY)
lbl.setBrush(_enabbrush)
lbl.setFont(_toHelixNumFont)
self._label = lbl
# end if
lbl.setText( str(self._partnerVirtualHelix.number()) )
# end if
# end def
# end class
class XoverNode5(XoverNode3):
"""
XoverNode5 is the partner of XoverNode3. It differs in that it looks up its
partner virtual helix through the strand's 3' connection and exposes the
3' end when drawn (see updatePositionAndAppearance below).
XoverNode3 handles:
1. Drawing of the 5' end of an xover, and its text label. Drawing style
is determined by the location of the xover within a vhelix (is it a top
or bottom vstrand?).
2. Notifying XoverStrands in the model when connectivity changes.
"""
def __init__(self, virtualHelixItem, xoverItem, strand5p, idx):
super(XoverNode5, self).__init__(virtualHelixItem, xoverItem, strand5p, idx)
# end def
def setPartnerVirtualHelix(self, strand):
if strand.connection3p():
self._partnerVirtualHelix = strand.connection3p().virtualHelix()
else:
self._partnerVirtualHelix = None
# end def
def updatePositionAndAppearance(self):
"""Same as XoverItem3, but exposes 3' end"""
self.setPos(*self.point())
# # We can only expose a 3' end. But on which side?
isLeft = not self._isDrawn5to3
self._updateLabel(isLeft)
# end def
# end class
class XoverItem(QGraphicsPathItem):
"""
This class handles:
1. Drawing the spline between the XoverNode3 and XoverNode5 graphics
items in the path view.
XoverItem should be a child of a PartItem.
"""
_filterName = "xover"
def __init__(self, strandItem, virtualHelixItem):
"""
strandItem is the representation of the 5'-most strand
of a Xover
"""
super(XoverItem, self).__init__(virtualHelixItem.partItem())
self._strandItem = strandItem
self._virtualHelixItem = virtualHelixItem
self._strand5p = None
self._node5 = None
self._node3 = None
self.hide()
self.setFlag(QGraphicsItem.ItemIsSelectable)
# for easier mouseclick
self._clickArea = cA = QGraphicsRectItem(self)
# self._clickArea.setAcceptHoverEvents(True)
# cA.hoverMoveEvent = self.hoverMoveEvent
cA.mousePressEvent = self.mousePressEvent
cA.mouseMoveEvent = self.mouseMoveEvent
cA.setPen(QPen(Qt.NoPen))
# end def
### SLOTS ###
### ACCESSORS ###
def activeTool(self):
return self._strandItem._activeTool()
# end def
def partItem(self):
return self._virtualHelixItem.partItem()
# end def
def remove(self):
scene = self.scene()
if self._node3:
self._node3.remove()
self._node5.remove()
self._node3 = None
self._node5 = None
self._strand5p = None
scene.removeItem(self._clickArea)
self._clickArea = None
scene.removeItem(self)
# end def
### PUBLIC SUPPORT METHODS ###
def hideIt(self):
self.hide()
if self._node3:
self._node3.hide()
self._node5.hide()
self._node3.remove()
self._node3 = None
# end def
def showIt(self):
self.show()
if self._node3:
self._node3.show()
self._node5.show()
# end def
def refreshXover(self):
strand5p = self._strand5p
node3 = self._node3
if strand5p:
strand3p = strand5p.connection3p()
if strand3p != None and node3:
if node3.virtualHelix():
self.update(self._strand5p)
else:
node3.remove()
self._node3 = None
elif node3:
node3.remove()
self._node3 = None
elif node3:
node3.remove()
self._node3 = None
# end def
def update(self, strand5p, idx=None):
"""
Pass idx to this method in order to install a floating
Xover for the forced xover tool
"""
self._strand5p = strand5p
strand3p = strand5p.connection3p()
vhi5p = self._virtualHelixItem
partItem = vhi5p.partItem()
# This condition is for floating xovers
idx3Prime = idx if idx else strand5p.idx3Prime()
if self._node5 == None:
self._node5 = XoverNode5(vhi5p, self, strand5p, idx3Prime)
if strand3p != None:
if self._node3 == None:
vhi3p = partItem.itemForVirtualHelix(strand3p.virtualHelix())
self._node3 = XoverNode3(vhi3p, self, strand3p, strand3p.idx5Prime())
else:
self._node5.setIdx(idx3Prime)
self._node3.setIdx(strand3p.idx5Prime())
self._node5.setPartnerVirtualHelix(strand5p)
self._updatePath(strand5p)
else:
if self._node3:
self._node3.remove()
self._node3 = None
# end if
# end def
### PRIVATE SUPPORT METHODS ###
def _updatePath(self, strand5p):
"""
Draws a quad curve from the edge of the fromBase
to the top or bottom of the toBase (q5), and
finally to the center of the toBase (toBaseEndpoint).
If floatPos!=None, this is a floatingXover and floatPos is the
destination point (where the mouse is) while toHelix, toIndex
are potentially None and represent the base at floatPos.
"""
group = self.group()
self.tempReparent()
node3 = self._node3
node5 = self._node5
bw = _baseWidth
parent = self.partItem()
vhi5 = self._virtualHelixItem
pt5 = vhi5.mapToItem(parent, *node5.point())
fiveIsTop = node5.isOnTop()
fiveIs5to3 = node5.isDrawn5to3()
vhi3 = node3.virtualHelixItem()
pt3 = vhi3.mapToItem(parent, *node3.point())
threeIsTop = node3.isOnTop()
threeIs5to3 = node3.isDrawn5to3()
sameStrand = (node5.strandType() == node3.strandType()) and vhi3 == vhi5
sameParity = fiveIs5to3 == threeIs5to3
# Enter/exit are relative to the direction that the path travels
# overall.
fiveEnterPt = pt5 + QPointF(0 if fiveIs5to3 else 1, .5)*bw
fiveCenterPt = pt5 + QPointF(.5, .5)*bw
fiveExitPt = pt5 + QPointF(.5, 0 if fiveIsTop else 1)*bw
threeEnterPt = pt3 + QPointF(.5, 0 if threeIsTop else 1)*bw
threeCenterPt = pt3 + QPointF(.5, .5)*bw
threeExitPt = pt3 + QPointF(1 if threeIs5to3 else 0, .5)*bw
c1 = QPointF()
# case 1: same strand
if sameStrand:
dx = abs(threeEnterPt.x() - fiveExitPt.x())
c1.setX(0.5 * (fiveExitPt.x() + threeEnterPt.x()))
if fiveIsTop:
c1.setY(fiveExitPt.y() - _yScale * dx)
else:
c1.setY(fiveExitPt.y() + _yScale * dx)
# case 2: same parity
elif sameParity:
dy = abs(threeEnterPt.y() - fiveExitPt.y())
c1.setX(fiveExitPt.x() + _xScale * dy)
c1.setY(0.5 * (fiveExitPt.y() + threeEnterPt.y()))
# case 3: different parity
else:
if fiveIsTop and fiveIs5to3:
c1.setX(fiveExitPt.x() - _xScale *\
abs(threeEnterPt.y() - fiveExitPt.y()))
else:
c1.setX(fiveExitPt.x() + _xScale *\
abs(threeEnterPt.y() - fiveExitPt.y()))
c1.setY(0.5 * (fiveExitPt.y() + threeEnterPt.y()))
# Construct painter path
painterpath = QPainterPath()
painterpath.moveTo(fiveEnterPt)
painterpath.lineTo(fiveCenterPt)
painterpath.lineTo(fiveExitPt)
# The xover5's non-crossing-over end (3') has a connection
painterpath.quadTo(c1, threeEnterPt)
painterpath.lineTo(threeCenterPt)
painterpath.lineTo(threeExitPt)
tempR = painterpath.boundingRect()
tempR.adjust(-bw/2, 0, bw, 0)
self._clickArea.setRect(tempR)
self.setPath(painterpath)
node3.updatePositionAndAppearance()
node5.updatePositionAndAppearance()
if group:
group.addToGroup(self)
self._updateColor(strand5p)
# end def
def updateLabels(self):
if self._node3:
self._node3._updateLabel()
if self._node5:
self._node5._updateLabel()
def _updateColor(self, strand):
oligo = strand.oligo()
color = self.pen().color() if self.isSelected() else QColor(oligo.color())
# print "update xover color", color.value(), self.isSelected(), self.group(), self.parentItem()
penWidth = styles.PATH_STRAND_STROKE_WIDTH
if oligo.shouldHighlight():
penWidth = styles.PATH_STRAND_HIGHLIGHT_STROKE_WIDTH
color.setAlpha(128)
pen = QPen(color, penWidth)
# pen.setCosmetic(True)
pen.setCapStyle(Qt.FlatCap)
self.setPen(pen)
# end def
### EVENT HANDLERS ###
def mousePressEvent(self, event):
"""
Special case for xovers and select tool, for now
"""
if str(self.activeTool()) == "selectTool":
event.setAccepted(False)
sI = self._strandItem
viewroot = sI.viewroot()
currentFilterDict = viewroot.selectionFilterDict()
if sI.strandFilter() in currentFilterDict and self._filterName in currentFilterDict:
event.setAccepted(True)
selectionGroup = viewroot.strandItemSelectionGroup()
mod = Qt.MetaModifier
if not (event.modifiers() & mod):
selectionGroup.clearSelection(False)
selectionGroup.setSelectionLock(selectionGroup)
# self.setSelectedColor(True)
selectionGroup.pendToAdd(self)
selectionGroup.processPendingToAddList()
return selectionGroup.mousePressEvent(event)
else:
event.setAccepted(False)
# end def
def eraseToolMousePress(self):
"""Erase the strand."""
self._strandItem.eraseToolMousePress(None, None)
# end def
def paintToolMousePress(self):
"""Paint the strand."""
self._strandItem.paintToolMousePress(None, None)
# end def
def selectToolMousePress(self):
"""Remove the xover."""
# make sure the selection is clear
sI = self._strandItem
viewroot = sI.viewroot()
selectionGroup = viewroot.strandItemSelectionGroup()
selectionGroup.clearSelection(False)
strand5p = self._strand5p
strand3p = strand5p.connection3p()
self._virtualHelixItem.part().removeXover(strand5p, strand3p)
# end def
def restoreParent(self, pos=None):
"""
Required to restore parenting and positioning in the partItem
"""
# map the position
self.tempReparent(pos)
self.setSelectedColor(False)
self.setSelected(False)
# end def
def tempReparent(self, pos=None):
partItem = self.partItem()
if pos == None:
pos = self.scenePos()
self.setParentItem(partItem)
tempP = partItem.mapFromScene(pos)
self.setPos(tempP)
# end def
def setSelectedColor(self, value):
if value == True:
color = styles.selected_color
else:
oligo = self._strandItem.strand().oligo()
color = QColor(oligo.color())
if oligo.shouldHighlight():
color.setAlpha(128)
pen = self.pen()
pen.setColor(color)
self.setPen(pen)
# end def
def itemChange(self, change, value):
# for selection changes test against QGraphicsItem.ItemSelectedChange
# intercept the change instead of the has changed to enable features.
if change == QGraphicsItem.ItemSelectedChange and self.scene():
activeTool = self.activeTool()
if str(activeTool) == "selectTool":
sI = self._strandItem
viewroot = sI.viewroot()
currentFilterDict = viewroot.selectionFilterDict()
selectionGroup = viewroot.strandItemSelectionGroup()
# only add if the selectionGroup is not locked out
if value == True and (self._filterName in currentFilterDict or not selectionGroup.isNormalSelect()):
if sI.strandFilter() in currentFilterDict:
# print "might add a xoi"
if self.group() != selectionGroup and selectionGroup.isNormalSelect():
# print "adding an xoi"
selectionGroup.pendToAdd(self)
selectionGroup.setSelectionLock(selectionGroup)
self.setSelectedColor(True)
return True
else:
# print "Doh"
return False
# end if
elif value == True:
# print "DOink"
return False
else:
# Deselect
# Check if the strand is being added to the selection group still
if not selectionGroup.isPending(self._strandItem):
selectionGroup.pendToRemove(self)
self.tempReparent()
self.setSelectedColor(False)
return False
else: # don't deselect it, because the strand is selected still
return True
# end else
# end if
elif str(activeTool) == "paintTool":
sI = self._strandItem
viewroot = sI.viewroot()
currentFilterDict = viewroot.selectionFilterDict()
if sI.strandFilter() in currentFilterDict:
if not activeTool.isMacrod():
activeTool.setMacrod()
self.paintToolMousePress()
return False
# end if
return QGraphicsPathItem.itemChange(self, change, value)
# end def
def modelDeselect(self, document):
strand5p = self._strand5p
strand3p = strand5p.connection3p()
test5p = document.isModelStrandSelected(strand5p)
lowVal5p, highVal5p = document.getSelectedStrandValue(strand5p) if test5p else (False, False)
if strand5p.isDrawn5to3():
highVal5p = False
else:
lowVal5p = False
test3p = document.isModelStrandSelected(strand3p)
lowVal3p, highVal3p = document.getSelectedStrandValue(strand3p) if test3p else (False, False)
if strand3p.isDrawn5to3():
lowVal3p = False
else:
highVal3p = False
if not lowVal5p and not highVal5p and test5p:
document.removeStrandFromSelection(strand5p)
elif test5p:
document.addStrandToSelection(strand5p, (lowVal5p, highVal5p))
if not lowVal3p and not highVal3p and test3p:
document.removeStrandFromSelection(strand3p)
elif test3p:
document.addStrandToSelection(strand3p, (lowVal3p, highVal3p))
self.restoreParent()
# end def
def modelSelect(self, document):
strand5p = self._strand5p
strand3p = strand5p.connection3p()
test5p = document.isModelStrandSelected(strand5p)
lowVal5p, highVal5p = document.getSelectedStrandValue(strand5p) if test5p else (False, False)
if strand5p.isDrawn5to3():
highVal5p = True
else:
lowVal5p = True
test3p = document.isModelStrandSelected(strand3p)
lowVal3p, highVal3p = document.getSelectedStrandValue(strand3p) if test3p else (False, False)
if strand3p.isDrawn5to3():
lowVal3p = True
else:
highVal3p = True
self.setSelectedColor(True)
self.setSelected(True)
document.addStrandToSelection(strand5p, (lowVal5p, highVal5p))
document.addStrandToSelection(strand3p, (lowVal3p, highVal3p))
# end def
def paint(self, painter, option, widget):
painter.setPen(self.pen())
painter.setBrush(self.brush())
painter.drawPath(self.path())
# end def
# end class XoverItem
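# Hedged helper sketch (not part of cadnano): the control-point selection in
# XoverItem._updatePath above, restated with plain (x, y) tuples so the three
# cases (same strand, same parity, different parity) are easy to follow.
# x_scale and y_scale correspond to _xScale and _yScale.
def _xover_control_point(five_exit, three_enter, five_is_top, five_is_5to3,
                         same_strand, same_parity, x_scale, y_scale):
    fx, fy = five_exit
    tx, ty = three_enter
    if same_strand:
        # Bend vertically away from the strand, proportional to the x distance.
        dx = abs(tx - fx)
        cy = (fy - y_scale * dx) if five_is_top else (fy + y_scale * dx)
        return (0.5 * (fx + tx), cy)
    dy = abs(ty - fy)
    if same_parity:
        # Bend horizontally, proportional to the y distance.
        return (fx + x_scale * dy, 0.5 * (fy + ty))
    # Different parity: bend left or right depending on the 5' node's layout.
    if five_is_top and five_is_5to3:
        return (fx - x_scale * dy, 0.5 * (fy + ty))
    return (fx + x_scale * dy, 0.5 * (fy + ty))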
| 34.541667
| 116
| 0.58321
|
1897f354a6ddbe94eadc2c107a432cc8320344e5
| 4,684
|
py
|
Python
|
RcsPySim/setup.py
|
theogruner/SimuRLacra
|
4893514ccdeb10a736c55de9aa7753fd51c5afec
|
[
"DOC",
"Zlib",
"BSD-3-Clause"
] | 52
|
2020-05-02T13:55:09.000Z
|
2022-03-09T14:49:36.000Z
|
RcsPySim/setup.py
|
theogruner/SimuRLacra
|
4893514ccdeb10a736c55de9aa7753fd51c5afec
|
[
"DOC",
"Zlib",
"BSD-3-Clause"
] | 40
|
2020-09-01T15:19:22.000Z
|
2021-11-02T14:51:41.000Z
|
RcsPySim/setup.py
|
theogruner/SimuRLacra
|
4893514ccdeb10a736c55de9aa7753fd51c5afec
|
[
"DOC",
"Zlib",
"BSD-3-Clause"
] | 13
|
2020-07-03T11:39:21.000Z
|
2022-02-20T01:12:42.000Z
|
# Copyright (c) 2020, Fabio Muratore, Honda Research Institute Europe GmbH, and
# Technical University of Darmstadt.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of Fabio Muratore, Honda Research Institute Europe GmbH,
# or Technical University of Darmstadt, nor the names of its contributors may
# be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL FABIO MURATORE, HONDA RESEARCH INSTITUTE EUROPE GMBH,
# OR TECHNICAL UNIVERSITY OF DARMSTADT BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
# IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import os
import platform
import re
import subprocess
import sys
from distutils.version import LooseVersion
from setuptools import Extension, setup
from setuptools.command.build_ext import build_ext
class CMakeExtension(Extension):
def __init__(self, name, sourcedir=""):
Extension.__init__(self, name, sources=[])
self.sourcedir = os.path.abspath(sourcedir)
class CMakeBuild(build_ext):
def run(self):
try:
out = subprocess.check_output(["cmake", "--version"])
except OSError:
raise RuntimeError(
"CMake must be installed to build the following extensions: "
+ ", ".join(e.name for e in self.extensions)
)
if platform.system() == "Windows":
cmake_version = LooseVersion(re.search(r"version\s*([\d.]+)", out.decode()).group(1))
if cmake_version < "3.1.0":
raise RuntimeError("CMake >= 3.1.0 is required on Windows")
for ext in self.extensions:
self.build_extension(ext)
def build_extension(self, ext):
if isinstance(ext, CMakeExtension):
# Build a cmake extension
extdir = os.path.abspath(os.path.dirname(self.get_ext_fullpath(ext.name)))
cmake_args = ["-DCMAKE_LIBRARY_OUTPUT_DIRECTORY=" + extdir, "-DPYTHON_EXECUTABLE=" + sys.executable]
cfg = "Debug" if self.debug else "Release"
build_args = ["--config", cfg]
build_args = ["--target", ext.name]
if platform.system() == "Windows":
cmake_args += ["-DCMAKE_LIBRARY_OUTPUT_DIRECTORY_{}={}".format(cfg.upper(), extdir)]
if sys.maxsize > 2 ** 32:
cmake_args += ["-A", "x64"]
build_args += ["--", "/m"]
else:
cmake_args += ["-DCMAKE_BUILD_TYPE=" + cfg]
build_args += ["--", "-j2"]
if not os.path.exists(self.build_temp):
os.makedirs(self.build_temp)
subprocess.check_call(["cmake", ext.sourcedir] + cmake_args, cwd=self.build_temp)
subprocess.check_call(["cmake", "--build", "."] + build_args, cwd=self.build_temp)
else:
super().build_extension(ext)
setup(
name="RcsPySim",
version="0.7",
description="Robotic simulation environments backed by Rcs",
author="Fabio Muratore & Felix Treede",
author_email="fabio.muratore@famura.net",
license="BSD 3-clause",
# Specify empty py_modules to exclude pkgConfig.py
py_modules=[],
packages=["rcsenv"],
# Source directory
package_dir={"": "src/python/"},
# Include config files
package_data={"rcsenv": ["config/**/*"]},
include_package_data=True,
# Build CMake extensions
ext_modules=[CMakeExtension("_rcsenv")],
cmdclass=dict(build_ext=CMakeBuild),
# Dependencies
install_requires=["numpy"],
)
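# Hedged illustration (not part of the original setup.py): the helper below
# restates the non-Windows release branch of CMakeBuild.build_extension() above,
# showing the two command lines that end up being run via subprocess.check_call.
# The target name '_rcsenv' matches the CMakeExtension declared in setup().
def _example_cmake_commands(sourcedir, extdir, python_exe, cfg="Release"):
    configure = ["cmake", sourcedir,
                 "-DCMAKE_LIBRARY_OUTPUT_DIRECTORY=" + extdir,
                 "-DPYTHON_EXECUTABLE=" + python_exe,
                 "-DCMAKE_BUILD_TYPE=" + cfg]
    build = ["cmake", "--build", ".",
             "--config", cfg, "--target", "_rcsenv", "--", "-j2"]
    return configure, build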
| 41.821429
| 112
| 0.671435
|
c7f3010b9ffecef55a2f5c45eddc461f82a349a2
| 2,305
|
py
|
Python
|
cicd/database/databaseArtifactChecks.py
|
consag/build-and-deploy-informatica
|
d7caaf5374f4e179cc0fc0a507d3e5da4b00c610
|
[
"MIT"
] | 4
|
2019-08-20T11:50:21.000Z
|
2020-07-26T18:26:00.000Z
|
cicd/database/databaseArtifactChecks.py
|
consag/build-and-deploy-informatica
|
d7caaf5374f4e179cc0fc0a507d3e5da4b00c610
|
[
"MIT"
] | 1
|
2019-07-05T21:08:55.000Z
|
2019-07-05T21:08:55.000Z
|
cicd/database/databaseArtifactChecks.py
|
consag/build-and-deploy-informatica
|
d7caaf5374f4e179cc0fc0a507d3e5da4b00c610
|
[
"MIT"
] | 3
|
2019-11-20T00:29:02.000Z
|
2020-12-09T15:05:03.000Z
|
# MIT License
#
# Copyright (c) 2019 Jac. Beekers
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
##
# Database Artifact Checks
# @Since: 22-MAR-2019
# @Author: Jac. Beekers
# @Version: 20190322.0 - JBE - Initial
import supporting.errorcodes as err
import supporting, logging
import cicd.database.dbSettings as settings
from pathlib import Path
logger = logging.getLogger(__name__)
def databaseartifactchecks():
thisproc = "databaseartifactchecks"
supporting.log(logger, logging.DEBUG, thisproc, 'started')
result = err.OK
if not settings.dbdeploylist:
supporting.log(logger, err.IGNORE.level, thisproc, err.NO_DEPLOYLIST.message)
supporting.log(logger, err.IGNORE.level, thisproc, "Assuming Oracle is NOT part of the solution.")
result = err.IGNORE
else:
deploylistFile = Path(settings.dbdeploylist)
if not deploylistFile.is_file():
supporting.log(logger, err.IGNORE.level, thisproc,
"dbdeploylist is >" + settings.dbdeploylist + "<. "
+ err.DEPLOYLIST_NF.message + " - Oracle artifact IGNORED.")
result = err.IGNORE
supporting.log(logger, logging.DEBUG, thisproc, 'completed with >' + str(result.rc) + "<.")
return result
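# Hedged usage sketch (not part of the original module): a caller points
# settings.dbdeploylist at a deploy-list file before running the check and then
# branches on the returned errorcode object, mirroring how the check itself
# logs and returns err.OK or err.IGNORE.
def _example_run_check(deploylist_path):
    settings.dbdeploylist = deploylist_path
    result = databaseartifactchecks()
    if result.rc != err.OK.rc:
        supporting.log(logger, logging.WARNING, "example_run_check",
                       "database artifact check returned >" + str(result.rc) + "<.")
    return result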
| 40.438596
| 106
| 0.716269
|
a364043d3d9dd81f9edc291c99b5554b3733cba9
| 109,374
|
py
|
Python
|
Lib/test/test_tarfile.py
|
IshayuRay-Github/cpython
|
8136606769661c103c46d142e52ecbbbb88803f6
|
[
"0BSD"
] | 1
|
2021-11-05T12:29:12.000Z
|
2021-11-05T12:29:12.000Z
|
Lib/test/test_tarfile.py
|
IshayuRay-Github/cpython
|
8136606769661c103c46d142e52ecbbbb88803f6
|
[
"0BSD"
] | null | null | null |
Lib/test/test_tarfile.py
|
IshayuRay-Github/cpython
|
8136606769661c103c46d142e52ecbbbb88803f6
|
[
"0BSD"
] | null | null | null |
import sys
import os
import io
from hashlib import sha256
from contextlib import contextmanager
from random import Random
import pathlib
import unittest
import unittest.mock
import tarfile
from test import support
from test.support import os_helper
from test.support import script_helper
# Check for our compression modules.
try:
import gzip
except ImportError:
gzip = None
try:
import zlib
except ImportError:
zlib = None
try:
import bz2
except ImportError:
bz2 = None
try:
import lzma
except ImportError:
lzma = None
def sha256sum(data):
return sha256(data).hexdigest()
TEMPDIR = os.path.abspath(os_helper.TESTFN) + "-tardir"
tarextdir = TEMPDIR + '-extract-test'
tarname = support.findfile("testtar.tar")
gzipname = os.path.join(TEMPDIR, "testtar.tar.gz")
bz2name = os.path.join(TEMPDIR, "testtar.tar.bz2")
xzname = os.path.join(TEMPDIR, "testtar.tar.xz")
tmpname = os.path.join(TEMPDIR, "tmp.tar")
dotlessname = os.path.join(TEMPDIR, "testtar")
sha256_regtype = (
"e09e4bc8b3c9d9177e77256353b36c159f5f040531bbd4b024a8f9b9196c71ce"
)
sha256_sparse = (
"4f05a776071146756345ceee937b33fc5644f5a96b9780d1c7d6a32cdf164d7b"
)
class TarTest:
tarname = tarname
suffix = ''
open = io.FileIO
taropen = tarfile.TarFile.taropen
@property
def mode(self):
return self.prefix + self.suffix
@support.requires_gzip()
class GzipTest:
tarname = gzipname
suffix = 'gz'
open = gzip.GzipFile if gzip else None
taropen = tarfile.TarFile.gzopen
@support.requires_bz2()
class Bz2Test:
tarname = bz2name
suffix = 'bz2'
open = bz2.BZ2File if bz2 else None
taropen = tarfile.TarFile.bz2open
@support.requires_lzma()
class LzmaTest:
tarname = xzname
suffix = 'xz'
open = lzma.LZMAFile if lzma else None
taropen = tarfile.TarFile.xzopen
class ReadTest(TarTest):
prefix = "r:"
def setUp(self):
self.tar = tarfile.open(self.tarname, mode=self.mode,
encoding="iso8859-1")
def tearDown(self):
self.tar.close()
class UstarReadTest(ReadTest, unittest.TestCase):
def test_fileobj_regular_file(self):
tarinfo = self.tar.getmember("ustar/regtype")
with self.tar.extractfile(tarinfo) as fobj:
data = fobj.read()
self.assertEqual(len(data), tarinfo.size,
"regular file extraction failed")
self.assertEqual(sha256sum(data), sha256_regtype,
"regular file extraction failed")
def test_fileobj_readlines(self):
self.tar.extract("ustar/regtype", TEMPDIR)
tarinfo = self.tar.getmember("ustar/regtype")
with open(os.path.join(TEMPDIR, "ustar/regtype"), "r") as fobj1:
lines1 = fobj1.readlines()
with self.tar.extractfile(tarinfo) as fobj:
fobj2 = io.TextIOWrapper(fobj)
lines2 = fobj2.readlines()
self.assertEqual(lines1, lines2,
"fileobj.readlines() failed")
self.assertEqual(len(lines2), 114,
"fileobj.readlines() failed")
self.assertEqual(lines2[83],
"I will gladly admit that Python is not the fastest "
"running scripting language.\n",
"fileobj.readlines() failed")
def test_fileobj_iter(self):
self.tar.extract("ustar/regtype", TEMPDIR)
tarinfo = self.tar.getmember("ustar/regtype")
with open(os.path.join(TEMPDIR, "ustar/regtype"), "r") as fobj1:
lines1 = fobj1.readlines()
with self.tar.extractfile(tarinfo) as fobj2:
lines2 = list(io.TextIOWrapper(fobj2))
self.assertEqual(lines1, lines2,
"fileobj.__iter__() failed")
def test_fileobj_seek(self):
self.tar.extract("ustar/regtype", TEMPDIR)
with open(os.path.join(TEMPDIR, "ustar/regtype"), "rb") as fobj:
data = fobj.read()
tarinfo = self.tar.getmember("ustar/regtype")
with self.tar.extractfile(tarinfo) as fobj:
text = fobj.read()
fobj.seek(0)
self.assertEqual(0, fobj.tell(),
"seek() to file's start failed")
fobj.seek(2048, 0)
self.assertEqual(2048, fobj.tell(),
"seek() to absolute position failed")
fobj.seek(-1024, 1)
self.assertEqual(1024, fobj.tell(),
"seek() to negative relative position failed")
fobj.seek(1024, 1)
self.assertEqual(2048, fobj.tell(),
"seek() to positive relative position failed")
s = fobj.read(10)
self.assertEqual(s, data[2048:2058],
"read() after seek failed")
fobj.seek(0, 2)
self.assertEqual(tarinfo.size, fobj.tell(),
"seek() to file's end failed")
self.assertEqual(fobj.read(), b"",
"read() at file's end did not return empty string")
fobj.seek(-tarinfo.size, 2)
self.assertEqual(0, fobj.tell(),
"relative seek() to file's end failed")
fobj.seek(512)
s1 = fobj.readlines()
fobj.seek(512)
s2 = fobj.readlines()
self.assertEqual(s1, s2,
"readlines() after seek failed")
fobj.seek(0)
self.assertEqual(len(fobj.readline()), fobj.tell(),
"tell() after readline() failed")
fobj.seek(512)
self.assertEqual(len(fobj.readline()) + 512, fobj.tell(),
"tell() after seek() and readline() failed")
fobj.seek(0)
line = fobj.readline()
self.assertEqual(fobj.read(), data[len(line):],
"read() after readline() failed")
def test_fileobj_text(self):
with self.tar.extractfile("ustar/regtype") as fobj:
fobj = io.TextIOWrapper(fobj)
data = fobj.read().encode("iso8859-1")
self.assertEqual(sha256sum(data), sha256_regtype)
try:
fobj.seek(100)
except AttributeError:
# Issue #13815: seek() complained about a missing
# flush() method.
self.fail("seeking failed in text mode")
# Test if symbolic and hard links are resolved by extractfile(). The
# test link members each point to a regular member whose data is
# supposed to be exported.
def _test_fileobj_link(self, lnktype, regtype):
with self.tar.extractfile(lnktype) as a, \
self.tar.extractfile(regtype) as b:
self.assertEqual(a.name, b.name)
def test_fileobj_link1(self):
self._test_fileobj_link("ustar/lnktype", "ustar/regtype")
def test_fileobj_link2(self):
self._test_fileobj_link("./ustar/linktest2/lnktype",
"ustar/linktest1/regtype")
def test_fileobj_symlink1(self):
self._test_fileobj_link("ustar/symtype", "ustar/regtype")
def test_fileobj_symlink2(self):
self._test_fileobj_link("./ustar/linktest2/symtype",
"ustar/linktest1/regtype")
def test_issue14160(self):
self._test_fileobj_link("symtype2", "ustar/regtype")
def test_add_dir_getmember(self):
# bpo-21987
self.add_dir_and_getmember('bar')
self.add_dir_and_getmember('a'*101)
def add_dir_and_getmember(self, name):
with os_helper.temp_cwd():
with tarfile.open(tmpname, 'w') as tar:
try:
os.mkdir(name)
tar.add(name)
finally:
os.rmdir(name)
with tarfile.open(tmpname) as tar:
self.assertEqual(
tar.getmember(name),
tar.getmember(name + '/')
)
class GzipUstarReadTest(GzipTest, UstarReadTest):
pass
class Bz2UstarReadTest(Bz2Test, UstarReadTest):
pass
class LzmaUstarReadTest(LzmaTest, UstarReadTest):
pass
class ListTest(ReadTest, unittest.TestCase):
# Override setUp to use default encoding (UTF-8)
def setUp(self):
self.tar = tarfile.open(self.tarname, mode=self.mode)
def test_list(self):
tio = io.TextIOWrapper(io.BytesIO(), 'ascii', newline='\n')
with support.swap_attr(sys, 'stdout', tio):
self.tar.list(verbose=False)
out = tio.detach().getvalue()
self.assertIn(b'ustar/conttype', out)
self.assertIn(b'ustar/regtype', out)
self.assertIn(b'ustar/lnktype', out)
self.assertIn(b'ustar' + (b'/12345' * 40) + b'67/longname', out)
self.assertIn(b'./ustar/linktest2/symtype', out)
self.assertIn(b'./ustar/linktest2/lnktype', out)
# Make sure it puts trailing slash for directory
self.assertIn(b'ustar/dirtype/', out)
self.assertIn(b'ustar/dirtype-with-size/', out)
# Make sure it is able to print unencodable characters
def conv(b):
s = b.decode(self.tar.encoding, 'surrogateescape')
return s.encode('ascii', 'backslashreplace')
self.assertIn(conv(b'ustar/umlauts-\xc4\xd6\xdc\xe4\xf6\xfc\xdf'), out)
self.assertIn(conv(b'misc/regtype-hpux-signed-chksum-'
b'\xc4\xd6\xdc\xe4\xf6\xfc\xdf'), out)
self.assertIn(conv(b'misc/regtype-old-v7-signed-chksum-'
b'\xc4\xd6\xdc\xe4\xf6\xfc\xdf'), out)
self.assertIn(conv(b'pax/bad-pax-\xe4\xf6\xfc'), out)
self.assertIn(conv(b'pax/hdrcharset-\xe4\xf6\xfc'), out)
# Make sure it prints files separated by one newline without any
# 'ls -l'-like accessories if verbose flag is not being used
# ...
# ustar/conttype
# ustar/regtype
# ...
self.assertRegex(out, br'ustar/conttype ?\r?\n'
br'ustar/regtype ?\r?\n')
# Make sure it does not print the source of link without verbose flag
self.assertNotIn(b'link to', out)
self.assertNotIn(b'->', out)
def test_list_verbose(self):
tio = io.TextIOWrapper(io.BytesIO(), 'ascii', newline='\n')
with support.swap_attr(sys, 'stdout', tio):
self.tar.list(verbose=True)
out = tio.detach().getvalue()
# Make sure it prints files separated by one newline with 'ls -l'-like
# accessories if verbose flag is being used
# ...
# ?rw-r--r-- tarfile/tarfile 7011 2003-01-06 07:19:43 ustar/conttype
# ?rw-r--r-- tarfile/tarfile 7011 2003-01-06 07:19:43 ustar/regtype
# ...
self.assertRegex(out, (br'\?rw-r--r-- tarfile/tarfile\s+7011 '
br'\d{4}-\d\d-\d\d\s+\d\d:\d\d:\d\d '
br'ustar/\w+type ?\r?\n') * 2)
# Make sure it prints the source of link with verbose flag
self.assertIn(b'ustar/symtype -> regtype', out)
self.assertIn(b'./ustar/linktest2/symtype -> ../linktest1/regtype', out)
self.assertIn(b'./ustar/linktest2/lnktype link to '
b'./ustar/linktest1/regtype', out)
self.assertIn(b'gnu' + (b'/123' * 125) + b'/longlink link to gnu' +
(b'/123' * 125) + b'/longname', out)
self.assertIn(b'pax' + (b'/123' * 125) + b'/longlink link to pax' +
(b'/123' * 125) + b'/longname', out)
def test_list_members(self):
tio = io.TextIOWrapper(io.BytesIO(), 'ascii', newline='\n')
def members(tar):
for tarinfo in tar.getmembers():
if 'reg' in tarinfo.name:
yield tarinfo
with support.swap_attr(sys, 'stdout', tio):
self.tar.list(verbose=False, members=members(self.tar))
out = tio.detach().getvalue()
self.assertIn(b'ustar/regtype', out)
self.assertNotIn(b'ustar/conttype', out)
class GzipListTest(GzipTest, ListTest):
pass
class Bz2ListTest(Bz2Test, ListTest):
pass
class LzmaListTest(LzmaTest, ListTest):
pass
class CommonReadTest(ReadTest):
def test_is_tarfile_erroneous(self):
with open(tmpname, "wb"):
pass
# is_tarfile works on filenames
self.assertFalse(tarfile.is_tarfile(tmpname))
# is_tarfile works on path-like objects
self.assertFalse(tarfile.is_tarfile(pathlib.Path(tmpname)))
# is_tarfile works on file objects
with open(tmpname, "rb") as fobj:
self.assertFalse(tarfile.is_tarfile(fobj))
# is_tarfile works on file-like objects
self.assertFalse(tarfile.is_tarfile(io.BytesIO(b"invalid")))
def test_is_tarfile_valid(self):
# is_tarfile works on filenames
self.assertTrue(tarfile.is_tarfile(self.tarname))
# is_tarfile works on path-like objects
self.assertTrue(tarfile.is_tarfile(pathlib.Path(self.tarname)))
# is_tarfile works on file objects
with open(self.tarname, "rb") as fobj:
self.assertTrue(tarfile.is_tarfile(fobj))
# is_tarfile works on file-like objects
with open(self.tarname, "rb") as fobj:
self.assertTrue(tarfile.is_tarfile(io.BytesIO(fobj.read())))
def test_is_tarfile_keeps_position(self):
# Test for issue44289: tarfile.is_tarfile() modifies
# file object's current position
with open(self.tarname, "rb") as fobj:
tarfile.is_tarfile(fobj)
self.assertEqual(fobj.tell(), 0)
with open(self.tarname, "rb") as fobj:
file_like = io.BytesIO(fobj.read())
tarfile.is_tarfile(file_like)
self.assertEqual(file_like.tell(), 0)
def test_empty_tarfile(self):
# Test for issue6123: Allow opening empty archives.
# This test checks if tarfile.open() is able to open an empty tar
# archive successfully. Note that an empty tar archive is not the
# same as an empty file!
with tarfile.open(tmpname, self.mode.replace("r", "w")):
pass
try:
tar = tarfile.open(tmpname, self.mode)
tar.getnames()
except tarfile.ReadError:
self.fail("tarfile.open() failed on empty archive")
else:
self.assertListEqual(tar.getmembers(), [])
finally:
tar.close()
def test_non_existent_tarfile(self):
# Test for issue11513: prevent non-existent gzipped tarfiles raising
# multiple exceptions.
with self.assertRaisesRegex(FileNotFoundError, "xxx"):
tarfile.open("xxx", self.mode)
def test_null_tarfile(self):
# Test for issue6123: Allow opening empty archives.
# This test guarantees that tarfile.open() does not treat an empty
# file as an empty tar archive.
with open(tmpname, "wb"):
pass
self.assertRaises(tarfile.ReadError, tarfile.open, tmpname, self.mode)
self.assertRaises(tarfile.ReadError, tarfile.open, tmpname)
def test_ignore_zeros(self):
# Test TarFile's ignore_zeros option.
# generate 512 pseudorandom bytes
data = Random(0).randbytes(512)
for char in (b'\0', b'a'):
# Test if EOFHeaderError ('\0') and InvalidHeaderError ('a')
# are ignored correctly.
with self.open(tmpname, "w") as fobj:
fobj.write(char * 1024)
tarinfo = tarfile.TarInfo("foo")
tarinfo.size = len(data)
fobj.write(tarinfo.tobuf())
fobj.write(data)
tar = tarfile.open(tmpname, mode="r", ignore_zeros=True)
try:
self.assertListEqual(tar.getnames(), ["foo"],
"ignore_zeros=True should have skipped the %r-blocks" %
char)
finally:
tar.close()
def test_premature_end_of_archive(self):
for size in (512, 600, 1024, 1200):
with tarfile.open(tmpname, "w:") as tar:
t = tarfile.TarInfo("foo")
t.size = 1024
tar.addfile(t, io.BytesIO(b"a" * 1024))
with open(tmpname, "r+b") as fobj:
fobj.truncate(size)
with tarfile.open(tmpname) as tar:
with self.assertRaisesRegex(tarfile.ReadError, "unexpected end of data"):
for t in tar:
pass
with tarfile.open(tmpname) as tar:
t = tar.next()
with self.assertRaisesRegex(tarfile.ReadError, "unexpected end of data"):
tar.extract(t, TEMPDIR)
with self.assertRaisesRegex(tarfile.ReadError, "unexpected end of data"):
tar.extractfile(t).read()
def test_length_zero_header(self):
# bpo-39017 (CVE-2019-20907): reading a zero-length header should fail
# with an exception
with self.assertRaisesRegex(tarfile.ReadError, "file could not be opened successfully"):
with tarfile.open(support.findfile('recursion.tar')) as tar:
pass
class MiscReadTestBase(CommonReadTest):
def requires_name_attribute(self):
pass
def test_no_name_argument(self):
self.requires_name_attribute()
with open(self.tarname, "rb") as fobj:
self.assertIsInstance(fobj.name, str)
with tarfile.open(fileobj=fobj, mode=self.mode) as tar:
self.assertIsInstance(tar.name, str)
self.assertEqual(tar.name, os.path.abspath(fobj.name))
def test_no_name_attribute(self):
with open(self.tarname, "rb") as fobj:
data = fobj.read()
fobj = io.BytesIO(data)
self.assertRaises(AttributeError, getattr, fobj, "name")
tar = tarfile.open(fileobj=fobj, mode=self.mode)
self.assertIsNone(tar.name)
def test_empty_name_attribute(self):
with open(self.tarname, "rb") as fobj:
data = fobj.read()
fobj = io.BytesIO(data)
fobj.name = ""
with tarfile.open(fileobj=fobj, mode=self.mode) as tar:
self.assertIsNone(tar.name)
def test_int_name_attribute(self):
# Issue 21044: tarfile.open() should handle fileobj with an integer
# 'name' attribute.
fd = os.open(self.tarname, os.O_RDONLY)
with open(fd, 'rb') as fobj:
self.assertIsInstance(fobj.name, int)
with tarfile.open(fileobj=fobj, mode=self.mode) as tar:
self.assertIsNone(tar.name)
def test_bytes_name_attribute(self):
self.requires_name_attribute()
tarname = os.fsencode(self.tarname)
with open(tarname, 'rb') as fobj:
self.assertIsInstance(fobj.name, bytes)
with tarfile.open(fileobj=fobj, mode=self.mode) as tar:
self.assertIsInstance(tar.name, bytes)
self.assertEqual(tar.name, os.path.abspath(fobj.name))
def test_pathlike_name(self):
tarname = pathlib.Path(self.tarname)
with tarfile.open(tarname, mode=self.mode) as tar:
self.assertIsInstance(tar.name, str)
self.assertEqual(tar.name, os.path.abspath(os.fspath(tarname)))
with self.taropen(tarname) as tar:
self.assertIsInstance(tar.name, str)
self.assertEqual(tar.name, os.path.abspath(os.fspath(tarname)))
with tarfile.TarFile.open(tarname, mode=self.mode) as tar:
self.assertIsInstance(tar.name, str)
self.assertEqual(tar.name, os.path.abspath(os.fspath(tarname)))
if self.suffix == '':
with tarfile.TarFile(tarname, mode='r') as tar:
self.assertIsInstance(tar.name, str)
self.assertEqual(tar.name, os.path.abspath(os.fspath(tarname)))
def test_illegal_mode_arg(self):
with open(tmpname, 'wb'):
pass
with self.assertRaisesRegex(ValueError, 'mode must be '):
tar = self.taropen(tmpname, 'q')
with self.assertRaisesRegex(ValueError, 'mode must be '):
tar = self.taropen(tmpname, 'rw')
with self.assertRaisesRegex(ValueError, 'mode must be '):
tar = self.taropen(tmpname, '')
def test_fileobj_with_offset(self):
# Skip the first member and store values from the second member
# of the testtar.
tar = tarfile.open(self.tarname, mode=self.mode)
try:
tar.next()
t = tar.next()
name = t.name
offset = t.offset
with tar.extractfile(t) as f:
data = f.read()
finally:
tar.close()
# Open the testtar and seek to the offset of the second member.
with self.open(self.tarname) as fobj:
fobj.seek(offset)
# Test if the tarfile starts with the second member.
with tar.open(self.tarname, mode="r:", fileobj=fobj) as tar:
t = tar.next()
self.assertEqual(t.name, name)
# Read to the end of fileobj and test if seeking back to the
# beginning works.
tar.getmembers()
self.assertEqual(tar.extractfile(t).read(), data,
"seek back did not work")
def test_fail_comp(self):
# For Gzip and Bz2 Tests: fail with a ReadError on an uncompressed file.
self.assertRaises(tarfile.ReadError, tarfile.open, tarname, self.mode)
with open(tarname, "rb") as fobj:
self.assertRaises(tarfile.ReadError, tarfile.open,
fileobj=fobj, mode=self.mode)
def test_v7_dirtype(self):
# Test old style dirtype member (bug #1336623):
# Old V7 tars create directory members using an AREGTYPE
# header with a "/" appended to the filename field.
tarinfo = self.tar.getmember("misc/dirtype-old-v7")
self.assertEqual(tarinfo.type, tarfile.DIRTYPE,
"v7 dirtype failed")
def test_xstar_type(self):
# The xstar format stores extra atime and ctime fields inside the
# space reserved for the prefix field. The prefix field must be
# ignored in this case, otherwise it will mess up the name.
try:
self.tar.getmember("misc/regtype-xstar")
except KeyError:
self.fail("failed to find misc/regtype-xstar (mangled prefix?)")
def test_check_members(self):
for tarinfo in self.tar:
self.assertEqual(int(tarinfo.mtime), 0o7606136617,
"wrong mtime for %s" % tarinfo.name)
if not tarinfo.name.startswith("ustar/"):
continue
self.assertEqual(tarinfo.uname, "tarfile",
"wrong uname for %s" % tarinfo.name)
def test_find_members(self):
self.assertEqual(self.tar.getmembers()[-1].name, "misc/eof",
"could not find all members")
@unittest.skipUnless(hasattr(os, "link"),
"Missing hardlink implementation")
@os_helper.skip_unless_symlink
def test_extract_hardlink(self):
# Test hardlink extraction (e.g. bug #857297).
with tarfile.open(tarname, errorlevel=1, encoding="iso8859-1") as tar:
tar.extract("ustar/regtype", TEMPDIR)
self.addCleanup(os_helper.unlink, os.path.join(TEMPDIR, "ustar/regtype"))
tar.extract("ustar/lnktype", TEMPDIR)
self.addCleanup(os_helper.unlink, os.path.join(TEMPDIR, "ustar/lnktype"))
with open(os.path.join(TEMPDIR, "ustar/lnktype"), "rb") as f:
data = f.read()
self.assertEqual(sha256sum(data), sha256_regtype)
tar.extract("ustar/symtype", TEMPDIR)
self.addCleanup(os_helper.unlink, os.path.join(TEMPDIR, "ustar/symtype"))
with open(os.path.join(TEMPDIR, "ustar/symtype"), "rb") as f:
data = f.read()
self.assertEqual(sha256sum(data), sha256_regtype)
def test_extractall(self):
# Test if extractall() correctly restores directory permissions
# and times (see issue1735).
tar = tarfile.open(tarname, encoding="iso8859-1")
DIR = os.path.join(TEMPDIR, "extractall")
os.mkdir(DIR)
try:
directories = [t for t in tar if t.isdir()]
tar.extractall(DIR, directories)
for tarinfo in directories:
path = os.path.join(DIR, tarinfo.name)
if sys.platform != "win32":
# Win32 has no support for fine grained permissions.
self.assertEqual(tarinfo.mode & 0o777,
os.stat(path).st_mode & 0o777)
def format_mtime(mtime):
if isinstance(mtime, float):
return "{} ({})".format(mtime, mtime.hex())
else:
return "{!r} (int)".format(mtime)
file_mtime = os.path.getmtime(path)
errmsg = "tar mtime {0} != file time {1} of path {2!a}".format(
format_mtime(tarinfo.mtime),
format_mtime(file_mtime),
path)
self.assertEqual(tarinfo.mtime, file_mtime, errmsg)
finally:
tar.close()
os_helper.rmtree(DIR)
def test_extract_directory(self):
dirtype = "ustar/dirtype"
DIR = os.path.join(TEMPDIR, "extractdir")
os.mkdir(DIR)
try:
with tarfile.open(tarname, encoding="iso8859-1") as tar:
tarinfo = tar.getmember(dirtype)
tar.extract(tarinfo, path=DIR)
extracted = os.path.join(DIR, dirtype)
self.assertEqual(os.path.getmtime(extracted), tarinfo.mtime)
if sys.platform != "win32":
self.assertEqual(os.stat(extracted).st_mode & 0o777, 0o755)
finally:
os_helper.rmtree(DIR)
def test_extractall_pathlike_name(self):
DIR = pathlib.Path(TEMPDIR) / "extractall"
with os_helper.temp_dir(DIR), \
tarfile.open(tarname, encoding="iso8859-1") as tar:
directories = [t for t in tar if t.isdir()]
tar.extractall(DIR, directories)
for tarinfo in directories:
path = DIR / tarinfo.name
self.assertEqual(os.path.getmtime(path), tarinfo.mtime)
def test_extract_pathlike_name(self):
dirtype = "ustar/dirtype"
DIR = pathlib.Path(TEMPDIR) / "extractall"
with os_helper.temp_dir(DIR), \
tarfile.open(tarname, encoding="iso8859-1") as tar:
tarinfo = tar.getmember(dirtype)
tar.extract(tarinfo, path=DIR)
extracted = DIR / dirtype
self.assertEqual(os.path.getmtime(extracted), tarinfo.mtime)
def test_init_close_fobj(self):
# Issue #7341: Close the internal file object in the TarFile
# constructor in case of an error. For the test we rely on
# the fact that opening an empty file raises a ReadError.
empty = os.path.join(TEMPDIR, "empty")
with open(empty, "wb") as fobj:
fobj.write(b"")
try:
tar = object.__new__(tarfile.TarFile)
try:
tar.__init__(empty)
except tarfile.ReadError:
self.assertTrue(tar.fileobj.closed)
else:
self.fail("ReadError not raised")
finally:
os_helper.unlink(empty)
def test_parallel_iteration(self):
# Issue #16601: Restarting iteration over tarfile continued
# from where it left off.
with tarfile.open(self.tarname) as tar:
for m1, m2 in zip(tar, tar):
self.assertEqual(m1.offset, m2.offset)
self.assertEqual(m1.get_info(), m2.get_info())
@unittest.skipIf(zlib is None, "requires zlib")
def test_zlib_error_does_not_leak(self):
# bpo-39039: tarfile.open allowed zlib exceptions to bubble up when
# parsing certain types of invalid data
with unittest.mock.patch("tarfile.TarInfo.fromtarfile") as mock:
mock.side_effect = zlib.error
with self.assertRaises(tarfile.ReadError):
tarfile.open(self.tarname)
class MiscReadTest(MiscReadTestBase, unittest.TestCase):
test_fail_comp = None
class GzipMiscReadTest(GzipTest, MiscReadTestBase, unittest.TestCase):
pass
class Bz2MiscReadTest(Bz2Test, MiscReadTestBase, unittest.TestCase):
def requires_name_attribute(self):
self.skipTest("BZ2File have no name attribute")
class LzmaMiscReadTest(LzmaTest, MiscReadTestBase, unittest.TestCase):
def requires_name_attribute(self):
self.skipTest("LZMAFile have no name attribute")
class StreamReadTest(CommonReadTest, unittest.TestCase):
prefix="r|"
def test_read_through(self):
# Issue #11224: A poorly designed _FileInFile.read() method
# caused seeking errors with stream tar files.
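        # A stream archive ("r|") is read strictly forward, so every member
        # has to be consumed in order and the file objects returned by
        # extractfile() cannot seek back once the stream has moved on.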
for tarinfo in self.tar:
if not tarinfo.isreg():
continue
with self.tar.extractfile(tarinfo) as fobj:
while True:
try:
buf = fobj.read(512)
except tarfile.StreamError:
self.fail("simple read-through using "
"TarFile.extractfile() failed")
if not buf:
break
def test_fileobj_regular_file(self):
tarinfo = self.tar.next() # get "regtype" (can't use getmember)
with self.tar.extractfile(tarinfo) as fobj:
data = fobj.read()
self.assertEqual(len(data), tarinfo.size,
"regular file extraction failed")
self.assertEqual(sha256sum(data), sha256_regtype,
"regular file extraction failed")
def test_provoke_stream_error(self):
tarinfos = self.tar.getmembers()
with self.tar.extractfile(tarinfos[0]) as f: # read the first member
self.assertRaises(tarfile.StreamError, f.read)
def test_compare_members(self):
tar1 = tarfile.open(tarname, encoding="iso8859-1")
try:
tar2 = self.tar
while True:
t1 = tar1.next()
t2 = tar2.next()
if t1 is None:
break
self.assertIsNotNone(t2, "stream.next() failed.")
if t2.islnk() or t2.issym():
with self.assertRaises(tarfile.StreamError):
tar2.extractfile(t2)
continue
v1 = tar1.extractfile(t1)
v2 = tar2.extractfile(t2)
if v1 is None:
continue
self.assertIsNotNone(v2, "stream.extractfile() failed")
self.assertEqual(v1.read(), v2.read(),
"stream extraction failed")
finally:
tar1.close()
class GzipStreamReadTest(GzipTest, StreamReadTest):
pass
class Bz2StreamReadTest(Bz2Test, StreamReadTest):
pass
class LzmaStreamReadTest(LzmaTest, StreamReadTest):
pass
class DetectReadTest(TarTest, unittest.TestCase):
def _testfunc_file(self, name, mode):
try:
tar = tarfile.open(name, mode)
except tarfile.ReadError as e:
            self.fail("tarfile.open(%r, %r) failed: %s" % (name, mode, e))
else:
tar.close()
def _testfunc_fileobj(self, name, mode):
try:
with open(name, "rb") as f:
tar = tarfile.open(name, mode, fileobj=f)
except tarfile.ReadError as e:
            self.fail("tarfile.open(%r, %r) failed: %s" % (name, mode, e))
else:
tar.close()
def _test_modes(self, testfunc):
if self.suffix:
with self.assertRaises(tarfile.ReadError):
tarfile.open(tarname, mode="r:" + self.suffix)
with self.assertRaises(tarfile.ReadError):
tarfile.open(tarname, mode="r|" + self.suffix)
with self.assertRaises(tarfile.ReadError):
tarfile.open(self.tarname, mode="r:")
with self.assertRaises(tarfile.ReadError):
tarfile.open(self.tarname, mode="r|")
testfunc(self.tarname, "r")
testfunc(self.tarname, "r:" + self.suffix)
testfunc(self.tarname, "r:*")
testfunc(self.tarname, "r|" + self.suffix)
testfunc(self.tarname, "r|*")
def test_detect_file(self):
self._test_modes(self._testfunc_file)
def test_detect_fileobj(self):
self._test_modes(self._testfunc_fileobj)
class GzipDetectReadTest(GzipTest, DetectReadTest):
pass
class Bz2DetectReadTest(Bz2Test, DetectReadTest):
def test_detect_stream_bz2(self):
# Originally, tarfile's stream detection looked for the string
# "BZh91" at the start of the file. This is incorrect because
        # the '9' represents the blocksize (900,000 bytes). If the file was
        # compressed using another blocksize, autodetection fails.
with open(tarname, "rb") as fobj:
data = fobj.read()
# Compress with blocksize 100,000 bytes, the file starts with "BZh11".
with bz2.BZ2File(tmpname, "wb", compresslevel=1) as fobj:
fobj.write(data)
self._testfunc_file(tmpname, "r|*")
class LzmaDetectReadTest(LzmaTest, DetectReadTest):
pass
class MemberReadTest(ReadTest, unittest.TestCase):
def _test_member(self, tarinfo, chksum=None, **kwargs):
if chksum is not None:
with self.tar.extractfile(tarinfo) as f:
self.assertEqual(sha256sum(f.read()), chksum,
"wrong sha256sum for %s" % tarinfo.name)
kwargs["mtime"] = 0o7606136617
kwargs["uid"] = 1000
kwargs["gid"] = 100
if "old-v7" not in tarinfo.name:
# V7 tar can't handle alphabetic owners.
kwargs["uname"] = "tarfile"
kwargs["gname"] = "tarfile"
for k, v in kwargs.items():
self.assertEqual(getattr(tarinfo, k), v,
"wrong value in %s field of %s" % (k, tarinfo.name))
def test_find_regtype(self):
tarinfo = self.tar.getmember("ustar/regtype")
self._test_member(tarinfo, size=7011, chksum=sha256_regtype)
def test_find_conttype(self):
tarinfo = self.tar.getmember("ustar/conttype")
self._test_member(tarinfo, size=7011, chksum=sha256_regtype)
def test_find_dirtype(self):
tarinfo = self.tar.getmember("ustar/dirtype")
self._test_member(tarinfo, size=0)
def test_find_dirtype_with_size(self):
tarinfo = self.tar.getmember("ustar/dirtype-with-size")
self._test_member(tarinfo, size=255)
def test_find_lnktype(self):
tarinfo = self.tar.getmember("ustar/lnktype")
self._test_member(tarinfo, size=0, linkname="ustar/regtype")
def test_find_symtype(self):
tarinfo = self.tar.getmember("ustar/symtype")
self._test_member(tarinfo, size=0, linkname="regtype")
def test_find_blktype(self):
tarinfo = self.tar.getmember("ustar/blktype")
self._test_member(tarinfo, size=0, devmajor=3, devminor=0)
def test_find_chrtype(self):
tarinfo = self.tar.getmember("ustar/chrtype")
self._test_member(tarinfo, size=0, devmajor=1, devminor=3)
def test_find_fifotype(self):
tarinfo = self.tar.getmember("ustar/fifotype")
self._test_member(tarinfo, size=0)
def test_find_sparse(self):
tarinfo = self.tar.getmember("ustar/sparse")
self._test_member(tarinfo, size=86016, chksum=sha256_sparse)
def test_find_gnusparse(self):
tarinfo = self.tar.getmember("gnu/sparse")
self._test_member(tarinfo, size=86016, chksum=sha256_sparse)
def test_find_gnusparse_00(self):
tarinfo = self.tar.getmember("gnu/sparse-0.0")
self._test_member(tarinfo, size=86016, chksum=sha256_sparse)
def test_find_gnusparse_01(self):
tarinfo = self.tar.getmember("gnu/sparse-0.1")
self._test_member(tarinfo, size=86016, chksum=sha256_sparse)
def test_find_gnusparse_10(self):
tarinfo = self.tar.getmember("gnu/sparse-1.0")
self._test_member(tarinfo, size=86016, chksum=sha256_sparse)
def test_find_umlauts(self):
tarinfo = self.tar.getmember("ustar/umlauts-"
"\xc4\xd6\xdc\xe4\xf6\xfc\xdf")
self._test_member(tarinfo, size=7011, chksum=sha256_regtype)
def test_find_ustar_longname(self):
name = "ustar/" + "12345/" * 39 + "1234567/longname"
self.assertIn(name, self.tar.getnames())
def test_find_regtype_oldv7(self):
tarinfo = self.tar.getmember("misc/regtype-old-v7")
self._test_member(tarinfo, size=7011, chksum=sha256_regtype)
def test_find_pax_umlauts(self):
self.tar.close()
self.tar = tarfile.open(self.tarname, mode=self.mode,
encoding="iso8859-1")
tarinfo = self.tar.getmember("pax/umlauts-"
"\xc4\xd6\xdc\xe4\xf6\xfc\xdf")
self._test_member(tarinfo, size=7011, chksum=sha256_regtype)
class LongnameTest:
def test_read_longname(self):
# Test reading of longname (bug #1471427).
longname = self.subdir + "/" + "123/" * 125 + "longname"
try:
tarinfo = self.tar.getmember(longname)
except KeyError:
self.fail("longname not found")
self.assertNotEqual(tarinfo.type, tarfile.DIRTYPE,
"read longname as dirtype")
def test_read_longlink(self):
longname = self.subdir + "/" + "123/" * 125 + "longname"
longlink = self.subdir + "/" + "123/" * 125 + "longlink"
try:
tarinfo = self.tar.getmember(longlink)
except KeyError:
self.fail("longlink not found")
self.assertEqual(tarinfo.linkname, longname, "linkname wrong")
def test_truncated_longname(self):
longname = self.subdir + "/" + "123/" * 125 + "longname"
tarinfo = self.tar.getmember(longname)
offset = tarinfo.offset
self.tar.fileobj.seek(offset)
fobj = io.BytesIO(self.tar.fileobj.read(3 * 512))
with self.assertRaises(tarfile.ReadError):
tarfile.open(name="foo.tar", fileobj=fobj)
def test_header_offset(self):
# Test if the start offset of the TarInfo object includes
# the preceding extended header.
longname = self.subdir + "/" + "123/" * 125 + "longname"
offset = self.tar.getmember(longname).offset
with open(tarname, "rb") as fobj:
fobj.seek(offset)
tarinfo = tarfile.TarInfo.frombuf(fobj.read(512),
"iso8859-1", "strict")
self.assertEqual(tarinfo.type, self.longnametype)
class GNUReadTest(LongnameTest, ReadTest, unittest.TestCase):
subdir = "gnu"
longnametype = tarfile.GNUTYPE_LONGNAME
# Since 3.2 tarfile is supposed to accurately restore sparse members and
# produce files with holes. This is what we actually want to test here.
# Unfortunately, not all platforms/filesystems support sparse files, and
# even on platforms that do it is non-trivial to make reliable assertions
# about holes in files. Therefore, we first do one basic test which works
    # on all platforms, and after that a test that will work only on
# platforms/filesystems that prove to support sparse files.
def _test_sparse_file(self, name):
self.tar.extract(name, TEMPDIR)
filename = os.path.join(TEMPDIR, name)
with open(filename, "rb") as fobj:
data = fobj.read()
self.assertEqual(sha256sum(data), sha256_sparse,
"wrong sha256sum for %s" % name)
if self._fs_supports_holes():
s = os.stat(filename)
self.assertLess(s.st_blocks * 512, s.st_size)
def test_sparse_file_old(self):
self._test_sparse_file("gnu/sparse")
def test_sparse_file_00(self):
self._test_sparse_file("gnu/sparse-0.0")
def test_sparse_file_01(self):
self._test_sparse_file("gnu/sparse-0.1")
def test_sparse_file_10(self):
self._test_sparse_file("gnu/sparse-1.0")
@staticmethod
def _fs_supports_holes():
# Return True if the platform knows the st_blocks stat attribute and
# uses st_blocks units of 512 bytes, and if the filesystem is able to
# store holes of 4 KiB in files.
#
# The function returns False if page size is larger than 4 KiB.
# For example, ppc64 uses pages of 64 KiB.
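        #
        # Sketch of the probe below: seeking to offset 4096 before the first
        # write leaves a 4 KiB gap at the start, so the file ends up with
        # st_size == 8192, while a hole-aware filesystem only allocates blocks
        # for the 4 KiB actually written, i.e. st_blocks * 512 < st_size.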
if sys.platform.startswith("linux"):
            # Linux evidently has 512-byte st_blocks units.
name = os.path.join(TEMPDIR, "sparse-test")
with open(name, "wb") as fobj:
# Seek to "punch a hole" of 4 KiB
fobj.seek(4096)
fobj.write(b'x' * 4096)
fobj.truncate()
s = os.stat(name)
os_helper.unlink(name)
return (s.st_blocks * 512 < s.st_size)
else:
return False
class PaxReadTest(LongnameTest, ReadTest, unittest.TestCase):
subdir = "pax"
longnametype = tarfile.XHDTYPE
def test_pax_global_headers(self):
tar = tarfile.open(tarname, encoding="iso8859-1")
try:
tarinfo = tar.getmember("pax/regtype1")
self.assertEqual(tarinfo.uname, "foo")
self.assertEqual(tarinfo.gname, "bar")
self.assertEqual(tarinfo.pax_headers.get("VENDOR.umlauts"),
"\xc4\xd6\xdc\xe4\xf6\xfc\xdf")
tarinfo = tar.getmember("pax/regtype2")
self.assertEqual(tarinfo.uname, "")
self.assertEqual(tarinfo.gname, "bar")
self.assertEqual(tarinfo.pax_headers.get("VENDOR.umlauts"),
"\xc4\xd6\xdc\xe4\xf6\xfc\xdf")
tarinfo = tar.getmember("pax/regtype3")
self.assertEqual(tarinfo.uname, "tarfile")
self.assertEqual(tarinfo.gname, "tarfile")
self.assertEqual(tarinfo.pax_headers.get("VENDOR.umlauts"),
"\xc4\xd6\xdc\xe4\xf6\xfc\xdf")
finally:
tar.close()
def test_pax_number_fields(self):
# All following number fields are read from the pax header.
tar = tarfile.open(tarname, encoding="iso8859-1")
try:
tarinfo = tar.getmember("pax/regtype4")
self.assertEqual(tarinfo.size, 7011)
self.assertEqual(tarinfo.uid, 123)
self.assertEqual(tarinfo.gid, 123)
self.assertEqual(tarinfo.mtime, 1041808783.0)
self.assertEqual(type(tarinfo.mtime), float)
self.assertEqual(float(tarinfo.pax_headers["atime"]), 1041808783.0)
self.assertEqual(float(tarinfo.pax_headers["ctime"]), 1041808783.0)
finally:
tar.close()
class WriteTestBase(TarTest):
# Put all write tests in here that are supposed to be tested
# in all possible mode combinations.
def test_fileobj_no_close(self):
fobj = io.BytesIO()
with tarfile.open(fileobj=fobj, mode=self.mode) as tar:
tar.addfile(tarfile.TarInfo("foo"))
        self.assertFalse(fobj.closed, "external fileobjs must never be closed")
# Issue #20238: Incomplete gzip output with mode="w:gz"
data = fobj.getvalue()
del tar
support.gc_collect()
self.assertFalse(fobj.closed)
self.assertEqual(data, fobj.getvalue())
def test_eof_marker(self):
# Make sure an end of archive marker is written (two zero blocks).
# tarfile insists on aligning archives to a 20 * 512 byte recordsize.
# So, we create an archive that has exactly 10240 bytes without the
# marker, and has 20480 bytes once the marker is written.
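        #
        # Worked numbers: RECORDSIZE is 20 * 512 == 10240 and BLOCKSIZE is
        # 512, so the 512-byte header plus 9728 bytes of data fill exactly one
        # record; the two 512-byte zero blocks of the end-of-archive marker
        # then force padding up to a second record (20480 bytes).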
with tarfile.open(tmpname, self.mode) as tar:
t = tarfile.TarInfo("foo")
t.size = tarfile.RECORDSIZE - tarfile.BLOCKSIZE
tar.addfile(t, io.BytesIO(b"a" * t.size))
with self.open(tmpname, "rb") as fobj:
self.assertEqual(len(fobj.read()), tarfile.RECORDSIZE * 2)
class WriteTest(WriteTestBase, unittest.TestCase):
prefix = "w:"
def test_100_char_name(self):
# The name field in a tar header stores strings of at most 100 chars.
# If a string is shorter than 100 chars it has to be padded with '\0',
# which implies that a string of exactly 100 chars is stored without
# a trailing '\0'.
name = "0123456789" * 10
tar = tarfile.open(tmpname, self.mode)
try:
t = tarfile.TarInfo(name)
tar.addfile(t)
finally:
tar.close()
tar = tarfile.open(tmpname)
try:
self.assertEqual(tar.getnames()[0], name,
"failed to store 100 char filename")
finally:
tar.close()
def test_tar_size(self):
# Test for bug #1013882.
tar = tarfile.open(tmpname, self.mode)
try:
path = os.path.join(TEMPDIR, "file")
with open(path, "wb") as fobj:
fobj.write(b"aaa")
tar.add(path)
finally:
tar.close()
self.assertGreater(os.path.getsize(tmpname), 0,
"tarfile is empty")
# The test_*_size tests test for bug #1167128.
def test_file_size(self):
tar = tarfile.open(tmpname, self.mode)
try:
path = os.path.join(TEMPDIR, "file")
with open(path, "wb"):
pass
tarinfo = tar.gettarinfo(path)
self.assertEqual(tarinfo.size, 0)
with open(path, "wb") as fobj:
fobj.write(b"aaa")
tarinfo = tar.gettarinfo(path)
self.assertEqual(tarinfo.size, 3)
finally:
tar.close()
def test_directory_size(self):
path = os.path.join(TEMPDIR, "directory")
os.mkdir(path)
try:
tar = tarfile.open(tmpname, self.mode)
try:
tarinfo = tar.gettarinfo(path)
self.assertEqual(tarinfo.size, 0)
finally:
tar.close()
finally:
os_helper.rmdir(path)
# mock the following:
# os.listdir: so we know that files are in the wrong order
def test_ordered_recursion(self):
path = os.path.join(TEMPDIR, "directory")
os.mkdir(path)
open(os.path.join(path, "1"), "a").close()
open(os.path.join(path, "2"), "a").close()
try:
tar = tarfile.open(tmpname, self.mode)
try:
with unittest.mock.patch('os.listdir') as mock_listdir:
mock_listdir.return_value = ["2", "1"]
tar.add(path)
paths = []
for m in tar.getmembers():
paths.append(os.path.split(m.name)[-1])
                self.assertEqual(paths, ["directory", "1", "2"])
finally:
tar.close()
finally:
os_helper.unlink(os.path.join(path, "1"))
os_helper.unlink(os.path.join(path, "2"))
os_helper.rmdir(path)
def test_gettarinfo_pathlike_name(self):
with tarfile.open(tmpname, self.mode) as tar:
path = pathlib.Path(TEMPDIR) / "file"
with open(path, "wb") as fobj:
fobj.write(b"aaa")
tarinfo = tar.gettarinfo(path)
tarinfo2 = tar.gettarinfo(os.fspath(path))
self.assertIsInstance(tarinfo.name, str)
self.assertEqual(tarinfo.name, tarinfo2.name)
self.assertEqual(tarinfo.size, 3)
@unittest.skipUnless(hasattr(os, "link"),
"Missing hardlink implementation")
def test_link_size(self):
link = os.path.join(TEMPDIR, "link")
target = os.path.join(TEMPDIR, "link_target")
with open(target, "wb") as fobj:
fobj.write(b"aaa")
try:
os.link(target, link)
except PermissionError as e:
self.skipTest('os.link(): %s' % e)
try:
tar = tarfile.open(tmpname, self.mode)
try:
# Record the link target in the inodes list.
tar.gettarinfo(target)
tarinfo = tar.gettarinfo(link)
self.assertEqual(tarinfo.size, 0)
finally:
tar.close()
finally:
os_helper.unlink(target)
os_helper.unlink(link)
@os_helper.skip_unless_symlink
def test_symlink_size(self):
path = os.path.join(TEMPDIR, "symlink")
os.symlink("link_target", path)
try:
tar = tarfile.open(tmpname, self.mode)
try:
tarinfo = tar.gettarinfo(path)
self.assertEqual(tarinfo.size, 0)
finally:
tar.close()
finally:
os_helper.unlink(path)
def test_add_self(self):
# Test for #1257255.
dstname = os.path.abspath(tmpname)
tar = tarfile.open(tmpname, self.mode)
try:
self.assertEqual(tar.name, dstname,
"archive name must be absolute")
tar.add(dstname)
self.assertEqual(tar.getnames(), [],
"added the archive to itself")
with os_helper.change_cwd(TEMPDIR):
tar.add(dstname)
self.assertEqual(tar.getnames(), [],
"added the archive to itself")
finally:
tar.close()
def test_filter(self):
tempdir = os.path.join(TEMPDIR, "filter")
os.mkdir(tempdir)
try:
for name in ("foo", "bar", "baz"):
name = os.path.join(tempdir, name)
os_helper.create_empty_file(name)
def filter(tarinfo):
if os.path.basename(tarinfo.name) == "bar":
return
tarinfo.uid = 123
tarinfo.uname = "foo"
return tarinfo
tar = tarfile.open(tmpname, self.mode, encoding="iso8859-1")
try:
tar.add(tempdir, arcname="empty_dir", filter=filter)
finally:
tar.close()
# Verify that filter is a keyword-only argument
with self.assertRaises(TypeError):
tar.add(tempdir, "empty_dir", True, None, filter)
tar = tarfile.open(tmpname, "r")
try:
for tarinfo in tar:
self.assertEqual(tarinfo.uid, 123)
self.assertEqual(tarinfo.uname, "foo")
self.assertEqual(len(tar.getmembers()), 3)
finally:
tar.close()
finally:
os_helper.rmtree(tempdir)
# Guarantee that stored pathnames are not modified. Don't
# remove ./ or ../ or double slashes. Still make absolute
# pathnames relative.
# For details see bug #6054.
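    # For example, "foo/./bar" and "foo/../bar" are expected to be stored
    # verbatim (apart from os.sep being converted to "/"), whereas "/foo" and
    # "C:\foo" are stored as the relative name "foo".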
def _test_pathname(self, path, cmp_path=None, dir=False):
# Create a tarfile with an empty member named path
# and compare the stored name with the original.
foo = os.path.join(TEMPDIR, "foo")
if not dir:
os_helper.create_empty_file(foo)
else:
os.mkdir(foo)
tar = tarfile.open(tmpname, self.mode)
try:
tar.add(foo, arcname=path)
finally:
tar.close()
tar = tarfile.open(tmpname, "r")
try:
t = tar.next()
finally:
tar.close()
if not dir:
os_helper.unlink(foo)
else:
os_helper.rmdir(foo)
self.assertEqual(t.name, cmp_path or path.replace(os.sep, "/"))
@os_helper.skip_unless_symlink
def test_extractall_symlinks(self):
# Test if extractall works properly when tarfile contains symlinks
tempdir = os.path.join(TEMPDIR, "testsymlinks")
temparchive = os.path.join(TEMPDIR, "testsymlinks.tar")
os.mkdir(tempdir)
try:
source_file = os.path.join(tempdir,'source')
target_file = os.path.join(tempdir,'symlink')
with open(source_file,'w') as f:
f.write('something\n')
os.symlink(source_file, target_file)
with tarfile.open(temparchive, 'w') as tar:
tar.add(source_file, arcname="source")
tar.add(target_file, arcname="symlink")
# Let's extract it to the location which contains the symlink
with tarfile.open(temparchive, errorlevel=2) as tar:
# this should not raise OSError: [Errno 17] File exists
try:
tar.extractall(path=tempdir)
except OSError:
self.fail("extractall failed with symlinked files")
finally:
os_helper.unlink(temparchive)
os_helper.rmtree(tempdir)
def test_pathnames(self):
self._test_pathname("foo")
self._test_pathname(os.path.join("foo", ".", "bar"))
self._test_pathname(os.path.join("foo", "..", "bar"))
self._test_pathname(os.path.join(".", "foo"))
self._test_pathname(os.path.join(".", "foo", "."))
self._test_pathname(os.path.join(".", "foo", ".", "bar"))
self._test_pathname(os.path.join(".", "foo", "..", "bar"))
self._test_pathname(os.path.join(".", "foo", "..", "bar"))
self._test_pathname(os.path.join("..", "foo"))
self._test_pathname(os.path.join("..", "foo", ".."))
self._test_pathname(os.path.join("..", "foo", ".", "bar"))
self._test_pathname(os.path.join("..", "foo", "..", "bar"))
self._test_pathname("foo" + os.sep + os.sep + "bar")
self._test_pathname("foo" + os.sep + os.sep, "foo", dir=True)
def test_abs_pathnames(self):
if sys.platform == "win32":
self._test_pathname("C:\\foo", "foo")
else:
self._test_pathname("/foo", "foo")
self._test_pathname("///foo", "foo")
def test_cwd(self):
# Test adding the current working directory.
with os_helper.change_cwd(TEMPDIR):
tar = tarfile.open(tmpname, self.mode)
try:
tar.add(".")
finally:
tar.close()
tar = tarfile.open(tmpname, "r")
try:
for t in tar:
if t.name != ".":
self.assertTrue(t.name.startswith("./"), t.name)
finally:
tar.close()
def test_open_nonwritable_fileobj(self):
for exctype in OSError, EOFError, RuntimeError:
class BadFile(io.BytesIO):
first = True
def write(self, data):
if self.first:
self.first = False
raise exctype
f = BadFile()
with self.assertRaises(exctype):
tar = tarfile.open(tmpname, self.mode, fileobj=f,
format=tarfile.PAX_FORMAT,
pax_headers={'non': 'empty'})
self.assertFalse(f.closed)
class GzipWriteTest(GzipTest, WriteTest):
pass
class Bz2WriteTest(Bz2Test, WriteTest):
pass
class LzmaWriteTest(LzmaTest, WriteTest):
pass
class StreamWriteTest(WriteTestBase, unittest.TestCase):
prefix = "w|"
decompressor = None
def test_stream_padding(self):
# Test for bug #1543303.
tar = tarfile.open(tmpname, self.mode)
tar.close()
if self.decompressor:
dec = self.decompressor()
with open(tmpname, "rb") as fobj:
data = fobj.read()
data = dec.decompress(data)
self.assertFalse(dec.unused_data, "found trailing data")
else:
with self.open(tmpname) as fobj:
data = fobj.read()
self.assertEqual(data.count(b"\0"), tarfile.RECORDSIZE,
"incorrect zero padding")
@unittest.skipUnless(sys.platform != "win32" and hasattr(os, "umask"),
"Missing umask implementation")
@unittest.skipIf(
support.is_emscripten or support.is_wasi,
"Emscripten's/WASI's umask is a stub."
)
def test_file_mode(self):
# Test for issue #8464: Create files with correct
# permissions.
if os.path.exists(tmpname):
os_helper.unlink(tmpname)
original_umask = os.umask(0o022)
try:
tar = tarfile.open(tmpname, self.mode)
tar.close()
mode = os.stat(tmpname).st_mode & 0o777
self.assertEqual(mode, 0o644, "wrong file permissions")
finally:
os.umask(original_umask)
class GzipStreamWriteTest(GzipTest, StreamWriteTest):
def test_source_directory_not_leaked(self):
"""
Ensure the source directory is not included in the tar header
per bpo-41316.
"""
tarfile.open(tmpname, self.mode).close()
payload = pathlib.Path(tmpname).read_text(encoding='latin-1')
assert os.path.dirname(tmpname) not in payload
class Bz2StreamWriteTest(Bz2Test, StreamWriteTest):
decompressor = bz2.BZ2Decompressor if bz2 else None
class LzmaStreamWriteTest(LzmaTest, StreamWriteTest):
decompressor = lzma.LZMADecompressor if lzma else None
class GNUWriteTest(unittest.TestCase):
# This testcase checks for correct creation of GNU Longname
    # and Longlink extended headers (cf. bug #812325).
def _length(self, s):
blocks = len(s) // 512 + 1
return blocks * 512
def _calc_size(self, name, link=None):
# Initial tar header
count = 512
if len(name) > tarfile.LENGTH_NAME:
# GNU longname extended header + longname
count += 512
count += self._length(name)
if link is not None and len(link) > tarfile.LENGTH_LINK:
# GNU longlink extended header + longlink
count += 512
count += self._length(link)
return count
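    # Example of the bookkeeping above: for a 1024-character name,
    # _calc_size() is 512 (ustar header) + 512 (GNU longname header) +
    # 1536 (the NUL-terminated name padded to full 512-byte blocks) == 2560.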
def _test(self, name, link=None):
tarinfo = tarfile.TarInfo(name)
if link:
tarinfo.linkname = link
tarinfo.type = tarfile.LNKTYPE
tar = tarfile.open(tmpname, "w")
try:
tar.format = tarfile.GNU_FORMAT
tar.addfile(tarinfo)
v1 = self._calc_size(name, link)
v2 = tar.offset
self.assertEqual(v1, v2, "GNU longname/longlink creation failed")
finally:
tar.close()
tar = tarfile.open(tmpname)
try:
member = tar.next()
self.assertIsNotNone(member,
"unable to read longname member")
self.assertEqual(tarinfo.name, member.name,
"unable to read longname member")
self.assertEqual(tarinfo.linkname, member.linkname,
"unable to read longname member")
finally:
tar.close()
def test_longname_1023(self):
self._test(("longnam/" * 127) + "longnam")
def test_longname_1024(self):
self._test(("longnam/" * 127) + "longname")
def test_longname_1025(self):
self._test(("longnam/" * 127) + "longname_")
def test_longlink_1023(self):
self._test("name", ("longlnk/" * 127) + "longlnk")
def test_longlink_1024(self):
self._test("name", ("longlnk/" * 127) + "longlink")
def test_longlink_1025(self):
self._test("name", ("longlnk/" * 127) + "longlink_")
def test_longnamelink_1023(self):
self._test(("longnam/" * 127) + "longnam",
("longlnk/" * 127) + "longlnk")
def test_longnamelink_1024(self):
self._test(("longnam/" * 127) + "longname",
("longlnk/" * 127) + "longlink")
def test_longnamelink_1025(self):
self._test(("longnam/" * 127) + "longname_",
("longlnk/" * 127) + "longlink_")
class DeviceHeaderTest(WriteTestBase, unittest.TestCase):
prefix = "w:"
def test_headers_written_only_for_device_files(self):
# Regression test for bpo-18819.
tempdir = os.path.join(TEMPDIR, "device_header_test")
os.mkdir(tempdir)
try:
tar = tarfile.open(tmpname, self.mode)
try:
input_blk = tarfile.TarInfo(name="my_block_device")
input_reg = tarfile.TarInfo(name="my_regular_file")
input_blk.type = tarfile.BLKTYPE
input_reg.type = tarfile.REGTYPE
tar.addfile(input_blk)
tar.addfile(input_reg)
finally:
tar.close()
# devmajor and devminor should be *interpreted* as 0 in both...
tar = tarfile.open(tmpname, "r")
try:
output_blk = tar.getmember("my_block_device")
output_reg = tar.getmember("my_regular_file")
finally:
tar.close()
self.assertEqual(output_blk.devmajor, 0)
self.assertEqual(output_blk.devminor, 0)
self.assertEqual(output_reg.devmajor, 0)
self.assertEqual(output_reg.devminor, 0)
# ...but the fields should not actually be set on regular files:
with open(tmpname, "rb") as infile:
buf = infile.read()
buf_blk = buf[output_blk.offset:output_blk.offset_data]
buf_reg = buf[output_reg.offset:output_reg.offset_data]
# See `struct posixheader` in GNU docs for byte offsets:
# <https://www.gnu.org/software/tar/manual/html_node/Standard.html>
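            # devmajor occupies header bytes 329-336 and devminor bytes
            # 337-344, hence the 16-byte slice below.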
device_headers = slice(329, 329 + 16)
self.assertEqual(buf_blk[device_headers], b"0000000\0" * 2)
self.assertEqual(buf_reg[device_headers], b"\0" * 16)
finally:
os_helper.rmtree(tempdir)
class CreateTest(WriteTestBase, unittest.TestCase):
prefix = "x:"
file_path = os.path.join(TEMPDIR, "spameggs42")
def setUp(self):
os_helper.unlink(tmpname)
@classmethod
def setUpClass(cls):
with open(cls.file_path, "wb") as fobj:
fobj.write(b"aaa")
@classmethod
def tearDownClass(cls):
os_helper.unlink(cls.file_path)
def test_create(self):
with tarfile.open(tmpname, self.mode) as tobj:
tobj.add(self.file_path)
with self.taropen(tmpname) as tobj:
names = tobj.getnames()
self.assertEqual(len(names), 1)
self.assertIn('spameggs42', names[0])
def test_create_existing(self):
with tarfile.open(tmpname, self.mode) as tobj:
tobj.add(self.file_path)
with self.assertRaises(FileExistsError):
tobj = tarfile.open(tmpname, self.mode)
with self.taropen(tmpname) as tobj:
names = tobj.getnames()
self.assertEqual(len(names), 1)
self.assertIn('spameggs42', names[0])
def test_create_taropen(self):
with self.taropen(tmpname, "x") as tobj:
tobj.add(self.file_path)
with self.taropen(tmpname) as tobj:
names = tobj.getnames()
self.assertEqual(len(names), 1)
self.assertIn('spameggs42', names[0])
def test_create_existing_taropen(self):
with self.taropen(tmpname, "x") as tobj:
tobj.add(self.file_path)
with self.assertRaises(FileExistsError):
with self.taropen(tmpname, "x"):
pass
with self.taropen(tmpname) as tobj:
names = tobj.getnames()
self.assertEqual(len(names), 1)
self.assertIn("spameggs42", names[0])
def test_create_pathlike_name(self):
with tarfile.open(pathlib.Path(tmpname), self.mode) as tobj:
self.assertIsInstance(tobj.name, str)
self.assertEqual(tobj.name, os.path.abspath(tmpname))
tobj.add(pathlib.Path(self.file_path))
names = tobj.getnames()
self.assertEqual(len(names), 1)
self.assertIn('spameggs42', names[0])
with self.taropen(tmpname) as tobj:
names = tobj.getnames()
self.assertEqual(len(names), 1)
self.assertIn('spameggs42', names[0])
def test_create_taropen_pathlike_name(self):
with self.taropen(pathlib.Path(tmpname), "x") as tobj:
self.assertIsInstance(tobj.name, str)
self.assertEqual(tobj.name, os.path.abspath(tmpname))
tobj.add(pathlib.Path(self.file_path))
names = tobj.getnames()
self.assertEqual(len(names), 1)
self.assertIn('spameggs42', names[0])
with self.taropen(tmpname) as tobj:
names = tobj.getnames()
self.assertEqual(len(names), 1)
self.assertIn('spameggs42', names[0])
class GzipCreateTest(GzipTest, CreateTest):
def test_create_with_compresslevel(self):
with tarfile.open(tmpname, self.mode, compresslevel=1) as tobj:
tobj.add(self.file_path)
with tarfile.open(tmpname, 'r:gz', compresslevel=1) as tobj:
pass
class Bz2CreateTest(Bz2Test, CreateTest):
def test_create_with_compresslevel(self):
with tarfile.open(tmpname, self.mode, compresslevel=1) as tobj:
tobj.add(self.file_path)
with tarfile.open(tmpname, 'r:bz2', compresslevel=1) as tobj:
pass
class LzmaCreateTest(LzmaTest, CreateTest):
# Unlike gz and bz2, xz uses the preset keyword instead of compresslevel.
# It does not allow for preset to be specified when reading.
def test_create_with_preset(self):
with tarfile.open(tmpname, self.mode, preset=1) as tobj:
tobj.add(self.file_path)
class CreateWithXModeTest(CreateTest):
prefix = "x"
test_create_taropen = None
test_create_existing_taropen = None
@unittest.skipUnless(hasattr(os, "link"), "Missing hardlink implementation")
class HardlinkTest(unittest.TestCase):
# Test the creation of LNKTYPE (hardlink) members in an archive.
def setUp(self):
self.foo = os.path.join(TEMPDIR, "foo")
self.bar = os.path.join(TEMPDIR, "bar")
with open(self.foo, "wb") as fobj:
fobj.write(b"foo")
try:
os.link(self.foo, self.bar)
except PermissionError as e:
self.skipTest('os.link(): %s' % e)
self.tar = tarfile.open(tmpname, "w")
self.tar.add(self.foo)
def tearDown(self):
self.tar.close()
os_helper.unlink(self.foo)
os_helper.unlink(self.bar)
def test_add_twice(self):
# The same name will be added as a REGTYPE every
# time regardless of st_nlink.
tarinfo = self.tar.gettarinfo(self.foo)
self.assertEqual(tarinfo.type, tarfile.REGTYPE,
"add file as regular failed")
def test_add_hardlink(self):
tarinfo = self.tar.gettarinfo(self.bar)
self.assertEqual(tarinfo.type, tarfile.LNKTYPE,
"add file as hardlink failed")
def test_dereference_hardlink(self):
self.tar.dereference = True
tarinfo = self.tar.gettarinfo(self.bar)
self.assertEqual(tarinfo.type, tarfile.REGTYPE,
"dereferencing hardlink failed")
class PaxWriteTest(GNUWriteTest):
def _test(self, name, link=None):
# See GNUWriteTest.
tarinfo = tarfile.TarInfo(name)
if link:
tarinfo.linkname = link
tarinfo.type = tarfile.LNKTYPE
tar = tarfile.open(tmpname, "w", format=tarfile.PAX_FORMAT)
try:
tar.addfile(tarinfo)
finally:
tar.close()
tar = tarfile.open(tmpname)
try:
if link:
l = tar.getmembers()[0].linkname
self.assertEqual(link, l, "PAX longlink creation failed")
else:
n = tar.getmembers()[0].name
self.assertEqual(name, n, "PAX longname creation failed")
finally:
tar.close()
def test_pax_global_header(self):
pax_headers = {
"foo": "bar",
"uid": "0",
"mtime": "1.23",
"test": "\xe4\xf6\xfc",
"\xe4\xf6\xfc": "test"}
tar = tarfile.open(tmpname, "w", format=tarfile.PAX_FORMAT,
pax_headers=pax_headers)
try:
tar.addfile(tarfile.TarInfo("test"))
finally:
tar.close()
# Test if the global header was written correctly.
tar = tarfile.open(tmpname, encoding="iso8859-1")
try:
self.assertEqual(tar.pax_headers, pax_headers)
self.assertEqual(tar.getmembers()[0].pax_headers, pax_headers)
# Test if all the fields are strings.
for key, val in tar.pax_headers.items():
self.assertIsNot(type(key), bytes)
self.assertIsNot(type(val), bytes)
if key in tarfile.PAX_NUMBER_FIELDS:
try:
tarfile.PAX_NUMBER_FIELDS[key](val)
except (TypeError, ValueError):
self.fail("unable to convert pax header field")
finally:
tar.close()
def test_pax_extended_header(self):
# The fields from the pax header have priority over the
# TarInfo.
pax_headers = {"path": "foo", "uid": "123"}
tar = tarfile.open(tmpname, "w", format=tarfile.PAX_FORMAT,
encoding="iso8859-1")
try:
t = tarfile.TarInfo()
t.name = "\xe4\xf6\xfc" # non-ASCII
t.uid = 8**8 # too large
t.pax_headers = pax_headers
tar.addfile(t)
finally:
tar.close()
tar = tarfile.open(tmpname, encoding="iso8859-1")
try:
t = tar.getmembers()[0]
self.assertEqual(t.pax_headers, pax_headers)
self.assertEqual(t.name, "foo")
self.assertEqual(t.uid, 123)
finally:
tar.close()
def test_create_pax_header(self):
# The ustar header should contain values that can be
# represented reasonably, even if a better (e.g. higher
# precision) version is set in the pax header.
# Issue #45863
# values that should be kept
t = tarfile.TarInfo()
t.name = "foo"
t.mtime = 1000.1
t.size = 100
t.uid = 123
t.gid = 124
info = t.get_info()
header = t.create_pax_header(info, encoding="iso8859-1")
self.assertEqual(info['name'], "foo")
# mtime should be rounded to nearest second
self.assertIsInstance(info['mtime'], int)
self.assertEqual(info['mtime'], 1000)
self.assertEqual(info['size'], 100)
self.assertEqual(info['uid'], 123)
self.assertEqual(info['gid'], 124)
self.assertEqual(header,
b'././@PaxHeader' + bytes(86) \
+ b'0000000\x000000000\x000000000\x0000000000020\x0000000000000\x00010205\x00 x' \
+ bytes(100) + b'ustar\x0000'+ bytes(247) \
+ b'16 mtime=1000.1\n' + bytes(496) + b'foo' + bytes(97) \
+ b'0000644\x000000173\x000000174\x0000000000144\x0000000001750\x00006516\x00 0' \
+ bytes(100) + b'ustar\x0000' + bytes(247))
# values that should be changed
t = tarfile.TarInfo()
t.name = "foo\u3374" # can't be represented in ascii
t.mtime = 10**10 # too big
t.size = 10**10 # too big
t.uid = 8**8 # too big
t.gid = 8**8+1 # too big
info = t.get_info()
header = t.create_pax_header(info, encoding="iso8859-1")
# name is kept as-is in info but should be added to pax header
self.assertEqual(info['name'], "foo\u3374")
self.assertEqual(info['mtime'], 0)
self.assertEqual(info['size'], 0)
self.assertEqual(info['uid'], 0)
self.assertEqual(info['gid'], 0)
self.assertEqual(header,
b'././@PaxHeader' + bytes(86) \
+ b'0000000\x000000000\x000000000\x0000000000130\x0000000000000\x00010207\x00 x' \
+ bytes(100) + b'ustar\x0000' + bytes(247) \
+ b'15 path=foo\xe3\x8d\xb4\n16 uid=16777216\n' \
+ b'16 gid=16777217\n20 size=10000000000\n' \
+ b'21 mtime=10000000000\n'+ bytes(424) + b'foo?' + bytes(96) \
+ b'0000644\x000000000\x000000000\x0000000000000\x0000000000000\x00006540\x00 0' \
+ bytes(100) + b'ustar\x0000' + bytes(247))
class UnicodeTest:
def test_iso8859_1_filename(self):
self._test_unicode_filename("iso8859-1")
def test_utf7_filename(self):
self._test_unicode_filename("utf7")
def test_utf8_filename(self):
self._test_unicode_filename("utf-8")
def _test_unicode_filename(self, encoding):
tar = tarfile.open(tmpname, "w", format=self.format,
encoding=encoding, errors="strict")
try:
name = "\xe4\xf6\xfc"
tar.addfile(tarfile.TarInfo(name))
finally:
tar.close()
tar = tarfile.open(tmpname, encoding=encoding)
try:
self.assertEqual(tar.getmembers()[0].name, name)
finally:
tar.close()
def test_unicode_filename_error(self):
tar = tarfile.open(tmpname, "w", format=self.format,
encoding="ascii", errors="strict")
try:
tarinfo = tarfile.TarInfo()
tarinfo.name = "\xe4\xf6\xfc"
self.assertRaises(UnicodeError, tar.addfile, tarinfo)
tarinfo.name = "foo"
tarinfo.uname = "\xe4\xf6\xfc"
self.assertRaises(UnicodeError, tar.addfile, tarinfo)
finally:
tar.close()
def test_unicode_argument(self):
tar = tarfile.open(tarname, "r",
encoding="iso8859-1", errors="strict")
try:
for t in tar:
self.assertIs(type(t.name), str)
self.assertIs(type(t.linkname), str)
self.assertIs(type(t.uname), str)
self.assertIs(type(t.gname), str)
finally:
tar.close()
def test_uname_unicode(self):
t = tarfile.TarInfo("foo")
t.uname = "\xe4\xf6\xfc"
t.gname = "\xe4\xf6\xfc"
tar = tarfile.open(tmpname, mode="w", format=self.format,
encoding="iso8859-1")
try:
tar.addfile(t)
finally:
tar.close()
tar = tarfile.open(tmpname, encoding="iso8859-1")
try:
t = tar.getmember("foo")
self.assertEqual(t.uname, "\xe4\xf6\xfc")
self.assertEqual(t.gname, "\xe4\xf6\xfc")
if self.format != tarfile.PAX_FORMAT:
tar.close()
tar = tarfile.open(tmpname, encoding="ascii")
t = tar.getmember("foo")
self.assertEqual(t.uname, "\udce4\udcf6\udcfc")
self.assertEqual(t.gname, "\udce4\udcf6\udcfc")
finally:
tar.close()
class UstarUnicodeTest(UnicodeTest, unittest.TestCase):
format = tarfile.USTAR_FORMAT
# Test whether the utf-8 encoded version of a filename exceeds the 100
# bytes name field limit (every occurrence of '\xff' will be expanded to 2
# bytes).
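    # For instance, "\xff".encode("utf-8") is the two bytes b"\xc3\xbf", so
    # 98 ASCII characters plus one "\xff" still fit into the 100-byte field
    # while 99 ASCII characters plus one "\xff" do not.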
def test_unicode_name1(self):
self._test_ustar_name("0123456789" * 10)
self._test_ustar_name("0123456789" * 10 + "0", ValueError)
self._test_ustar_name("0123456789" * 9 + "01234567\xff")
self._test_ustar_name("0123456789" * 9 + "012345678\xff", ValueError)
def test_unicode_name2(self):
self._test_ustar_name("0123456789" * 9 + "012345\xff\xff")
self._test_ustar_name("0123456789" * 9 + "0123456\xff\xff", ValueError)
# Test whether the utf-8 encoded version of a filename exceeds the 155
# bytes prefix + '/' + 100 bytes name limit.
def test_unicode_longname1(self):
self._test_ustar_name("0123456789" * 15 + "01234/" + "0123456789" * 10)
self._test_ustar_name("0123456789" * 15 + "0123/4" + "0123456789" * 10, ValueError)
self._test_ustar_name("0123456789" * 15 + "012\xff/" + "0123456789" * 10)
self._test_ustar_name("0123456789" * 15 + "0123\xff/" + "0123456789" * 10, ValueError)
def test_unicode_longname2(self):
self._test_ustar_name("0123456789" * 15 + "01\xff/2" + "0123456789" * 10, ValueError)
self._test_ustar_name("0123456789" * 15 + "01\xff\xff/" + "0123456789" * 10, ValueError)
def test_unicode_longname3(self):
self._test_ustar_name("0123456789" * 15 + "01\xff\xff/2" + "0123456789" * 10, ValueError)
self._test_ustar_name("0123456789" * 15 + "01234/" + "0123456789" * 9 + "01234567\xff")
self._test_ustar_name("0123456789" * 15 + "01234/" + "0123456789" * 9 + "012345678\xff", ValueError)
def test_unicode_longname4(self):
self._test_ustar_name("0123456789" * 15 + "01234/" + "0123456789" * 9 + "012345\xff\xff")
self._test_ustar_name("0123456789" * 15 + "01234/" + "0123456789" * 9 + "0123456\xff\xff", ValueError)
def _test_ustar_name(self, name, exc=None):
with tarfile.open(tmpname, "w", format=self.format, encoding="utf-8") as tar:
t = tarfile.TarInfo(name)
if exc is None:
tar.addfile(t)
else:
self.assertRaises(exc, tar.addfile, t)
if exc is None:
with tarfile.open(tmpname, "r", encoding="utf-8") as tar:
for t in tar:
self.assertEqual(name, t.name)
break
# Test the same as above for the 100 bytes link field.
def test_unicode_link1(self):
self._test_ustar_link("0123456789" * 10)
self._test_ustar_link("0123456789" * 10 + "0", ValueError)
self._test_ustar_link("0123456789" * 9 + "01234567\xff")
self._test_ustar_link("0123456789" * 9 + "012345678\xff", ValueError)
def test_unicode_link2(self):
self._test_ustar_link("0123456789" * 9 + "012345\xff\xff")
self._test_ustar_link("0123456789" * 9 + "0123456\xff\xff", ValueError)
def _test_ustar_link(self, name, exc=None):
with tarfile.open(tmpname, "w", format=self.format, encoding="utf-8") as tar:
t = tarfile.TarInfo("foo")
t.linkname = name
if exc is None:
tar.addfile(t)
else:
self.assertRaises(exc, tar.addfile, t)
if exc is None:
with tarfile.open(tmpname, "r", encoding="utf-8") as tar:
for t in tar:
self.assertEqual(name, t.linkname)
break
class GNUUnicodeTest(UnicodeTest, unittest.TestCase):
format = tarfile.GNU_FORMAT
def test_bad_pax_header(self):
# Test for issue #8633. GNU tar <= 1.23 creates raw binary fields
# without a hdrcharset=BINARY header.
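        # With errors="surrogateescape", undecodable bytes such as 0xe4 are
        # mapped to lone surrogates like "\udce4", which is why the utf-8
        # variant of the member name below reads "bad-pax-\udce4\udcf6\udcfc".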
for encoding, name in (
("utf-8", "pax/bad-pax-\udce4\udcf6\udcfc"),
("iso8859-1", "pax/bad-pax-\xe4\xf6\xfc"),):
with tarfile.open(tarname, encoding=encoding,
errors="surrogateescape") as tar:
try:
t = tar.getmember(name)
except KeyError:
self.fail("unable to read bad GNU tar pax header")
class PAXUnicodeTest(UnicodeTest, unittest.TestCase):
format = tarfile.PAX_FORMAT
# PAX_FORMAT ignores encoding in write mode.
test_unicode_filename_error = None
def test_binary_header(self):
# Test a POSIX.1-2008 compatible header with a hdrcharset=BINARY field.
for encoding, name in (
("utf-8", "pax/hdrcharset-\udce4\udcf6\udcfc"),
("iso8859-1", "pax/hdrcharset-\xe4\xf6\xfc"),):
with tarfile.open(tarname, encoding=encoding,
errors="surrogateescape") as tar:
try:
t = tar.getmember(name)
except KeyError:
self.fail("unable to read POSIX.1-2008 binary header")
class AppendTestBase:
    # Test append mode (cf. patch #1652681).
def setUp(self):
self.tarname = tmpname
if os.path.exists(self.tarname):
os_helper.unlink(self.tarname)
def _create_testtar(self, mode="w:"):
with tarfile.open(tarname, encoding="iso8859-1") as src:
t = src.getmember("ustar/regtype")
t.name = "foo"
with src.extractfile(t) as f:
with tarfile.open(self.tarname, mode) as tar:
tar.addfile(t, f)
def test_append_compressed(self):
self._create_testtar("w:" + self.suffix)
self.assertRaises(tarfile.ReadError, tarfile.open, tmpname, "a")
class AppendTest(AppendTestBase, unittest.TestCase):
test_append_compressed = None
def _add_testfile(self, fileobj=None):
with tarfile.open(self.tarname, "a", fileobj=fileobj) as tar:
tar.addfile(tarfile.TarInfo("bar"))
def _test(self, names=["bar"], fileobj=None):
with tarfile.open(self.tarname, fileobj=fileobj) as tar:
self.assertEqual(tar.getnames(), names)
def test_non_existing(self):
self._add_testfile()
self._test()
def test_empty(self):
tarfile.open(self.tarname, "w:").close()
self._add_testfile()
self._test()
def test_empty_fileobj(self):
fobj = io.BytesIO(b"\0" * 1024)
self._add_testfile(fobj)
fobj.seek(0)
self._test(fileobj=fobj)
def test_fileobj(self):
self._create_testtar()
with open(self.tarname, "rb") as fobj:
data = fobj.read()
fobj = io.BytesIO(data)
self._add_testfile(fobj)
fobj.seek(0)
self._test(names=["foo", "bar"], fileobj=fobj)
def test_existing(self):
self._create_testtar()
self._add_testfile()
self._test(names=["foo", "bar"])
# Append mode is supposed to fail if the tarfile to append to
# does not end with a zero block.
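    # The helpers below feed deliberately malformed archives to append mode:
    # an empty file, a truncated block, a header without the end-of-archive
    # marker, and plain garbage.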
def _test_error(self, data):
with open(self.tarname, "wb") as fobj:
fobj.write(data)
self.assertRaises(tarfile.ReadError, self._add_testfile)
def test_null(self):
self._test_error(b"")
def test_incomplete(self):
self._test_error(b"\0" * 13)
def test_premature_eof(self):
data = tarfile.TarInfo("foo").tobuf()
self._test_error(data)
def test_trailing_garbage(self):
data = tarfile.TarInfo("foo").tobuf()
self._test_error(data + b"\0" * 13)
def test_invalid(self):
self._test_error(b"a" * 512)
class GzipAppendTest(GzipTest, AppendTestBase, unittest.TestCase):
pass
class Bz2AppendTest(Bz2Test, AppendTestBase, unittest.TestCase):
pass
class LzmaAppendTest(LzmaTest, AppendTestBase, unittest.TestCase):
pass
class LimitsTest(unittest.TestCase):
def test_ustar_limits(self):
# 100 char name
tarinfo = tarfile.TarInfo("0123456789" * 10)
tarinfo.tobuf(tarfile.USTAR_FORMAT)
# 101 char name that cannot be stored
tarinfo = tarfile.TarInfo("0123456789" * 10 + "0")
self.assertRaises(ValueError, tarinfo.tobuf, tarfile.USTAR_FORMAT)
# 256 char name with a slash at pos 156
tarinfo = tarfile.TarInfo("123/" * 62 + "longname")
tarinfo.tobuf(tarfile.USTAR_FORMAT)
# 256 char name that cannot be stored
tarinfo = tarfile.TarInfo("1234567/" * 31 + "longname")
self.assertRaises(ValueError, tarinfo.tobuf, tarfile.USTAR_FORMAT)
# 512 char name
tarinfo = tarfile.TarInfo("123/" * 126 + "longname")
self.assertRaises(ValueError, tarinfo.tobuf, tarfile.USTAR_FORMAT)
# 512 char linkname
tarinfo = tarfile.TarInfo("longlink")
tarinfo.linkname = "123/" * 126 + "longname"
self.assertRaises(ValueError, tarinfo.tobuf, tarfile.USTAR_FORMAT)
# uid > 8 digits
tarinfo = tarfile.TarInfo("name")
tarinfo.uid = 0o10000000
self.assertRaises(ValueError, tarinfo.tobuf, tarfile.USTAR_FORMAT)
def test_gnu_limits(self):
tarinfo = tarfile.TarInfo("123/" * 126 + "longname")
tarinfo.tobuf(tarfile.GNU_FORMAT)
tarinfo = tarfile.TarInfo("longlink")
tarinfo.linkname = "123/" * 126 + "longname"
tarinfo.tobuf(tarfile.GNU_FORMAT)
# uid >= 256 ** 7
tarinfo = tarfile.TarInfo("name")
tarinfo.uid = 0o4000000000000000000
self.assertRaises(ValueError, tarinfo.tobuf, tarfile.GNU_FORMAT)
def test_pax_limits(self):
tarinfo = tarfile.TarInfo("123/" * 126 + "longname")
tarinfo.tobuf(tarfile.PAX_FORMAT)
tarinfo = tarfile.TarInfo("longlink")
tarinfo.linkname = "123/" * 126 + "longname"
tarinfo.tobuf(tarfile.PAX_FORMAT)
tarinfo = tarfile.TarInfo("name")
tarinfo.uid = 0o4000000000000000000
tarinfo.tobuf(tarfile.PAX_FORMAT)
class MiscTest(unittest.TestCase):
def test_char_fields(self):
self.assertEqual(tarfile.stn("foo", 8, "ascii", "strict"),
b"foo\0\0\0\0\0")
self.assertEqual(tarfile.stn("foobar", 3, "ascii", "strict"),
b"foo")
self.assertEqual(tarfile.nts(b"foo\0\0\0\0\0", "ascii", "strict"),
"foo")
self.assertEqual(tarfile.nts(b"foo\0bar\0", "ascii", "strict"),
"foo")
def test_read_number_fields(self):
# Issue 13158: Test if GNU tar specific base-256 number fields
# are decoded correctly.
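        # In the GNU extension the first byte selects base-256 encoding: a
        # leading 0x80 marks a positive value stored big-endian in the
        # remaining bytes (b"\x80\x00\x00\x00\x00\x20\x00\x00" is
        # 0x200000 == 0o10000000), and a leading 0xff marks a negative value
        # in two's complement (b"\xff" * 8 is -1).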
self.assertEqual(tarfile.nti(b"0000001\x00"), 1)
self.assertEqual(tarfile.nti(b"7777777\x00"), 0o7777777)
self.assertEqual(tarfile.nti(b"\x80\x00\x00\x00\x00\x20\x00\x00"),
0o10000000)
self.assertEqual(tarfile.nti(b"\x80\x00\x00\x00\xff\xff\xff\xff"),
0xffffffff)
self.assertEqual(tarfile.nti(b"\xff\xff\xff\xff\xff\xff\xff\xff"),
-1)
self.assertEqual(tarfile.nti(b"\xff\xff\xff\xff\xff\xff\xff\x9c"),
-100)
self.assertEqual(tarfile.nti(b"\xff\x00\x00\x00\x00\x00\x00\x00"),
-0x100000000000000)
# Issue 24514: Test if empty number fields are converted to zero.
self.assertEqual(tarfile.nti(b"\0"), 0)
self.assertEqual(tarfile.nti(b" \0"), 0)
def test_write_number_fields(self):
self.assertEqual(tarfile.itn(1), b"0000001\x00")
self.assertEqual(tarfile.itn(0o7777777), b"7777777\x00")
self.assertEqual(tarfile.itn(0o10000000, format=tarfile.GNU_FORMAT),
b"\x80\x00\x00\x00\x00\x20\x00\x00")
self.assertEqual(tarfile.itn(0xffffffff, format=tarfile.GNU_FORMAT),
b"\x80\x00\x00\x00\xff\xff\xff\xff")
self.assertEqual(tarfile.itn(-1, format=tarfile.GNU_FORMAT),
b"\xff\xff\xff\xff\xff\xff\xff\xff")
self.assertEqual(tarfile.itn(-100, format=tarfile.GNU_FORMAT),
b"\xff\xff\xff\xff\xff\xff\xff\x9c")
self.assertEqual(tarfile.itn(-0x100000000000000,
format=tarfile.GNU_FORMAT),
b"\xff\x00\x00\x00\x00\x00\x00\x00")
# Issue 32713: Test if itn() supports float values outside the
# non-GNU format range
self.assertEqual(tarfile.itn(-100.0, format=tarfile.GNU_FORMAT),
b"\xff\xff\xff\xff\xff\xff\xff\x9c")
self.assertEqual(tarfile.itn(8 ** 12 + 0.0, format=tarfile.GNU_FORMAT),
b"\x80\x00\x00\x10\x00\x00\x00\x00")
self.assertEqual(tarfile.nti(tarfile.itn(-0.1, format=tarfile.GNU_FORMAT)), 0)
def test_number_field_limits(self):
with self.assertRaises(ValueError):
tarfile.itn(-1, 8, tarfile.USTAR_FORMAT)
with self.assertRaises(ValueError):
tarfile.itn(0o10000000, 8, tarfile.USTAR_FORMAT)
with self.assertRaises(ValueError):
tarfile.itn(-0x10000000001, 6, tarfile.GNU_FORMAT)
with self.assertRaises(ValueError):
tarfile.itn(0x10000000000, 6, tarfile.GNU_FORMAT)
def test__all__(self):
not_exported = {
'version', 'grp', 'pwd', 'symlink_exception', 'NUL', 'BLOCKSIZE',
'RECORDSIZE', 'GNU_MAGIC', 'POSIX_MAGIC', 'LENGTH_NAME',
'LENGTH_LINK', 'LENGTH_PREFIX', 'REGTYPE', 'AREGTYPE', 'LNKTYPE',
'SYMTYPE', 'CHRTYPE', 'BLKTYPE', 'DIRTYPE', 'FIFOTYPE', 'CONTTYPE',
'GNUTYPE_LONGNAME', 'GNUTYPE_LONGLINK', 'GNUTYPE_SPARSE',
'XHDTYPE', 'XGLTYPE', 'SOLARIS_XHDTYPE', 'SUPPORTED_TYPES',
'REGULAR_TYPES', 'GNU_TYPES', 'PAX_FIELDS', 'PAX_NAME_FIELDS',
'PAX_NUMBER_FIELDS', 'stn', 'nts', 'nti', 'itn', 'calc_chksums',
'copyfileobj', 'filemode', 'EmptyHeaderError',
'TruncatedHeaderError', 'EOFHeaderError', 'InvalidHeaderError',
'SubsequentHeaderError', 'ExFileObject', 'main'}
support.check__all__(self, tarfile, not_exported=not_exported)
def test_useful_error_message_when_modules_missing(self):
fname = os.path.join(os.path.dirname(__file__), 'testtar.tar.xz')
with self.assertRaises(tarfile.ReadError) as excinfo:
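            # The trailing comma below turns ``error`` into a one-element
            # tuple; mock treats an iterable side_effect as a sequence and
            # raises the contained exception on the first call.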
error = tarfile.CompressionError('lzma module is not available'),
with unittest.mock.patch.object(tarfile.TarFile, 'xzopen', side_effect=error):
tarfile.open(fname)
self.assertIn(
"\n- method xz: CompressionError('lzma module is not available')\n",
str(excinfo.exception),
)
class CommandLineTest(unittest.TestCase):
def tarfilecmd(self, *args, **kwargs):
rc, out, err = script_helper.assert_python_ok('-m', 'tarfile', *args,
**kwargs)
return out.replace(os.linesep.encode(), b'\n')
def tarfilecmd_failure(self, *args):
return script_helper.assert_python_failure('-m', 'tarfile', *args)
def make_simple_tarfile(self, tar_name):
files = [support.findfile('tokenize_tests.txt'),
support.findfile('tokenize_tests-no-coding-cookie-'
'and-utf8-bom-sig-only.txt')]
self.addCleanup(os_helper.unlink, tar_name)
with tarfile.open(tar_name, 'w') as tf:
for tardata in files:
tf.add(tardata, arcname=os.path.basename(tardata))
def test_bad_use(self):
rc, out, err = self.tarfilecmd_failure()
self.assertEqual(out, b'')
self.assertIn(b'usage', err.lower())
self.assertIn(b'error', err.lower())
self.assertIn(b'required', err.lower())
rc, out, err = self.tarfilecmd_failure('-l', '')
self.assertEqual(out, b'')
self.assertNotEqual(err.strip(), b'')
def test_test_command(self):
for tar_name in testtarnames:
for opt in '-t', '--test':
out = self.tarfilecmd(opt, tar_name)
self.assertEqual(out, b'')
def test_test_command_verbose(self):
for tar_name in testtarnames:
for opt in '-v', '--verbose':
out = self.tarfilecmd(opt, '-t', tar_name,
PYTHONIOENCODING='utf-8')
self.assertIn(b'is a tar archive.\n', out)
def test_test_command_invalid_file(self):
zipname = support.findfile('zipdir.zip')
rc, out, err = self.tarfilecmd_failure('-t', zipname)
self.assertIn(b' is not a tar archive.', err)
self.assertEqual(out, b'')
self.assertEqual(rc, 1)
for tar_name in testtarnames:
with self.subTest(tar_name=tar_name):
with open(tar_name, 'rb') as f:
data = f.read()
try:
with open(tmpname, 'wb') as f:
f.write(data[:511])
rc, out, err = self.tarfilecmd_failure('-t', tmpname)
self.assertEqual(out, b'')
self.assertEqual(rc, 1)
finally:
os_helper.unlink(tmpname)
def test_list_command(self):
for tar_name in testtarnames:
with support.captured_stdout() as t:
with tarfile.open(tar_name, 'r') as tf:
tf.list(verbose=False)
expected = t.getvalue().encode('ascii', 'backslashreplace')
for opt in '-l', '--list':
out = self.tarfilecmd(opt, tar_name,
PYTHONIOENCODING='ascii')
self.assertEqual(out, expected)
def test_list_command_verbose(self):
for tar_name in testtarnames:
with support.captured_stdout() as t:
with tarfile.open(tar_name, 'r') as tf:
tf.list(verbose=True)
expected = t.getvalue().encode('ascii', 'backslashreplace')
for opt in '-v', '--verbose':
out = self.tarfilecmd(opt, '-l', tar_name,
PYTHONIOENCODING='ascii')
self.assertEqual(out, expected)
def test_list_command_invalid_file(self):
zipname = support.findfile('zipdir.zip')
rc, out, err = self.tarfilecmd_failure('-l', zipname)
self.assertIn(b' is not a tar archive.', err)
self.assertEqual(out, b'')
self.assertEqual(rc, 1)
def test_create_command(self):
files = [support.findfile('tokenize_tests.txt'),
support.findfile('tokenize_tests-no-coding-cookie-'
'and-utf8-bom-sig-only.txt')]
for opt in '-c', '--create':
try:
out = self.tarfilecmd(opt, tmpname, *files)
self.assertEqual(out, b'')
with tarfile.open(tmpname) as tar:
tar.getmembers()
finally:
os_helper.unlink(tmpname)
def test_create_command_verbose(self):
files = [support.findfile('tokenize_tests.txt'),
support.findfile('tokenize_tests-no-coding-cookie-'
'and-utf8-bom-sig-only.txt')]
for opt in '-v', '--verbose':
try:
out = self.tarfilecmd(opt, '-c', tmpname, *files,
PYTHONIOENCODING='utf-8')
self.assertIn(b' file created.', out)
with tarfile.open(tmpname) as tar:
tar.getmembers()
finally:
os_helper.unlink(tmpname)
def test_create_command_dotless_filename(self):
files = [support.findfile('tokenize_tests.txt')]
try:
out = self.tarfilecmd('-c', dotlessname, *files)
self.assertEqual(out, b'')
with tarfile.open(dotlessname) as tar:
tar.getmembers()
finally:
os_helper.unlink(dotlessname)
def test_create_command_dot_started_filename(self):
tar_name = os.path.join(TEMPDIR, ".testtar")
files = [support.findfile('tokenize_tests.txt')]
try:
out = self.tarfilecmd('-c', tar_name, *files)
self.assertEqual(out, b'')
with tarfile.open(tar_name) as tar:
tar.getmembers()
finally:
os_helper.unlink(tar_name)
def test_create_command_compressed(self):
files = [support.findfile('tokenize_tests.txt'),
support.findfile('tokenize_tests-no-coding-cookie-'
'and-utf8-bom-sig-only.txt')]
for filetype in (GzipTest, Bz2Test, LzmaTest):
if not filetype.open:
continue
try:
tar_name = tmpname + '.' + filetype.suffix
out = self.tarfilecmd('-c', tar_name, *files)
with filetype.taropen(tar_name) as tar:
tar.getmembers()
finally:
os_helper.unlink(tar_name)
def test_extract_command(self):
self.make_simple_tarfile(tmpname)
for opt in '-e', '--extract':
try:
with os_helper.temp_cwd(tarextdir):
out = self.tarfilecmd(opt, tmpname)
self.assertEqual(out, b'')
finally:
os_helper.rmtree(tarextdir)
def test_extract_command_verbose(self):
self.make_simple_tarfile(tmpname)
for opt in '-v', '--verbose':
try:
with os_helper.temp_cwd(tarextdir):
out = self.tarfilecmd(opt, '-e', tmpname,
PYTHONIOENCODING='utf-8')
self.assertIn(b' file is extracted.', out)
finally:
os_helper.rmtree(tarextdir)
def test_extract_command_different_directory(self):
self.make_simple_tarfile(tmpname)
try:
with os_helper.temp_cwd(tarextdir):
out = self.tarfilecmd('-e', tmpname, 'spamdir')
self.assertEqual(out, b'')
finally:
os_helper.rmtree(tarextdir)
def test_extract_command_invalid_file(self):
zipname = support.findfile('zipdir.zip')
with os_helper.temp_cwd(tarextdir):
rc, out, err = self.tarfilecmd_failure('-e', zipname)
self.assertIn(b' is not a tar archive.', err)
self.assertEqual(out, b'')
self.assertEqual(rc, 1)
class ContextManagerTest(unittest.TestCase):
def test_basic(self):
with tarfile.open(tarname) as tar:
self.assertFalse(tar.closed, "closed inside runtime context")
self.assertTrue(tar.closed, "context manager failed")
def test_closed(self):
# The __enter__() method is supposed to raise OSError
# if the TarFile object is already closed.
tar = tarfile.open(tarname)
tar.close()
with self.assertRaises(OSError):
with tar:
pass
def test_exception(self):
# Test if the OSError exception is passed through properly.
with self.assertRaises(Exception) as exc:
with tarfile.open(tarname) as tar:
raise OSError
self.assertIsInstance(exc.exception, OSError,
"wrong exception raised in context manager")
self.assertTrue(tar.closed, "context manager failed")
def test_no_eof(self):
# __exit__() must not write end-of-archive blocks if an
# exception was raised.
try:
with tarfile.open(tmpname, "w") as tar:
raise Exception
except:
pass
self.assertEqual(os.path.getsize(tmpname), 0,
"context manager wrote an end-of-archive block")
self.assertTrue(tar.closed, "context manager failed")
def test_eof(self):
# __exit__() must write end-of-archive blocks, i.e. call
# TarFile.close() if there was no error.
with tarfile.open(tmpname, "w"):
pass
self.assertNotEqual(os.path.getsize(tmpname), 0,
"context manager wrote no end-of-archive block")
def test_fileobj(self):
# Test that __exit__() did not close the external file
# object.
with open(tmpname, "wb") as fobj:
try:
with tarfile.open(fileobj=fobj, mode="w") as tar:
raise Exception
except:
pass
self.assertFalse(fobj.closed, "external file object was closed")
self.assertTrue(tar.closed, "context manager failed")
@unittest.skipIf(hasattr(os, "link"), "requires os.link to be missing")
class LinkEmulationTest(ReadTest, unittest.TestCase):
# Test for issue #8741 regression. On platforms that do not support
    # symbolic or hard links, tarfile tries to extract these types of members
# as the regular files they point to.
def _test_link_extraction(self, name):
self.tar.extract(name, TEMPDIR)
with open(os.path.join(TEMPDIR, name), "rb") as f:
data = f.read()
self.assertEqual(sha256sum(data), sha256_regtype)
# See issues #1578269, #8879, and #17689 for some history on these skips
@unittest.skipIf(hasattr(os.path, "islink"),
"Skip emulation - has os.path.islink but not os.link")
def test_hardlink_extraction1(self):
self._test_link_extraction("ustar/lnktype")
@unittest.skipIf(hasattr(os.path, "islink"),
"Skip emulation - has os.path.islink but not os.link")
def test_hardlink_extraction2(self):
self._test_link_extraction("./ustar/linktest2/lnktype")
@unittest.skipIf(hasattr(os, "symlink"),
"Skip emulation if symlink exists")
def test_symlink_extraction1(self):
self._test_link_extraction("ustar/symtype")
@unittest.skipIf(hasattr(os, "symlink"),
"Skip emulation if symlink exists")
def test_symlink_extraction2(self):
self._test_link_extraction("./ustar/linktest2/symtype")
class Bz2PartialReadTest(Bz2Test, unittest.TestCase):
# Issue5068: The _BZ2Proxy.read() method loops forever
# on an empty or partial bzipped file.
def _test_partial_input(self, mode):
class MyBytesIO(io.BytesIO):
hit_eof = False
def read(self, n):
if self.hit_eof:
raise AssertionError("infinite loop detected in "
"tarfile.open()")
self.hit_eof = self.tell() == len(self.getvalue())
return super(MyBytesIO, self).read(n)
def seek(self, *args):
self.hit_eof = False
return super(MyBytesIO, self).seek(*args)
data = bz2.compress(tarfile.TarInfo("foo").tobuf())
for x in range(len(data) + 1):
try:
tarfile.open(fileobj=MyBytesIO(data[:x]), mode=mode)
except tarfile.ReadError:
pass # we have no interest in ReadErrors
def test_partial_input(self):
self._test_partial_input("r")
def test_partial_input_bz2(self):
self._test_partial_input("r:bz2")
def root_is_uid_gid_0():
try:
import pwd, grp
except ImportError:
return False
if pwd.getpwuid(0)[0] != 'root':
return False
if grp.getgrgid(0)[0] != 'root':
return False
return True
@unittest.skipUnless(hasattr(os, 'chown'), "missing os.chown")
@unittest.skipUnless(hasattr(os, 'geteuid'), "missing os.geteuid")
class NumericOwnerTest(unittest.TestCase):
# mock the following:
# os.chown: so we can test what's being called
# os.chmod: so the modes are not actually changed. if they are, we can't
# delete the files/directories
# os.geteuid: so we can lie and say we're root (uid = 0)
@staticmethod
def _make_test_archive(filename_1, dirname_1, filename_2):
# the file contents to write
fobj = io.BytesIO(b"content")
# create a tar file with a file, a directory, and a file within that
# directory. Assign various .uid/.gid values to them
items = [(filename_1, 99, 98, tarfile.REGTYPE, fobj),
(dirname_1, 77, 76, tarfile.DIRTYPE, None),
(filename_2, 88, 87, tarfile.REGTYPE, fobj),
]
with tarfile.open(tmpname, 'w') as tarfl:
for name, uid, gid, typ, contents in items:
t = tarfile.TarInfo(name)
t.uid = uid
t.gid = gid
t.uname = 'root'
t.gname = 'root'
t.type = typ
tarfl.addfile(t, contents)
# return the full pathname to the tar file
return tmpname
@staticmethod
@contextmanager
def _setup_test(mock_geteuid):
mock_geteuid.return_value = 0 # lie and say we're root
fname = 'numeric-owner-testfile'
dirname = 'dir'
# the names we want stored in the tarfile
filename_1 = fname
dirname_1 = dirname
filename_2 = os.path.join(dirname, fname)
# create the tarfile with the contents we're after
tar_filename = NumericOwnerTest._make_test_archive(filename_1,
dirname_1,
filename_2)
# open the tarfile for reading. yield it and the names of the items
# we stored into the file
with tarfile.open(tar_filename) as tarfl:
yield tarfl, filename_1, dirname_1, filename_2
@unittest.mock.patch('os.chown')
@unittest.mock.patch('os.chmod')
@unittest.mock.patch('os.geteuid')
def test_extract_with_numeric_owner(self, mock_geteuid, mock_chmod,
mock_chown):
with self._setup_test(mock_geteuid) as (tarfl, filename_1, _,
filename_2):
tarfl.extract(filename_1, TEMPDIR, numeric_owner=True)
tarfl.extract(filename_2 , TEMPDIR, numeric_owner=True)
# convert to filesystem paths
f_filename_1 = os.path.join(TEMPDIR, filename_1)
f_filename_2 = os.path.join(TEMPDIR, filename_2)
mock_chown.assert_has_calls([unittest.mock.call(f_filename_1, 99, 98),
unittest.mock.call(f_filename_2, 88, 87),
],
any_order=True)
@unittest.mock.patch('os.chown')
@unittest.mock.patch('os.chmod')
@unittest.mock.patch('os.geteuid')
def test_extractall_with_numeric_owner(self, mock_geteuid, mock_chmod,
mock_chown):
with self._setup_test(mock_geteuid) as (tarfl, filename_1, dirname_1,
filename_2):
tarfl.extractall(TEMPDIR, numeric_owner=True)
# convert to filesystem paths
f_filename_1 = os.path.join(TEMPDIR, filename_1)
f_dirname_1 = os.path.join(TEMPDIR, dirname_1)
f_filename_2 = os.path.join(TEMPDIR, filename_2)
mock_chown.assert_has_calls([unittest.mock.call(f_filename_1, 99, 98),
unittest.mock.call(f_dirname_1, 77, 76),
unittest.mock.call(f_filename_2, 88, 87),
],
any_order=True)
# this test requires that uid=0 and gid=0 really be named 'root'. that's
# because the uname and gname in the test file are 'root', and extract()
# will look them up using pwd and grp to find their uid and gid, which we
# test here to be 0.
@unittest.skipUnless(root_is_uid_gid_0(),
'uid=0,gid=0 must be named "root"')
@unittest.mock.patch('os.chown')
@unittest.mock.patch('os.chmod')
@unittest.mock.patch('os.geteuid')
def test_extract_without_numeric_owner(self, mock_geteuid, mock_chmod,
mock_chown):
with self._setup_test(mock_geteuid) as (tarfl, filename_1, _, _):
tarfl.extract(filename_1, TEMPDIR, numeric_owner=False)
# convert to filesystem paths
f_filename_1 = os.path.join(TEMPDIR, filename_1)
mock_chown.assert_called_with(f_filename_1, 0, 0)
@unittest.mock.patch('os.geteuid')
def test_keyword_only(self, mock_geteuid):
with self._setup_test(mock_geteuid) as (tarfl, filename_1, _, _):
self.assertRaises(TypeError,
tarfl.extract, filename_1, TEMPDIR, False, True)
def setUpModule():
os_helper.unlink(TEMPDIR)
os.makedirs(TEMPDIR)
global testtarnames
testtarnames = [tarname]
with open(tarname, "rb") as fobj:
data = fobj.read()
# Create compressed tarfiles.
for c in GzipTest, Bz2Test, LzmaTest:
if c.open:
os_helper.unlink(c.tarname)
testtarnames.append(c.tarname)
with c.open(c.tarname, "wb") as tar:
tar.write(data)
def tearDownModule():
if os.path.exists(TEMPDIR):
os_helper.rmtree(TEMPDIR)
if __name__ == "__main__":
unittest.main()
| 37.858775
| 110
| 0.584078
|
0a5ba84d37557d8a0bcbfba2b522999a849fb54a
| 1,583
|
py
|
Python
|
backend/apps/workorder/urls.py
|
bopopescu/Journey
|
654eb66e0e2df59e916eff4c75b68b183f9b58b5
|
[
"MIT"
] | 41
|
2019-01-02T09:36:54.000Z
|
2022-02-20T13:13:05.000Z
|
backend/apps/workorder/urls.py
|
bopopescu/Journey
|
654eb66e0e2df59e916eff4c75b68b183f9b58b5
|
[
"MIT"
] | 15
|
2019-09-30T05:40:20.000Z
|
2022-02-17T19:28:41.000Z
|
backend/apps/workorder/urls.py
|
bopopescu/Journey
|
654eb66e0e2df59e916eff4c75b68b183f9b58b5
|
[
"MIT"
] | 23
|
2019-02-18T10:50:10.000Z
|
2022-01-06T07:53:18.000Z
|
# -*- coding:utf-8 -*-
from django.conf.urls import url, include
from rest_framework import routers
from django.urls import path
from Journey.settings import MEDIA_ROOT
from django.conf import settings
from django.views.static import serve
from apps.workorder.views.sqlorder import *
from apps.workorder.views.autoorder import *
from apps.workorder.views.workorder import *
from apps.workorder.views.ag_views import *
router = routers.DefaultRouter()
# workorder-sqlorder
router.register(r'sqlordertype', SqlOrderTypeViewSet, basename="sqlordertype")
router.register(r'sqlorder', SqlOrderViewSet, basename="sqlorder")
router.register(r'sqltext', SqlTextViewSet, basename="sqltext")
router.register(r'sqlfile', SqlFileViewSet, basename="sqlfile")
# workorder-approvalgroup
router.register(r'approvalgroup', ApprovalGroupViewSet, basename="approvalgroup")
# workorder-autoorder
router.register(r'autoordertype', AutoOrderTypeViewSet, basename="autoordertype")
router.register(r'autoorderstep', AutoOrderStepViewSet, basename="autoorderstep")
router.register(r'autoorder', AutoOrderViewSet, basename="autoorder")
urlpatterns = [
url(r'^', include(router.urls)),
# url(r'mysqlorder', MySqlOrderViewSet.as_view()),
url(r'inception', InceptionViewSet.as_view()),
url(r'gitlabinfo', GitLabInfoViewSet.as_view()),
url(r'sqlorderdetail', SqlOrderDetailViewSet.as_view()),
# url(r'todosqlorder', ToDoSqlOrderViewSet.as_view()),
# url(r'allsqlorder', AllSqlOrderViewSet.as_view()),
# workorder
url(r'allworkorder', AllWorkOrderViewSet.as_view()),
]
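# Illustrative note (not from the original file): with DefaultRouter, each
# router.register() above exposes the standard DRF list/detail routes, e.g.
#   GET /sqlorder/        -> SqlOrderViewSet (list)
#   GET /sqlorder/<pk>/   -> SqlOrderViewSet (detail)
# Because include(router.urls) is the first entry in urlpatterns, those routes
# are matched before the explicit url() entries that follow it.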
| 35.977273
| 81
| 0.778269
|
25409ac1f1a070989bffc4d8514fd723cf238975
| 663
|
py
|
Python
|
event_manager/users/tests/test_urls.py
|
Tundzhel/event_manager
|
917dd80f74ba3bea878726a4cfc606300dc40af9
|
[
"MIT"
] | null | null | null |
event_manager/users/tests/test_urls.py
|
Tundzhel/event_manager
|
917dd80f74ba3bea878726a4cfc606300dc40af9
|
[
"MIT"
] | 25
|
2021-12-22T12:31:45.000Z
|
2022-03-31T12:26:24.000Z
|
event_manager/users/tests/test_urls.py
|
Tundzhel/event_manager
|
917dd80f74ba3bea878726a4cfc606300dc40af9
|
[
"MIT"
] | null | null | null |
import pytest
from django.urls import resolve, reverse
from event_manager.users.models import User
pytestmark = pytest.mark.django_db
def test_detail(user: User):
assert (
reverse("users:detail", kwargs={"username": user.username})
== f"/users/{user.username}/"
)
assert resolve(f"/users/{user.username}/").view_name == "users:detail"
def test_update():
assert reverse("users:update") == "/users/~update/"
assert resolve("/users/~update/").view_name == "users:update"
def test_redirect():
assert reverse("users:redirect") == "/users/~redirect/"
assert resolve("/users/~redirect/").view_name == "users:redirect"
| 26.52
| 74
| 0.678733
|
5e5183c1af404f1227763be373afe95a490d45af
| 2,799
|
py
|
Python
|
tests/test_cublasxt.py
|
Eric89GXL/scikits.cuda
|
d40e9a58daa16f353c6b9d794135ffb81519095e
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_cublasxt.py
|
Eric89GXL/scikits.cuda
|
d40e9a58daa16f353c6b9d794135ffb81519095e
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_cublasxt.py
|
Eric89GXL/scikits.cuda
|
d40e9a58daa16f353c6b9d794135ffb81519095e
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
"""
Unit tests for scikits.cuda.cublasxt
"""
from unittest import main, makeSuite, TestCase, TestSuite
import numpy as np
import skcuda.cublasxt as cublasxt
class test_cublasxt(TestCase):
def setUp(self):
self.handle = cublasxt.cublasXtCreate()
self.nbDevices = 1
self.deviceId = np.array([0], np.int32)
cublasxt.cublasXtDeviceSelect(self.handle, self.nbDevices,
self.deviceId.ctypes.data)
def tearDown(self):
cublasxt.cublasXtDestroy(self.handle)
def test_cublasXtSgemm(self):
a = np.random.rand(4, 4).astype(np.float32)
b = np.random.rand(4, 4).astype(np.float32)
c = np.zeros((4, 4), np.float32)
cublasxt.cublasXtSgemm(self.handle, cublasxt._CUBLAS_OP['N'],
cublasxt._CUBLAS_OP['N'], 4, 4, 4, np.float32(1.0),
a.ctypes.data, 4, b.ctypes.data, 4, np.float32(0.0),
c.ctypes.data, 4)
        # cuBLAS is column-major, so row-major a and b are seen as a.T and b.T
        # and c ends up holding (a.T @ b.T).T == b @ a; assert instead of discarding
        self.assertTrue(np.allclose(np.dot(b, a), c))
def test_cublasXtDgemm(self):
a = np.random.rand(4, 4).astype(np.float64)
b = np.random.rand(4, 4).astype(np.float64)
c = np.zeros((4, 4), np.float64)
cublasxt.cublasXtDgemm(self.handle, cublasxt._CUBLAS_OP['N'],
cublasxt._CUBLAS_OP['N'], 4, 4, 4, np.float64(1.0),
a.ctypes.data, 4, b.ctypes.data, 4, np.float64(0.0),
c.ctypes.data, 4)
        self.assertTrue(np.allclose(np.dot(b, a), c))
    def test_cublasXtCgemm(self):
        # Cgemm operates on single-precision complex data (complex64)
        a = (np.random.rand(4, 4)+1j*np.random.rand(4, 4)).astype(np.complex64)
        b = (np.random.rand(4, 4)+1j*np.random.rand(4, 4)).astype(np.complex64)
        c = np.zeros((4, 4), np.complex64)
        cublasxt.cublasXtCgemm(self.handle, cublasxt._CUBLAS_OP['N'],
                               cublasxt._CUBLAS_OP['N'], 4, 4, 4, np.complex64(1.0),
                               a.ctypes.data, 4, b.ctypes.data, 4, np.complex64(0.0),
                               c.ctypes.data, 4)
        self.assertTrue(np.allclose(np.dot(b, a), c))
    def test_cublasXtZgemm(self):
        # Zgemm operates on double-precision complex data (complex128)
        a = (np.random.rand(4, 4)+1j*np.random.rand(4, 4)).astype(np.complex128)
        b = (np.random.rand(4, 4)+1j*np.random.rand(4, 4)).astype(np.complex128)
        c = np.zeros((4, 4), np.complex128)
        cublasxt.cublasXtZgemm(self.handle, cublasxt._CUBLAS_OP['N'],
                               cublasxt._CUBLAS_OP['N'], 4, 4, 4, np.complex128(1.0),
                               a.ctypes.data, 4, b.ctypes.data, 4, np.complex128(0.0),
                               c.ctypes.data, 4)
        self.assertTrue(np.allclose(np.dot(b, a), c))
if __name__ == '__main__':
main()
| 39.985714
| 86
| 0.538407
|
8643ee73ba1c179f49d3383dfbe69e524f3a66c9
| 5,930
|
py
|
Python
|
Tools/scripts/treesync.py
|
ystk/debian-python3.1
|
6241444a6994140621d1b143a2d6b311b184366a
|
[
"PSF-2.0"
] | 1
|
2015-05-21T23:47:54.000Z
|
2015-05-21T23:47:54.000Z
|
Tools/scripts/treesync.py
|
ystk/debian-python3.1
|
6241444a6994140621d1b143a2d6b311b184366a
|
[
"PSF-2.0"
] | 1
|
2015-10-29T20:51:31.000Z
|
2015-10-29T20:51:31.000Z
|
Tools/scripts/treesync.py
|
ystk/debian-python3.1
|
6241444a6994140621d1b143a2d6b311b184366a
|
[
"PSF-2.0"
] | 2
|
2018-08-06T04:37:38.000Z
|
2022-02-27T18:07:12.000Z
|
#! /usr/bin/env python
"""Script to synchronize two source trees.
Invoke with two arguments:
python treesync.py slave master
The assumption is that "master" contains CVS administration while
slave doesn't. All files in the slave tree that have a CVS/Entries
entry in the master tree are synchronized. This means:
If the files differ:
if the slave file is newer:
normalize the slave file
if the files still differ:
copy the slave to the master
else (the master is newer):
copy the master to the slave
normalizing the slave means replacing CRLF with LF when the master
doesn't use CRLF
"""
import os, sys, stat, getopt
# Interactivity options
default_answer = "ask"
create_files = "yes"
create_directories = "no"
write_slave = "ask"
write_master = "ask"
def main():
    global default_answer, create_files
global create_directories, write_master, write_slave
opts, args = getopt.getopt(sys.argv[1:], "nym:s:d:f:a:")
for o, a in opts:
if o == '-y':
default_answer = "yes"
if o == '-n':
default_answer = "no"
if o == '-s':
write_slave = a
if o == '-m':
write_master = a
if o == '-d':
create_directories = a
if o == '-f':
create_files = a
if o == '-a':
create_files = create_directories = write_slave = write_master = a
try:
[slave, master] = args
except ValueError:
print("usage: python", sys.argv[0] or "treesync.py", end=' ')
print("[-n] [-y] [-m y|n|a] [-s y|n|a] [-d y|n|a] [-f n|y|a]", end=' ')
print("slavedir masterdir")
return
process(slave, master)
def process(slave, master):
cvsdir = os.path.join(master, "CVS")
if not os.path.isdir(cvsdir):
print("skipping master subdirectory", master)
print("-- not under CVS")
return
print("-"*40)
print("slave ", slave)
print("master", master)
if not os.path.isdir(slave):
if not okay("create slave directory %s?" % slave,
answer=create_directories):
print("skipping master subdirectory", master)
print("-- no corresponding slave", slave)
return
print("creating slave directory", slave)
try:
os.mkdir(slave)
except os.error as msg:
print("can't make slave directory", slave, ":", msg)
return
else:
print("made slave directory", slave)
cvsdir = None
subdirs = []
names = os.listdir(master)
for name in names:
mastername = os.path.join(master, name)
slavename = os.path.join(slave, name)
if name == "CVS":
cvsdir = mastername
else:
if os.path.isdir(mastername) and not os.path.islink(mastername):
subdirs.append((slavename, mastername))
if cvsdir:
entries = os.path.join(cvsdir, "Entries")
for e in open(entries).readlines():
words = e.split('/')
if words[0] == '' and words[1:]:
name = words[1]
s = os.path.join(slave, name)
m = os.path.join(master, name)
compare(s, m)
for (s, m) in subdirs:
process(s, m)
def compare(slave, master):
try:
        sf = open(slave, 'rb')  # binary, matching how the master file is opened below
except IOError:
sf = None
try:
mf = open(master, 'rb')
except IOError:
mf = None
if not sf:
if not mf:
print("Neither master nor slave exists", master)
return
print("Creating missing slave", slave)
copy(master, slave, answer=create_files)
return
if not mf:
print("Not updating missing master", master)
return
if sf and mf:
if identical(sf, mf):
return
sft = mtime(sf)
mft = mtime(mf)
if mft > sft:
# Master is newer -- copy master to slave
sf.close()
mf.close()
print("Master ", master)
print("is newer than slave", slave)
copy(master, slave, answer=write_slave)
return
# Slave is newer -- copy slave to master
print("Slave is", sft-mft, "seconds newer than master")
# But first check what to do about CRLF
mf.seek(0)
fun = funnychars(mf)
mf.close()
sf.close()
if fun:
print("***UPDATING MASTER (BINARY COPY)***")
copy(slave, master, "rb", answer=write_master)
else:
print("***UPDATING MASTER***")
copy(slave, master, "r", answer=write_master)
BUFSIZE = 16*1024
def identical(sf, mf):
while 1:
sd = sf.read(BUFSIZE)
md = mf.read(BUFSIZE)
if sd != md: return 0
if not sd: break
return 1
def mtime(f):
st = os.fstat(f.fileno())
return st[stat.ST_MTIME]
def funnychars(f):
while 1:
buf = f.read(BUFSIZE)
if not buf: break
        if b'\r' in buf or b'\0' in buf: return 1  # buf is bytes (file opened in binary mode)
return 0
def copy(src, dst, rmode="rb", wmode="wb", answer='ask'):
print("copying", src)
print(" to", dst)
if not okay("okay to copy? ", answer):
return
f = open(src, rmode)
g = open(dst, wmode)
while 1:
buf = f.read(BUFSIZE)
if not buf: break
g.write(buf)
f.close()
g.close()
def raw_input(prompt):
sys.stdout.write(prompt)
sys.stdout.flush()
return sys.stdin.readline()
def okay(prompt, answer='ask'):
answer = answer.strip().lower()
if not answer or answer[0] not in 'ny':
answer = input(prompt)
answer = answer.strip().lower()
if not answer:
answer = default_answer
if answer[:1] == 'y':
return 1
if answer[:1] == 'n':
return 0
print("Yes or No please -- try again:")
return okay(prompt)
if __name__ == '__main__':
main()
| 28.104265
| 79
| 0.555649
|
a8ed9e8e58042ef1a9cfd065632cca018afbb4d1
| 264
|
py
|
Python
|
manage.py
|
nemesisdesign/django-foss-dashboard
|
261fd44398a59c1c242583c2b758eb33b63d45bb
|
[
"BSD-3-Clause"
] | null | null | null |
manage.py
|
nemesisdesign/django-foss-dashboard
|
261fd44398a59c1c242583c2b758eb33b63d45bb
|
[
"BSD-3-Clause"
] | null | null | null |
manage.py
|
nemesisdesign/django-foss-dashboard
|
261fd44398a59c1c242583c2b758eb33b63d45bb
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "django-foss-dashboard.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| 24
| 85
| 0.776515
|
86fae209bd9d8afb10d0927805e71d11db516455
| 1,937
|
py
|
Python
|
workon/contrib/google/utils.py
|
dalou/django-workon
|
ef63c0a81c00ef560ed693e435cf3825f5170126
|
[
"BSD-3-Clause"
] | null | null | null |
workon/contrib/google/utils.py
|
dalou/django-workon
|
ef63c0a81c00ef560ed693e435cf3825f5170126
|
[
"BSD-3-Clause"
] | null | null | null |
workon/contrib/google/utils.py
|
dalou/django-workon
|
ef63c0a81c00ef560ed693e435cf3825f5170126
|
[
"BSD-3-Clause"
] | null | null | null |
import json
from django.conf import settings
from django import template
try:
from raven.contrib.django.raven_compat.models import client as raven
except:
raven = None
try:
from oauth2client.service_account import ServiceAccountCredentials
def get_google_account_token(account_key_file):
# The scope for the OAuth2 request.
SCOPE = 'https://www.googleapis.com/auth/analytics.readonly'
token = None
try:
_key_data = json.load(account_key_file)
_credentials = ServiceAccountCredentials.from_json_keyfile_dict(
_key_data,
scopes=['https://www.googleapis.com/auth/analytics.readonly'],
# token_uri='https://accounts.google.com/o/oauth2/token'
)
token = _credentials.get_access_token().access_token
        except Exception as e:
            if raven:
                raven.captureException()
            print("GOOGLE API TOKEN RETRIEVE ERROR: {}".format(e))
            token = None
return token
except:
from oauth2client.client import SignedJwtAssertionCredentials
def get_google_account_token(account_key_file):
# The scope for the OAuth2 request.
SCOPE = 'https://www.googleapis.com/auth/analytics.readonly'
token = None
try:
_key_data = json.load(account_key_file)
_credentials = SignedJwtAssertionCredentials(
_key_data['client_email'],
_key_data['private_key'],
'https://www.googleapis.com/auth/analytics.readonly',
# token_uri='https://accounts.google.com/o/oauth2/token'
)
token = _credentials.get_access_token().access_token
        except Exception as e:
            if raven:
                raven.captureException()
            print("GOOGLE API TOKEN RETRIEVE ERROR: {}".format(e))
            token = None
return token
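# Usage sketch (the key file path is hypothetical, not part of this module):
# either branch above defines get_google_account_token(), which takes an open
# service-account JSON key file and returns an OAuth2 access token for the
# Analytics read-only scope, or None on failure.
#   with open('/path/to/service-account.json') as key_file:
#       token = get_google_account_token(key_file)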
| 35.87037
| 78
| 0.627259
|
89880cdd956a6e7ddada12dfa207e9d67914ada8
| 4,382
|
py
|
Python
|
pyIPXACT/DesignConfiguration.py
|
snmishra/pyIPXACT
|
ae0d98669a73ec208270710574bdaa110c54d6ee
|
[
"Apache-2.0"
] | 11
|
2016-11-10T23:23:55.000Z
|
2021-07-12T09:12:36.000Z
|
pyIPXACT/DesignConfiguration.py
|
snmishra/pyIPXACT
|
ae0d98669a73ec208270710574bdaa110c54d6ee
|
[
"Apache-2.0"
] | 4
|
2016-10-25T01:08:58.000Z
|
2020-09-28T04:38:57.000Z
|
pyIPXACT/DesignConfiguration.py
|
snmishra/pyIPXACT
|
ae0d98669a73ec208270710574bdaa110c54d6ee
|
[
"Apache-2.0"
] | 10
|
2017-01-17T07:12:25.000Z
|
2021-05-30T06:04:01.000Z
|
# EMACS settings: -*- tab-width: 2; indent-tabs-mode: t; python-indent-offset: 2 -*-
# vim: tabstop=2:shiftwidth=2:noexpandtab
# kate: tab-width 2; replace-tabs off; indent-width 2;
# =============================================================================
# ___ ______ __ _ ____ _____
# _ __ _ _|_ _| _ \ \/ / / \ / ___|_ _|
# | '_ \| | | || || |_) \ / / _ \| | | |
# | |_) | |_| || || __// \ / ___ \ |___ | |
# | .__/ \__, |___|_| /_/\_\/_/ \_\____| |_|
# |_| |___/
# =============================================================================
# Authors: Patrick Lehmann
#
# Python module: A DOM based IP-XACT implementation for Python
#
# Description:
# ------------------------------------
# TODO:
#
# License:
# ==============================================================================
# Copyright 2007-2016 Patrick Lehmann - Dresden, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
#
from textwrap import dedent
from pyIPXACT import RootElement, __DEFAULT_SCHEMA__, Vlnv
class DesignConfiguration(RootElement):
"""Represents an IP-XACT design configuration."""
def __init__(self, vlnv : Vlnv, description : str):
super().__init__(vlnv)
self._description = description
self._generatorChainConfiguration = None
self._interconnectionConfiguration = None
self._viewConfiguration = None
def SetItem(self, item):
if isinstance(item, GeneratorChainConfiguration): self._generatorChainConfiguration = item
elif isinstance(item, InterconnectionConfiguration): self._interconnectionConfiguration = item
elif isinstance(item, ViewConfiguration): self._viewConfiguration = item
else:
raise ValueError()
def ToXml(self):
"""Converts the object's data into XML format."""
buffer = dedent("""\
<?xml version="1.0" encoding="UTF-8"?>
<{xmlns}:designConfiguration
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xmlns:{xmlns}="{schemaUri}"
xsi:schemaLocation="{schemaUri} {schemaUrl}">
{versionedIdentifier}
<{xmlns}:description>{description}</{xmlns}:description>
""").format(
xmlns=__DEFAULT_SCHEMA__.NamespacePrefix,
schemaUri=__DEFAULT_SCHEMA__.SchemaUri,
schemaUrl=__DEFAULT_SCHEMA__.SchemaUrl,
versionedIdentifier=self._vlnv.ToXml(isVersionedIdentifier=True),
description=self._description
)
if self._generatorChainConfiguration:
buffer += "\t<{xmlns}:componentInstances>\n"
buffer += self._generatorChainConfiguration.ToXml(2)
buffer += "\t</{xmlns}:componentInstances>\n"
if self._interconnectionConfiguration:
buffer += "\t<{xmlns}:interconnectionConfiguration>\n"
buffer += self._interconnectionConfiguration.ToXml(2)
buffer += "\t</{xmlns}:interconnectionConfiguration>\n"
if self._viewConfiguration:
buffer += "\t<{xmlns}:viewConfiguration>\n"
buffer += self._viewConfiguration.ToXml(2)
buffer += "\t</{xmlns}:viewConfiguration>\n"
buffer += dedent("""\
</{xmlns}:designConfiguration>
""")
return buffer.format(xmlns=__DEFAULT_SCHEMA__.NamespacePrefix)
class GeneratorChainConfiguration:
"""Represents an IP-XACT generator chain configuration."""
def __init__(self):
pass
def ToXml(self, indent=0):
"""Converts the object's data into XML format."""
return ""
class InterconnectionConfiguration:
"""Represents an IP-XACT interconnection configuration."""
def __init__(self):
pass
def ToXml(self, indent=0):
"""Converts the object's data into XML format."""
return ""
class ViewConfiguration:
"""Represents an IP-XACT view configuration."""
def __init__(self):
pass
def ToXml(self, indent=0):
"""Converts the object's data into XML format."""
return ""
| 32.459259
| 98
| 0.630762
|
d44c126981ed09b6a488faaa05f90285464f2dd7
| 6,183
|
py
|
Python
|
lokahi_dropbox/search/views.py
|
y4ahmed/Crowdfunding-Web-Application
|
52beab945ee88f8fd773f942577137c770a601c1
|
[
"MIT"
] | 4
|
2017-09-28T04:26:33.000Z
|
2022-01-04T22:51:17.000Z
|
lokahi_dropbox/search/views.py
|
y4ahmed/Crowdfunding-Web-Application
|
52beab945ee88f8fd773f942577137c770a601c1
|
[
"MIT"
] | null | null | null |
lokahi_dropbox/search/views.py
|
y4ahmed/Crowdfunding-Web-Application
|
52beab945ee88f8fd773f942577137c770a601c1
|
[
"MIT"
] | 1
|
2021-01-17T23:11:21.000Z
|
2021-01-17T23:11:21.000Z
|
from django.views.decorators.csrf import csrf_protect
from django.shortcuts import render
from search.forms import BasicSearchForm, AdvancedSearchForm
from frontend.models import BaseUser
from django.http import HttpResponseRedirect
from django.core.exceptions import ValidationError
# from django.contrib.auth.models import User
# Create your views here.
@csrf_protect
def basic_search(request):
if request.method == "POST":
form = BasicSearchForm(request.POST)
if form.is_valid():
search = form.cleaned_data['search']
base = BaseUser.objects.get(user=request.user)
reports = base.reports.filter(title__icontains=search)
# reports = reports.filter()
# reports = Report.objects.filter(owner_id=request.user).filter(title__icontains=search)
# TEST TODO remove
# for report in reports:
# print(report.title)
return render(
request,
'searches/search_result.html',
{'report_list': reports, 'type': base.user_role}
)
base = BaseUser.objects.get(user=request.user)
return render(
request,
'home.html',
{
'form': BasicSearchForm(),
'user': base,
'type': base.user_role,
'invalid_search': True
}
)
@csrf_protect
def advanced_search(request):
if request.method == "POST":
form = AdvancedSearchForm(data=request.POST)
if form.is_valid():
title = form.cleaned_data['title']
company_name = form.cleaned_data['company_name']
ceo = form.cleaned_data['ceo']
location = form.cleaned_data['location']
country = form.cleaned_data['country']
sector = form.cleaned_data['sector']
projects = form.cleaned_data['projects']
time_created = form.cleaned_data['time_created']
reports = [[], [], [], [], [], [], [], []]
checks = [False, False, False, False, False, False, False, False]
# all of the users reports
base_user = BaseUser.objects.get(user=request.user)
all_reports = base_user.reports.all()
if title == "" and company_name == "" and ceo == "" and \
location == "" and country == "" and sector == "" and \
projects == "" and time_created == "":
return render(
request,
'searches/advanced_search.html',
{
'empty_field': True,
'type': base_user.user_role,
'form': AdvancedSearchForm()
}
)
if not title == "":
reports[0] = all_reports.filter(title__icontains=title)
# print(reports[0])
if not company_name == "":
reports[1] = all_reports.filter(
compName__icontains=company_name)
# print(reports[1])
if not ceo == "":
reports[2] = all_reports.filter(ceo__icontains=ceo)
# print(reports[2])
if not location == "":
reports[3] = all_reports.filter(location__icontains=location)
# print(reports[3])
if not country == "":
reports[4] = all_reports.filter(country__icontains=country)
# print(reports[4])
if not sector == "":
reports[5] = all_reports.filter(sector__icontains=sector)
# print(reports[5])
if not projects == "":
reports[6] = all_reports.filter(projects__icontains=projects)
# print(reports[6])
if not time_created == "":
try:
reports[7] = all_reports.filter(time_created=time_created)
except (NameError, ValueError, ValidationError):
print("here")
return render(
request,
'searches/advanced_search.html',
{
'time_error': True,
'type': base_user.user_role,
'form': AdvancedSearchForm()
}
)
# print(reports[7])
checks[0] = form.cleaned_data['and_title']
checks[1] = form.cleaned_data['and_company_name']
checks[2] = form.cleaned_data['and_ceo']
checks[3] = form.cleaned_data['and_location']
checks[4] = form.cleaned_data['and_country']
checks[5] = form.cleaned_data['and_sector']
checks[6] = form.cleaned_data['and_projects']
checks[7] = form.cleaned_data['and_time_created']
final_reports = []
for i in range(0, 8):
if checks[i] is False:
# print("doing union")
final_reports = union(final_reports, reports[i])
# print(final_reports)
else:
# print("doing intersection")
final_reports = intersection(final_reports, reports[i])
# print(len(final_reports))
# TODO comment out the next part!
for report in final_reports:
print(report.title)
return HttpResponseRedirect('/home/')
        else:
            # form is invalid: base_user was never bound in this branch, so look
            # it up before rendering (avoids a NameError on invalid submissions)
            base_user = BaseUser.objects.get(user=request.user)
            return render(
                request,
                'searches/advanced_search.html',
                {
                    'form': AdvancedSearchForm(),
                    'type': base_user.user_role,
                }
            )
    else:
        # GET request: look up the current user so the template gets its role
        base_user = BaseUser.objects.get(user=request.user)
        return render(
            request,
            'searches/advanced_search.html',
            {
                'form': AdvancedSearchForm(),
                'type': base_user.user_role,
            }
        )
def union(lst1, lst2):
return list(set(lst1).union(set(lst2)))
def intersection(lst1, lst2):
return list(set(lst1).intersection(set(lst2)))
| 36.370588
| 100
| 0.513828
|
7bcf590d2d7368e5407568529bc8d8b889bc101f
| 32,281
|
py
|
Python
|
app/env/lib/python3.7/site-packages/socks.py
|
siyaochen/Tier1Health
|
536591a7534bbb3fb27fe889bfed9de152ec1864
|
[
"MIT"
] | 69
|
2020-03-31T06:40:17.000Z
|
2022-02-25T11:48:18.000Z
|
app/env/lib/python3.7/site-packages/socks.py
|
siyaochen/Tier1Health
|
536591a7534bbb3fb27fe889bfed9de152ec1864
|
[
"MIT"
] | 11
|
2019-12-26T17:21:03.000Z
|
2022-03-21T22:17:07.000Z
|
app/env/lib/python3.7/site-packages/socks.py
|
siyaochen/Tier1Health
|
536591a7534bbb3fb27fe889bfed9de152ec1864
|
[
"MIT"
] | 28
|
2020-04-15T15:24:17.000Z
|
2021-12-26T04:05:02.000Z
|
"""SocksiPy - Python SOCKS module.
Copyright 2006 Dan-Haim. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of Dan Haim nor the names of his contributors may be used
to endorse or promote products derived from this software without specific
prior written permission.
THIS SOFTWARE IS PROVIDED BY DAN HAIM "AS IS" AND ANY EXPRESS OR IMPLIED
WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
EVENT SHALL DAN HAIM OR HIS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA
OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
This module provides a standard socket-like interface for Python
for tunneling connections through SOCKS proxies.
===============================================================================
Minor modifications made by Christopher Gilbert (http://motomastyle.com/)
for use in PyLoris (http://pyloris.sourceforge.net/)
Minor modifications made by Mario Vilas (http://breakingcode.wordpress.com/)
mainly to merge bug fixes found in Sourceforge
Modifications made by Anorov (https://github.com/Anorov)
-Forked and renamed to PySocks
-Fixed issue with HTTP proxy failure checking (same bug that was in the
old ___recvall() method)
-Included SocksiPyHandler (sockshandler.py), to be used as a urllib2 handler,
courtesy of e000 (https://github.com/e000):
https://gist.github.com/869791#file_socksipyhandler.py
-Re-styled code to make it readable
-Aliased PROXY_TYPE_SOCKS5 -> SOCKS5 etc.
-Improved exception handling and output
-Removed irritating use of sequence indexes, replaced with tuple unpacked
variables
-Fixed up Python 3 bytestring handling - chr(0x03).encode() -> b"\x03"
-Other general fixes
-Added clarification that the HTTP proxy connection method only supports
CONNECT-style tunneling HTTP proxies
-Various small bug fixes
"""
from base64 import b64encode
try:  # Callable moved to collections.abc in Python 3.3 and was removed from collections in 3.10
    from collections.abc import Callable
except ImportError:
    from collections import Callable
from errno import EOPNOTSUPP, EINVAL, EAGAIN
import functools
from io import BytesIO
import logging
import os
from os import SEEK_CUR
import socket
import struct
import sys
__version__ = "1.6.7"
if os.name == "nt" and sys.version_info < (3, 0):
try:
import win_inet_pton
except ImportError:
raise ImportError(
"To run PySocks on Windows you must install win_inet_pton")
log = logging.getLogger(__name__)
PROXY_TYPE_SOCKS4 = SOCKS4 = 1
PROXY_TYPE_SOCKS5 = SOCKS5 = 2
PROXY_TYPE_HTTP = HTTP = 3
PROXY_TYPES = {"SOCKS4": SOCKS4, "SOCKS5": SOCKS5, "HTTP": HTTP}
PRINTABLE_PROXY_TYPES = dict(zip(PROXY_TYPES.values(), PROXY_TYPES.keys()))
_orgsocket = _orig_socket = socket.socket
def set_self_blocking(function):
@functools.wraps(function)
def wrapper(*args, **kwargs):
self = args[0]
try:
_is_blocking = self.gettimeout()
if _is_blocking == 0:
self.setblocking(True)
return function(*args, **kwargs)
except Exception as e:
raise
finally:
# set orgin blocking
if _is_blocking == 0:
self.setblocking(False)
return wrapper
class ProxyError(IOError):
"""Socket_err contains original socket.error exception."""
def __init__(self, msg, socket_err=None):
self.msg = msg
self.socket_err = socket_err
if socket_err:
self.msg += ": {0}".format(socket_err)
def __str__(self):
return self.msg
class GeneralProxyError(ProxyError):
pass
class ProxyConnectionError(ProxyError):
pass
class SOCKS5AuthError(ProxyError):
pass
class SOCKS5Error(ProxyError):
pass
class SOCKS4Error(ProxyError):
pass
class HTTPError(ProxyError):
pass
SOCKS4_ERRORS = {
0x5B: "Request rejected or failed",
0x5C: ("Request rejected because SOCKS server cannot connect to identd on"
" the client"),
0x5D: ("Request rejected because the client program and identd report"
" different user-ids")
}
SOCKS5_ERRORS = {
0x01: "General SOCKS server failure",
0x02: "Connection not allowed by ruleset",
0x03: "Network unreachable",
0x04: "Host unreachable",
0x05: "Connection refused",
0x06: "TTL expired",
0x07: "Command not supported, or protocol error",
0x08: "Address type not supported"
}
DEFAULT_PORTS = {SOCKS4: 1080, SOCKS5: 1080, HTTP: 8080}
def set_default_proxy(proxy_type=None, addr=None, port=None, rdns=True,
username=None, password=None):
"""Sets a default proxy.
All further socksocket objects will use the default unless explicitly
changed. All parameters are as for socket.set_proxy()."""
socksocket.default_proxy = (proxy_type, addr, port, rdns,
username.encode() if username else None,
password.encode() if password else None)
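# Example sketch (proxy and destination addresses are placeholders): once a
# default proxy is set, every new socksocket uses it unless overridden by
# set_proxy() on the individual socket.
#   set_default_proxy(SOCKS5, "127.0.0.1", 1080)
#   s = socksocket()
#   s.connect(("example.org", 80))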
def setdefaultproxy(*args, **kwargs):
if "proxytype" in kwargs:
kwargs["proxy_type"] = kwargs.pop("proxytype")
return set_default_proxy(*args, **kwargs)
def get_default_proxy():
"""Returns the default proxy, set by set_default_proxy."""
return socksocket.default_proxy
getdefaultproxy = get_default_proxy
def wrap_module(module):
"""Attempts to replace a module's socket library with a SOCKS socket.
Must set a default proxy using set_default_proxy(...) first. This will
only work on modules that import socket directly into the namespace;
most of the Python Standard Library falls into this category."""
if socksocket.default_proxy:
module.socket.socket = socksocket
else:
raise GeneralProxyError("No default proxy specified")
wrapmodule = wrap_module
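# Sketch (the wrapped module name is a placeholder): wrap_module() only helps
# modules that do `import socket` and then call socket.socket(...) directly.
#   set_default_proxy(SOCKS5, "127.0.0.1", 1080)
#   wrap_module(some_module)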
def create_connection(dest_pair,
timeout=None, source_address=None,
proxy_type=None, proxy_addr=None,
proxy_port=None, proxy_rdns=True,
proxy_username=None, proxy_password=None,
socket_options=None):
"""create_connection(dest_pair, *[, timeout], **proxy_args) -> socket object
Like socket.create_connection(), but connects to proxy
before returning the socket object.
dest_pair - 2-tuple of (IP/hostname, port).
**proxy_args - Same args passed to socksocket.set_proxy() if present.
timeout - Optional socket timeout value, in seconds.
source_address - tuple (host, port) for the socket to bind to as its source
address before connecting (only for compatibility)
"""
# Remove IPv6 brackets on the remote address and proxy address.
remote_host, remote_port = dest_pair
if remote_host.startswith("["):
remote_host = remote_host.strip("[]")
if proxy_addr and proxy_addr.startswith("["):
proxy_addr = proxy_addr.strip("[]")
err = None
# Allow the SOCKS proxy to be on IPv4 or IPv6 addresses.
for r in socket.getaddrinfo(proxy_addr, proxy_port, 0, socket.SOCK_STREAM):
family, socket_type, proto, canonname, sa = r
sock = None
try:
sock = socksocket(family, socket_type, proto)
if socket_options:
for opt in socket_options:
sock.setsockopt(*opt)
if isinstance(timeout, (int, float)):
sock.settimeout(timeout)
if proxy_type:
sock.set_proxy(proxy_type, proxy_addr, proxy_port, proxy_rdns,
proxy_username, proxy_password)
if source_address:
sock.bind(source_address)
sock.connect((remote_host, remote_port))
return sock
except (socket.error, ProxyConnectionError) as e:
err = e
if sock:
sock.close()
sock = None
if err:
raise err
raise socket.error("gai returned empty list.")
class _BaseSocket(socket.socket):
"""Allows Python 2 delegated methods such as send() to be overridden."""
def __init__(self, *pos, **kw):
_orig_socket.__init__(self, *pos, **kw)
self._savedmethods = dict()
for name in self._savenames:
self._savedmethods[name] = getattr(self, name)
delattr(self, name) # Allows normal overriding mechanism to work
_savenames = list()
def _makemethod(name):
return lambda self, *pos, **kw: self._savedmethods[name](*pos, **kw)
for name in ("sendto", "send", "recvfrom", "recv"):
method = getattr(_BaseSocket, name, None)
# Determine if the method is not defined the usual way
# as a function in the class.
# Python 2 uses __slots__, so there are descriptors for each method,
# but they are not functions.
if not isinstance(method, Callable):
_BaseSocket._savenames.append(name)
setattr(_BaseSocket, name, _makemethod(name))
class socksocket(_BaseSocket):
"""socksocket([family[, type[, proto]]]) -> socket object
Open a SOCKS enabled socket. The parameters are the same as
those of the standard socket init. In order for SOCKS to work,
you must specify family=AF_INET and proto=0.
The "type" argument must be either SOCK_STREAM or SOCK_DGRAM.
"""
default_proxy = None
def __init__(self, family=socket.AF_INET, type=socket.SOCK_STREAM,
proto=0, *args, **kwargs):
if type not in (socket.SOCK_STREAM, socket.SOCK_DGRAM):
msg = "Socket type must be stream or datagram, not {!r}"
raise ValueError(msg.format(type))
super(socksocket, self).__init__(family, type, proto, *args, **kwargs)
self._proxyconn = None # TCP connection to keep UDP relay alive
if self.default_proxy:
self.proxy = self.default_proxy
else:
self.proxy = (None, None, None, None, None, None)
self.proxy_sockname = None
self.proxy_peername = None
self._timeout = None
def _readall(self, file, count):
"""Receive EXACTLY the number of bytes requested from the file object.
Blocks until the required number of bytes have been received."""
data = b""
while len(data) < count:
d = file.read(count - len(data))
if not d:
raise GeneralProxyError("Connection closed unexpectedly")
data += d
return data
def settimeout(self, timeout):
self._timeout = timeout
try:
# test if we're connected, if so apply timeout
peer = self.get_proxy_peername()
super(socksocket, self).settimeout(self._timeout)
except socket.error:
pass
def gettimeout(self):
return self._timeout
def setblocking(self, v):
if v:
self.settimeout(None)
else:
self.settimeout(0.0)
def set_proxy(self, proxy_type=None, addr=None, port=None, rdns=True,
username=None, password=None):
""" Sets the proxy to be used.
proxy_type - The type of the proxy to be used. Three types
are supported: PROXY_TYPE_SOCKS4 (including socks4a),
PROXY_TYPE_SOCKS5 and PROXY_TYPE_HTTP
addr - The address of the server (IP or DNS).
port - The port of the server. Defaults to 1080 for SOCKS
servers and 8080 for HTTP proxy servers.
rdns - Should DNS queries be performed on the remote side
(rather than the local side). The default is True.
Note: This has no effect with SOCKS4 servers.
username - Username to authenticate with to the server.
The default is no authentication.
password - Password to authenticate with to the server.
Only relevant when username is also provided."""
self.proxy = (proxy_type, addr, port, rdns,
username.encode() if username else None,
password.encode() if password else None)
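    # Example (credentials and addresses are placeholders): authenticated SOCKS5
    # with rdns=True so hostname resolution happens on the proxy side.
    #   s = socksocket()
    #   s.set_proxy(SOCKS5, "proxy.internal", 1080, rdns=True,
    #               username="user", password="secret")
    #   s.connect(("example.org", 443))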
def setproxy(self, *args, **kwargs):
if "proxytype" in kwargs:
kwargs["proxy_type"] = kwargs.pop("proxytype")
return self.set_proxy(*args, **kwargs)
def bind(self, *pos, **kw):
"""Implements proxy connection for UDP sockets.
Happens during the bind() phase."""
(proxy_type, proxy_addr, proxy_port, rdns, username,
password) = self.proxy
if not proxy_type or self.type != socket.SOCK_DGRAM:
return _orig_socket.bind(self, *pos, **kw)
if self._proxyconn:
raise socket.error(EINVAL, "Socket already bound to an address")
if proxy_type != SOCKS5:
msg = "UDP only supported by SOCKS5 proxy type"
raise socket.error(EOPNOTSUPP, msg)
super(socksocket, self).bind(*pos, **kw)
# Need to specify actual local port because
# some relays drop packets if a port of zero is specified.
# Avoid specifying host address in case of NAT though.
_, port = self.getsockname()
dst = ("0", port)
self._proxyconn = _orig_socket()
proxy = self._proxy_addr()
self._proxyconn.connect(proxy)
UDP_ASSOCIATE = b"\x03"
_, relay = self._SOCKS5_request(self._proxyconn, UDP_ASSOCIATE, dst)
# The relay is most likely on the same host as the SOCKS proxy,
# but some proxies return a private IP address (10.x.y.z)
host, _ = proxy
_, port = relay
super(socksocket, self).connect((host, port))
super(socksocket, self).settimeout(self._timeout)
self.proxy_sockname = ("0.0.0.0", 0) # Unknown
def sendto(self, bytes, *args, **kwargs):
if self.type != socket.SOCK_DGRAM:
return super(socksocket, self).sendto(bytes, *args, **kwargs)
if not self._proxyconn:
self.bind(("", 0))
address = args[-1]
flags = args[:-1]
header = BytesIO()
RSV = b"\x00\x00"
header.write(RSV)
STANDALONE = b"\x00"
header.write(STANDALONE)
self._write_SOCKS5_address(address, header)
sent = super(socksocket, self).send(header.getvalue() + bytes, *flags,
**kwargs)
return sent - header.tell()
def send(self, bytes, flags=0, **kwargs):
if self.type == socket.SOCK_DGRAM:
return self.sendto(bytes, flags, self.proxy_peername, **kwargs)
else:
return super(socksocket, self).send(bytes, flags, **kwargs)
def recvfrom(self, bufsize, flags=0):
if self.type != socket.SOCK_DGRAM:
return super(socksocket, self).recvfrom(bufsize, flags)
if not self._proxyconn:
self.bind(("", 0))
buf = BytesIO(super(socksocket, self).recv(bufsize + 1024, flags))
buf.seek(2, SEEK_CUR)
frag = buf.read(1)
if ord(frag):
raise NotImplementedError("Received UDP packet fragment")
fromhost, fromport = self._read_SOCKS5_address(buf)
if self.proxy_peername:
peerhost, peerport = self.proxy_peername
if fromhost != peerhost or peerport not in (0, fromport):
raise socket.error(EAGAIN, "Packet filtered")
return (buf.read(bufsize), (fromhost, fromport))
def recv(self, *pos, **kw):
bytes, _ = self.recvfrom(*pos, **kw)
return bytes
def close(self):
if self._proxyconn:
self._proxyconn.close()
return super(socksocket, self).close()
def get_proxy_sockname(self):
"""Returns the bound IP address and port number at the proxy."""
return self.proxy_sockname
getproxysockname = get_proxy_sockname
def get_proxy_peername(self):
"""
Returns the IP and port number of the proxy.
"""
return self.getpeername()
getproxypeername = get_proxy_peername
def get_peername(self):
"""Returns the IP address and port number of the destination machine.
Note: get_proxy_peername returns the proxy."""
return self.proxy_peername
getpeername = get_peername
def _negotiate_SOCKS5(self, *dest_addr):
"""Negotiates a stream connection through a SOCKS5 server."""
CONNECT = b"\x01"
self.proxy_peername, self.proxy_sockname = self._SOCKS5_request(
self, CONNECT, dest_addr)
def _SOCKS5_request(self, conn, cmd, dst):
"""
Send SOCKS5 request with given command (CMD field) and
address (DST field). Returns resolved DST address that was used.
"""
proxy_type, addr, port, rdns, username, password = self.proxy
writer = conn.makefile("wb")
reader = conn.makefile("rb", 0) # buffering=0 renamed in Python 3
try:
# First we'll send the authentication packages we support.
if username and password:
# The username/password details were supplied to the
# set_proxy method so we support the USERNAME/PASSWORD
# authentication (in addition to the standard none).
writer.write(b"\x05\x02\x00\x02")
else:
# No username/password were entered, therefore we
# only support connections with no authentication.
writer.write(b"\x05\x01\x00")
# We'll receive the server's response to determine which
# method was selected
writer.flush()
chosen_auth = self._readall(reader, 2)
if chosen_auth[0:1] != b"\x05":
# Note: string[i:i+1] is used because indexing of a bytestring
# via bytestring[i] yields an integer in Python 3
raise GeneralProxyError(
"SOCKS5 proxy server sent invalid data")
# Check the chosen authentication method
if chosen_auth[1:2] == b"\x02":
# Okay, we need to perform a basic username/password
# authentication.
writer.write(b"\x01" + chr(len(username)).encode()
+ username
+ chr(len(password)).encode()
+ password)
writer.flush()
auth_status = self._readall(reader, 2)
if auth_status[0:1] != b"\x01":
# Bad response
raise GeneralProxyError(
"SOCKS5 proxy server sent invalid data")
if auth_status[1:2] != b"\x00":
# Authentication failed
raise SOCKS5AuthError("SOCKS5 authentication failed")
# Otherwise, authentication succeeded
# No authentication is required if 0x00
elif chosen_auth[1:2] != b"\x00":
# Reaching here is always bad
if chosen_auth[1:2] == b"\xFF":
raise SOCKS5AuthError(
"All offered SOCKS5 authentication methods were"
" rejected")
else:
raise GeneralProxyError(
"SOCKS5 proxy server sent invalid data")
# Now we can request the actual connection
writer.write(b"\x05" + cmd + b"\x00")
resolved = self._write_SOCKS5_address(dst, writer)
writer.flush()
# Get the response
resp = self._readall(reader, 3)
if resp[0:1] != b"\x05":
raise GeneralProxyError(
"SOCKS5 proxy server sent invalid data")
status = ord(resp[1:2])
if status != 0x00:
# Connection failed: server returned an error
error = SOCKS5_ERRORS.get(status, "Unknown error")
raise SOCKS5Error("{0:#04x}: {1}".format(status, error))
# Get the bound address/port
bnd = self._read_SOCKS5_address(reader)
super(socksocket, self).settimeout(self._timeout)
return (resolved, bnd)
finally:
reader.close()
writer.close()
def _write_SOCKS5_address(self, addr, file):
"""
Return the host and port packed for the SOCKS5 protocol,
and the resolved address as a tuple object.
"""
host, port = addr
proxy_type, _, _, rdns, username, password = self.proxy
family_to_byte = {socket.AF_INET: b"\x01", socket.AF_INET6: b"\x04"}
# If the given destination address is an IP address, we'll
# use the IP address request even if remote resolving was specified.
# Detect whether the address is IPv4/6 directly.
for family in (socket.AF_INET, socket.AF_INET6):
try:
addr_bytes = socket.inet_pton(family, host)
file.write(family_to_byte[family] + addr_bytes)
host = socket.inet_ntop(family, addr_bytes)
file.write(struct.pack(">H", port))
return host, port
except socket.error:
continue
# Well it's not an IP number, so it's probably a DNS name.
if rdns:
# Resolve remotely
host_bytes = host.encode("idna")
file.write(b"\x03" + chr(len(host_bytes)).encode() + host_bytes)
else:
# Resolve locally
addresses = socket.getaddrinfo(host, port, socket.AF_UNSPEC,
socket.SOCK_STREAM,
socket.IPPROTO_TCP,
socket.AI_ADDRCONFIG)
# We can't really work out what IP is reachable, so just pick the
# first.
target_addr = addresses[0]
family = target_addr[0]
host = target_addr[4][0]
addr_bytes = socket.inet_pton(family, host)
file.write(family_to_byte[family] + addr_bytes)
host = socket.inet_ntop(family, addr_bytes)
file.write(struct.pack(">H", port))
return host, port
def _read_SOCKS5_address(self, file):
atyp = self._readall(file, 1)
if atyp == b"\x01":
addr = socket.inet_ntoa(self._readall(file, 4))
elif atyp == b"\x03":
length = self._readall(file, 1)
addr = self._readall(file, ord(length))
elif atyp == b"\x04":
addr = socket.inet_ntop(socket.AF_INET6, self._readall(file, 16))
else:
raise GeneralProxyError("SOCKS5 proxy server sent invalid data")
port = struct.unpack(">H", self._readall(file, 2))[0]
return addr, port
def _negotiate_SOCKS4(self, dest_addr, dest_port):
"""Negotiates a connection through a SOCKS4 server."""
proxy_type, addr, port, rdns, username, password = self.proxy
writer = self.makefile("wb")
reader = self.makefile("rb", 0) # buffering=0 renamed in Python 3
try:
# Check if the destination address provided is an IP address
remote_resolve = False
try:
addr_bytes = socket.inet_aton(dest_addr)
except socket.error:
# It's a DNS name. Check where it should be resolved.
if rdns:
addr_bytes = b"\x00\x00\x00\x01"
remote_resolve = True
else:
addr_bytes = socket.inet_aton(
socket.gethostbyname(dest_addr))
# Construct the request packet
writer.write(struct.pack(">BBH", 0x04, 0x01, dest_port))
writer.write(addr_bytes)
# The username parameter is considered userid for SOCKS4
if username:
writer.write(username)
writer.write(b"\x00")
# DNS name if remote resolving is required
# NOTE: This is actually an extension to the SOCKS4 protocol
# called SOCKS4A and may not be supported in all cases.
if remote_resolve:
writer.write(dest_addr.encode("idna") + b"\x00")
writer.flush()
# Get the response from the server
resp = self._readall(reader, 8)
if resp[0:1] != b"\x00":
# Bad data
raise GeneralProxyError(
"SOCKS4 proxy server sent invalid data")
status = ord(resp[1:2])
if status != 0x5A:
# Connection failed: server returned an error
error = SOCKS4_ERRORS.get(status, "Unknown error")
raise SOCKS4Error("{0:#04x}: {1}".format(status, error))
# Get the bound address/port
self.proxy_sockname = (socket.inet_ntoa(resp[4:]),
struct.unpack(">H", resp[2:4])[0])
if remote_resolve:
self.proxy_peername = socket.inet_ntoa(addr_bytes), dest_port
else:
self.proxy_peername = dest_addr, dest_port
finally:
reader.close()
writer.close()
def _negotiate_HTTP(self, dest_addr, dest_port):
"""Negotiates a connection through an HTTP server.
NOTE: This currently only supports HTTP CONNECT-style proxies."""
proxy_type, addr, port, rdns, username, password = self.proxy
# If we need to resolve locally, we do this now
addr = dest_addr if rdns else socket.gethostbyname(dest_addr)
http_headers = [
(b"CONNECT " + addr.encode("idna") + b":"
+ str(dest_port).encode() + b" HTTP/1.1"),
b"Host: " + dest_addr.encode("idna")
]
if username and password:
http_headers.append(b"Proxy-Authorization: basic "
+ b64encode(username + b":" + password))
http_headers.append(b"\r\n")
self.sendall(b"\r\n".join(http_headers))
# We just need the first line to check if the connection was successful
fobj = self.makefile()
status_line = fobj.readline()
fobj.close()
if not status_line:
raise GeneralProxyError("Connection closed unexpectedly")
try:
proto, status_code, status_msg = status_line.split(" ", 2)
except ValueError:
raise GeneralProxyError("HTTP proxy server sent invalid response")
if not proto.startswith("HTTP/"):
raise GeneralProxyError(
"Proxy server does not appear to be an HTTP proxy")
try:
status_code = int(status_code)
except ValueError:
raise HTTPError(
"HTTP proxy server did not return a valid HTTP status")
if status_code != 200:
error = "{0}: {1}".format(status_code, status_msg)
if status_code in (400, 403, 405):
# It's likely that the HTTP proxy server does not support the
# CONNECT tunneling method
error += ("\n[*] Note: The HTTP proxy server may not be"
" supported by PySocks (must be a CONNECT tunnel"
" proxy)")
raise HTTPError(error)
self.proxy_sockname = (b"0.0.0.0", 0)
self.proxy_peername = addr, dest_port
_proxy_negotiators = {
SOCKS4: _negotiate_SOCKS4,
SOCKS5: _negotiate_SOCKS5,
HTTP: _negotiate_HTTP
}
@set_self_blocking
def connect(self, dest_pair):
"""
Connects to the specified destination through a proxy.
Uses the same API as socket's connect().
To select the proxy server, use set_proxy().
dest_pair - 2-tuple of (IP/hostname, port).
"""
if len(dest_pair) != 2 or dest_pair[0].startswith("["):
# Probably IPv6, not supported -- raise an error, and hope
# Happy Eyeballs (RFC6555) makes sure at least the IPv4
# connection works...
raise socket.error("PySocks doesn't support IPv6: %s"
% str(dest_pair))
dest_addr, dest_port = dest_pair
if self.type == socket.SOCK_DGRAM:
if not self._proxyconn:
self.bind(("", 0))
dest_addr = socket.gethostbyname(dest_addr)
# If the host address is INADDR_ANY or similar, reset the peer
# address so that packets are received from any peer
if dest_addr == "0.0.0.0" and not dest_port:
self.proxy_peername = None
else:
self.proxy_peername = (dest_addr, dest_port)
return
(proxy_type, proxy_addr, proxy_port, rdns, username,
password) = self.proxy
# Do a minimal input check first
if (not isinstance(dest_pair, (list, tuple))
or len(dest_pair) != 2
or not dest_addr
or not isinstance(dest_port, int)):
# Inputs failed, raise an error
raise GeneralProxyError(
"Invalid destination-connection (host, port) pair")
# We set the timeout here so that we don't hang in connection or during
# negotiation.
super(socksocket, self).settimeout(self._timeout)
if proxy_type is None:
# Treat like regular socket object
self.proxy_peername = dest_pair
super(socksocket, self).settimeout(self._timeout)
super(socksocket, self).connect((dest_addr, dest_port))
return
proxy_addr = self._proxy_addr()
try:
# Initial connection to proxy server.
super(socksocket, self).connect(proxy_addr)
except socket.error as error:
# Error while connecting to proxy
self.close()
proxy_addr, proxy_port = proxy_addr
proxy_server = "{0}:{1}".format(proxy_addr, proxy_port)
printable_type = PRINTABLE_PROXY_TYPES[proxy_type]
msg = "Error connecting to {0} proxy {1}".format(printable_type,
proxy_server)
log.debug("%s due to: %s", msg, error)
raise ProxyConnectionError(msg, error)
else:
# Connected to proxy server, now negotiate
try:
# Calls negotiate_{SOCKS4, SOCKS5, HTTP}
negotiate = self._proxy_negotiators[proxy_type]
negotiate(self, dest_addr, dest_port)
except socket.error as error:
# Wrap socket errors
self.close()
raise GeneralProxyError("Socket error", error)
except ProxyError:
# Protocol error while negotiating with proxy
self.close()
raise
def _proxy_addr(self):
"""
Return proxy address to connect to as tuple object
"""
(proxy_type, proxy_addr, proxy_port, rdns, username,
password) = self.proxy
proxy_port = proxy_port or DEFAULT_PORTS.get(proxy_type)
if not proxy_port:
raise GeneralProxyError("Invalid proxy type")
return proxy_addr, proxy_port
| 37.061998
| 80
| 0.599331
|
64f21cfc74bdc0b4a755c71f96d1717cc72cc8bb
| 63
|
py
|
Python
|
prince/__version__.py
|
MaximeKan/prince
|
eb0df10a9ec6448a04fa2e968e9aa01abe37158d
|
[
"MIT"
] | null | null | null |
prince/__version__.py
|
MaximeKan/prince
|
eb0df10a9ec6448a04fa2e968e9aa01abe37158d
|
[
"MIT"
] | null | null | null |
prince/__version__.py
|
MaximeKan/prince
|
eb0df10a9ec6448a04fa2e968e9aa01abe37158d
|
[
"MIT"
] | null | null | null |
VERSION = (0, 6, 3)
__version__ = '.'.join(map(str, VERSION))
| 15.75
| 41
| 0.603175
|
a22fe341ea3bc32220191893223949c3596d163b
| 937
|
py
|
Python
|
tools/mX2x.py
|
vsmelov/neural-music
|
0cbe06080e2c257c323ffc93dc673bb1e0edf2c4
|
[
"MIT"
] | 2
|
2020-03-06T19:36:17.000Z
|
2022-03-09T07:29:08.000Z
|
tools/mX2x.py
|
vsmelov/neural-music
|
0cbe06080e2c257c323ffc93dc673bb1e0edf2c4
|
[
"MIT"
] | null | null | null |
tools/mX2x.py
|
vsmelov/neural-music
|
0cbe06080e2c257c323ffc93dc673bb1e0edf2c4
|
[
"MIT"
] | null | null | null |
# coding: utf-8
from config import *
from tools.sineModel import XXsineModelAnal, XXsineModelSynth
import numpy as np
import os  # used below for path handling; may also be re-exported by config's star import
def mX2audio(mX, N, H, shift_X, var_X):
# mX = np.sqrt(mX)
mX = mX * var_X + shift_X
mX_full = np.zeros((mX.shape[0], NN)) + zero_db
    print('mX.shape: {}'.format(mX.shape))
    print('mX_full.shape: {}'.format(mX_full.shape))
mX_full[:, min_k:max_k] = mX
tfreq, tmag, tphase = XXsineModelAnal(mX, fs, N, H, sin_t, maxnSines,
minSineDur, freqDevOffset,
freqDevSlope)
audio = XXsineModelSynth(tfreq, tmag, Ns, H, fs)
return audio
if __name__ == '__main__':
import tools.utilFunctions as UF
with open(os.path.join(data_dir, 'X.npy'), 'rb') as f:
sentences = np.load(f)
mX = np.concatenate(sentences[:30])
audio = mX2audio(mX, N, H, -80, 80)
UF.wavwrite(audio, fs, 'test.wav')
| 32.310345
| 73
| 0.595518
|
3c9865a705590751a1fe5567159f47e1e15baecd
| 159
|
py
|
Python
|
ALGORITHMS/Reccursion/Factorial/Factorial.py
|
anishsingh42/CodeChef
|
50f5c0438516210895e513bc4ee959b9d99ef647
|
[
"Apache-2.0"
] | 127
|
2020-10-13T18:04:35.000Z
|
2022-02-17T10:56:27.000Z
|
ALGORITHMS/Reccursion/Factorial/Factorial.py
|
anishsingh42/CodeChef
|
50f5c0438516210895e513bc4ee959b9d99ef647
|
[
"Apache-2.0"
] | 132
|
2020-10-13T18:06:53.000Z
|
2021-10-17T18:44:26.000Z
|
ALGORITHMS/Reccursion/Factorial/Factorial.py
|
anishsingh42/CodeChef
|
50f5c0438516210895e513bc4ee959b9d99ef647
|
[
"Apache-2.0"
] | 364
|
2020-10-13T18:04:52.000Z
|
2022-03-04T14:34:53.000Z
|
def factorial(n):
    if n > 1:
        return n * factorial(n - 1)
    return 1
n = int(input())
print(factorial(n))
# Complexity of the above program is O(n)
| 15.9
| 41
| 0.572327
|
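A short worked example of the recursion above (assuming the factorial function from that snippet is in scope): each call multiplies n by the result for n - 1 until the base case is reached, so the call stack is also O(n) deep.

# factorial(5) -> 5 * factorial(4) -> 5 * 4 * factorial(3) -> ... -> 5 * 4 * 3 * 2 * 1 = 120
assert factorial(5) == 120
assert factorial(1) == 1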
bbe28e0c1269a548559400d12d43c5481d96dd89
| 4,036
|
py
|
Python
|
MLtingPots/Transformer/multihead_attention.py
|
raynbowy23/rays_lab
|
8c29b26df61e42e396046755dc1c49dd5fb04f5e
|
[
"MIT"
] | null | null | null |
MLtingPots/Transformer/multihead_attention.py
|
raynbowy23/rays_lab
|
8c29b26df61e42e396046755dc1c49dd5fb04f5e
|
[
"MIT"
] | null | null | null |
MLtingPots/Transformer/multihead_attention.py
|
raynbowy23/rays_lab
|
8c29b26df61e42e396046755dc1c49dd5fb04f5e
|
[
"MIT"
] | null | null | null |
import types
from typing import Optional
import jax
import jax.numpy as jnp
import haiku as hk
import numpy as np
from icecream import ic
ic.configureOutput(includeContext=True)
class MultiHeadAttention(hk.Module):
"""
    Splits the projected query, key, and value into several small heads, computes
    attention for each head in parallel, and concatenates the per-head results at
    the end; this is generally more efficient than one large single-head attention.
    Attributes:
        num_heads: number of attention heads
        key_size: per-head dimension of queries and keys
        model_size: dimension of the final output projection
"""
def __init__(self, num_heads: int, key_size: int, w_init_scale: float, value_size: Optional[int] = None, model_size: Optional[int] = None, dropout_rate: float = 0.0, name: Optional[str] = None):
"""Inits scaled_attention"""
super().__init__(name=name)
self.head_num = num_heads
self.key_size = key_size
self.value_size = value_size or key_size
self.model_size = model_size or key_size * self.head_num
self.w_init = hk.initializers.VarianceScaling(w_init_scale)
self.scale_factor = np.sqrt(key_size) #.astype(key_size.dtype)
self.dropout_rate = dropout_rate
def __call__(self, query: jnp.ndarray, key: Optional[jnp.ndarray] = None, value: Optional[jnp.ndarray] = None, mask: Optional[jnp.ndarray] = None, name: Optional[str] = None) -> jnp.ndarray:
query_heads = self._linear_projection(query, self.key_size, "query") # head_size -> key_size # WQ
key_heads = self._linear_projection(key, self.key_size, "key") # WK
value_heads = self._linear_projection(value, self.value_size, "value") # WV
# scale dot product
        # [batch, heads, tokens, tokens]
attn_logits = jnp.einsum('...thd, ...Thd -> ...htT', query_heads, key_heads) # [batch_size, head_num, q_length, hidden_dim/head_num]
# mask
sqrt_key_size = np.sqrt(self.key_size).astype(key.dtype)
# attn_logits = attn_logits.masked_fill(attention_mask==0, -1e30)
attn_logits = attn_logits / sqrt_key_size
if mask is not None:
assert mask.shape == attn_logits.shape[-2:]
attn_logits = jnp.where(mask, attn_logits, -1e30)
attention_weights = jax.nn.softmax(attn_logits)
# TODO: attention_weight = dropout_layer
# TODO: add Dropout
# [batch_size, head_num, q_length, hidden_dim/head_num]
attn = jnp.einsum("...htT, ...Thd -> ...thd", attention_weights, value_heads) # [batch_size, q_length, hidden_dim]
# Concatenate attention matrix of all heads into a single vector
attn_vec = jnp.reshape(attn, (*query.shape[:-1], -1))
return hk.Linear(self.model_size, w_init=self.w_init)(attn_vec) # WO
@hk.transparent
def _linear_projection(
self,
x: jnp.ndarray,
head_size: int,
name: Optional[str] = None
) -> jnp.ndarray:
y = hk.Linear(self.head_num * head_size, w_init=self.w_init, name=name)(x)
return y.reshape((*x.shape[:-1], self.head_num, head_size))
class SelfAttention(MultiHeadAttention):
"""
Self attention with a causal mask applied
Attributes:
query: input
key: <- memory
value: <- memory
mask: attention mask
Return:
input -> memory
"""
def __call__(self, query: jnp.ndarray, key: Optional[jnp.ndarray] = None, value: Optional[jnp.ndarray] = None, attention_mask: Optional[jnp.ndarray] = None, name: Optional[str] = None) -> jnp.ndarray:
key = key if key is not None else query
value = value if value is not None else query
# memory = memory if memory is not None else query
seq_len = query.shape[1]
causal_mask = np.tril(np.ones((seq_len, seq_len)))
attention_mask = attention_mask * causal_mask if attention_mask is not None else causal_mask
return super().__call__(query=query, key=key, value=value, mask=attention_mask, name=name)
| 42.484211
| 205
| 0.64891
|
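A minimal usage sketch for the module above, assuming SelfAttention as defined there is in scope and that jax and dm-haiku are installed; the hyperparameters and shapes here are illustrative assumptions, not values from the original file.

import jax
import jax.numpy as jnp
import haiku as hk

def forward(x):
    # SelfAttention as defined above: key/value default to the query,
    # and a causal mask is applied internally.
    return SelfAttention(num_heads=4, key_size=16, w_init_scale=1.0)(x)

forward_t = hk.transform(forward)
x = jnp.ones((2, 10, 64))                         # [batch, tokens, features]
params = forward_t.init(jax.random.PRNGKey(0), x)
y = forward_t.apply(params, None, x)              # model_size = 4 * 16 -> shape (2, 10, 64)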
bed91c79eace828a6fe752aa4e0b8f834be53f67
| 3,659
|
py
|
Python
|
lib/rucio/tests/test_qos.py
|
TeAmP0is0N/rucio
|
45c1b83f8e1514953a41fd076b4e651dd564c39f
|
[
"Apache-2.0"
] | null | null | null |
lib/rucio/tests/test_qos.py
|
TeAmP0is0N/rucio
|
45c1b83f8e1514953a41fd076b4e651dd564c39f
|
[
"Apache-2.0"
] | null | null | null |
lib/rucio/tests/test_qos.py
|
TeAmP0is0N/rucio
|
45c1b83f8e1514953a41fd076b4e651dd564c39f
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 CERN for the benefit of the ATLAS collaboration.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors:
# - Mario Lassnig <mario.lassnig@cern.ch>, 2020
#
# PY3K COMPATIBLE
from rucio.client.rseclient import RSEClient
from rucio.common.config import config_get, config_get_bool
from rucio.core.rse import update_rse, get_rse
from rucio.tests.common import rse_name_generator
from nose.tools import assert_equal
class TestQoS(object):
@classmethod
def setupClass(self):
if config_get_bool('common', 'multi_vo', raise_exception=False, default=False):
self.vo = {'vo': config_get('client', 'vo', raise_exception=False, default='tst')}
else:
self.vo = {}
self.rse_client = RSEClient()
self.tmp_rse_name = rse_name_generator()
self.rse_client.add_rse(self.tmp_rse_name, vo=self.vo)
self.tmp_rse = self.rse_client.get_rse(self.tmp_rse_name)['id']
@classmethod
def teardownClass(self):
self.rse_client.delete_rse(self.tmp_rse_name)
def test_update_and_remove_rse_qos_class(self):
""" QoS (CORE): Update and remove QoS class for RSE """
update_rse(self.tmp_rse, {'qos_class': 'fast_and_expensive'})
rse = get_rse(self.tmp_rse)
assert_equal(rse['qos_class'], 'fast_and_expensive')
update_rse(self.tmp_rse, {'qos_class': 'slow_but_cheap'})
rse = get_rse(self.tmp_rse)
assert_equal(rse['qos_class'], 'slow_but_cheap')
update_rse(self.tmp_rse, {'qos_class': None})
rse = get_rse(self.tmp_rse)
assert_equal(rse['qos_class'], None)
def test_update_and_remove_rse_qos_class_client(self):
""" QoS (CLIENT): Update and remove QoS class for RSE """
self.rse_client.update_rse(self.tmp_rse_name, {'qos_class': 'fast_and_expensive'})
rse = self.rse_client.get_rse(self.tmp_rse_name)
assert_equal(rse['qos_class'], 'fast_and_expensive')
self.rse_client.update_rse(self.tmp_rse_name, {'qos_class': 'slow_but_cheap'})
rse = self.rse_client.get_rse(self.tmp_rse_name)
assert_equal(rse['qos_class'], 'slow_but_cheap')
self.rse_client.update_rse(self.tmp_rse_name, {'qos_class': None})
rse = self.rse_client.get_rse(self.tmp_rse_name)
assert_equal(rse['qos_class'], None)
def test_qos_policies(self):
""" QoS (CLIENT): Add QoS policy for RSE """
self.rse_client.add_qos_policy(self.tmp_rse_name, 'FOO')
policies = self.rse_client.list_qos_policies(self.tmp_rse_name)
assert_equal(policies, ['FOO'])
self.rse_client.add_qos_policy(self.tmp_rse_name, 'BAR')
policies = sorted(self.rse_client.list_qos_policies(self.tmp_rse_name))
assert_equal(policies, ['BAR', 'FOO'])
self.rse_client.delete_qos_policy(self.tmp_rse_name, 'BAR')
policies = self.rse_client.list_qos_policies(self.tmp_rse_name)
assert_equal(policies, ['FOO'])
self.rse_client.delete_qos_policy(self.tmp_rse_name, 'FOO')
policies = self.rse_client.list_qos_policies(self.tmp_rse_name)
assert_equal(policies, [])
| 38.925532
| 94
| 0.701011
|
177bd2dad55441be009c2db3cbd496968679f89d
| 876
|
py
|
Python
|
setup.py
|
slarse/labelbot
|
c61356b07db1eeecdfb25f8d7ff6532be3efb0e1
|
[
"MIT"
] | null | null | null |
setup.py
|
slarse/labelbot
|
c61356b07db1eeecdfb25f8d7ff6532be3efb0e1
|
[
"MIT"
] | 35
|
2019-04-11T12:17:34.000Z
|
2019-04-18T10:18:08.000Z
|
setup.py
|
slarse/github-label-bot
|
c61356b07db1eeecdfb25f8d7ff6532be3efb0e1
|
[
"MIT"
] | null | null | null |
from setuptools import setup, find_packages
with open("README.md", mode="r", encoding="utf-8") as f:
readme = f.read()
test_requirements = [
"pytest>=4.0.0",
"codecov",
"pytest-cov",
"responses",
"pytest-mock",
]
required = ["python_jwt", "jwcrypto", "requests", "boto3"]
setup(
name="labelbot",
version="0.0.6",
description=(
"A GitHub label bot for allowing unprivileged users to label issues "
"with allowed labels."
),
long_description=readme,
long_description_content_type="text/markdown",
author="Joakim Croona, Simon Larsén",
author_email="jcroona@kth.se, slarse@kth.se",
license="MIT",
packages=find_packages(exclude=("tests", "docs")),
install_requires=required,
tests_require=test_requirements,
extras_require=dict(TEST=test_requirements),
python_requires=">=3.6",
)
| 26.545455
| 77
| 0.6621
|
52137894322ce2ad912156fa3fd8df4cf7916c6f
| 280
|
py
|
Python
|
app/dashboards/demo.py
|
dawkinsmw/flask_intro
|
642d0273260c4c4a8152514ab87448b061d32fc9
|
[
"Apache-2.0"
] | null | null | null |
app/dashboards/demo.py
|
dawkinsmw/flask_intro
|
642d0273260c4c4a8152514ab87448b061d32fc9
|
[
"Apache-2.0"
] | null | null | null |
app/dashboards/demo.py
|
dawkinsmw/flask_intro
|
642d0273260c4c4a8152514ab87448b061d32fc9
|
[
"Apache-2.0"
] | null | null | null |
from dash import Dash
import dash_html_components as html
app_layout = html.Div(children=[html.H1(children="Hello Dash"), ])
def init_dash(server):
dash_app = Dash(server=server, routes_pathname_prefix="/demo/",)
dash_app.layout = app_layout
return dash_app.server
| 25.454545
| 68
| 0.75
|
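A wiring sketch for the factory above, assuming the module is importable as app.dashboards.demo (the path shown in this row); init_dash() mounts the Dash layout under /demo/ and returns the underlying Flask server.

from flask import Flask
from app.dashboards.demo import init_dash

server = Flask(__name__)
server = init_dash(server)      # Dash app registered at /demo/ on this Flask instance

if __name__ == "__main__":
    server.run(debug=True)      # http://127.0.0.1:5000/demo/ serves the dashboard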
cf145e338dfccc4e765007596614f618857b82c6
| 1,209
|
py
|
Python
|
rpy/functions/require.py
|
riccardodivirgilio/rpy
|
95682fbd25b55a6ea8db36d9ebfb54ce64380bc1
|
[
"MIT"
] | null | null | null |
rpy/functions/require.py
|
riccardodivirgilio/rpy
|
95682fbd25b55a6ea8db36d9ebfb54ce64380bc1
|
[
"MIT"
] | null | null | null |
rpy/functions/require.py
|
riccardodivirgilio/rpy
|
95682fbd25b55a6ea8db36d9ebfb54ce64380bc1
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
from functools import wraps
from rpy.functions.api import pip
def installed_modules():
return {i.key: i.version for i in pip.get_installed_distributions()}
def missing_requirements(*modules):
distributions = installed_modules()
for module in modules:
version = None
if isinstance(module, (tuple, list)):
module, version = module
if not module in distributions or version and not distributions[
module] == version:
yield version and "%s==%s" % (module, version) or module
def require_module(*modules):
commands = list(missing_requirements(*modules))
if commands:
print("Update in progress: pip install %s --user" % " ".join(commands))
if pip.running_under_virtualenv():
pip.main(["install"] + commands)
else:
pip.main(["install", "--user"] + commands)
def require(*modules):
def outer(func):
@wraps(func)
def inner(*args, **kw):
require_module(*modules)
return func(*args, **kw)
return inner
return outer
| 23.25
| 79
| 0.623656
|
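An illustrative use of the decorator above (the import path mirrors this row's file path, and the package names and versions are assumptions): missing or version-mismatched requirements are pip-installed on the first call, after which the wrapped function runs normally.

from rpy.functions.require import require

@require("requests", ("simplejson", "3.17.2"))
def fetch_status(url):
    import requests
    return requests.get(url).status_code

print(fetch_status("https://example.com"))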
e00a1e273b5f8c73df51c7692edf55d621efbfb0
| 273
|
py
|
Python
|
pysql/constants/core/indexes.py
|
jha-hitesh/pysql
|
ad7c7e4e7e65a97e4dc15cda395678e0c09b02ab
|
[
"MIT"
] | null | null | null |
pysql/constants/core/indexes.py
|
jha-hitesh/pysql
|
ad7c7e4e7e65a97e4dc15cda395678e0c09b02ab
|
[
"MIT"
] | null | null | null |
pysql/constants/core/indexes.py
|
jha-hitesh/pysql
|
ad7c7e4e7e65a97e4dc15cda395678e0c09b02ab
|
[
"MIT"
] | null | null | null |
class CoreIndexConstants:
core_index_slots = (
"table", "index_name", "index_definition",
"UNIQUE"
)
core_index_default_values = {
"table": None,
"index_name": None,
"index_definition": "",
"UNIQUE": False
}
| 21
| 50
| 0.553114
|
f450e8a8186a19f88f2c21addecdfb511b8d8256
| 3,496
|
py
|
Python
|
models/simple.py
|
sunbing7/backdoor_federated_learning
|
965187f00b71effe764e7669c381a182fb15e45a
|
[
"MIT"
] | 176
|
2018-12-17T23:35:57.000Z
|
2022-03-12T11:51:02.000Z
|
models/simple.py
|
sunbing7/backdoor_federated_learning
|
965187f00b71effe764e7669c381a182fb15e45a
|
[
"MIT"
] | 10
|
2019-06-21T10:11:02.000Z
|
2022-03-25T14:13:53.000Z
|
models/simple.py
|
sunbing7/backdoor_federated_learning
|
965187f00b71effe764e7669c381a182fb15e45a
|
[
"MIT"
] | 51
|
2019-01-10T14:51:39.000Z
|
2022-03-21T08:22:16.000Z
|
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
from torch.autograd import Variable
import numpy as np
import datetime
class SimpleNet(nn.Module):
def __init__(self, name=None, created_time=None):
super(SimpleNet, self).__init__()
self.created_time = created_time
self.name=name
def visualize(self, vis, epoch, acc, loss=None, eid='main', is_poisoned=False, name=None):
if name is None:
name = self.name + '_poisoned' if is_poisoned else self.name
vis.line(X=np.array([epoch]), Y=np.array([acc]), name=name, win='vacc_{0}'.format(self.created_time), env=eid,
update='append' if vis.win_exists('vacc_{0}'.format(self.created_time), env=eid) else None,
opts=dict(showlegend=True, title='Accuracy_{0}'.format(self.created_time),
width=700, height=400))
if loss is not None:
vis.line(X=np.array([epoch]), Y=np.array([loss]), name=name, env=eid,
win='vloss_{0}'.format(self.created_time),
update='append' if vis.win_exists('vloss_{0}'.format(self.created_time), env=eid) else None,
opts=dict(showlegend=True, title='Loss_{0}'.format(self.created_time), width=700, height=400))
return
def train_vis(self, vis, epoch, data_len, batch, loss, eid='main', name=None, win='vtrain'):
vis.line(X=np.array([(epoch-1)*data_len+batch]), Y=np.array([loss]),
env=eid,
name=f'{name}' if name is not None else self.name, win=f'{win}_{self.created_time}',
update='append' if vis.win_exists(f'{win}_{self.created_time}', env=eid) else None,
opts=dict(showlegend=True, width=700, height=400, title='Train loss_{0}'.format(self.created_time)))
def save_stats(self, epoch, loss, acc):
self.stats['epoch'].append(epoch)
self.stats['loss'].append(loss)
self.stats['acc'].append(acc)
def copy_params(self, state_dict, coefficient_transfer=100):
own_state = self.state_dict()
for name, param in state_dict.items():
if name in own_state:
shape = param.shape
#
random_tensor = (torch.cuda.FloatTensor(shape).random_(0, 100) <= coefficient_transfer).type(
torch.cuda.FloatTensor)
negative_tensor = (random_tensor*-1)+1
# own_state[name].copy_(param)
own_state[name].copy_(param.clone())
class SimpleMnist(SimpleNet):
def __init__(self, name=None, created_time=None):
super(SimpleMnist, self).__init__(name, created_time)
self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.conv2_drop = nn.Dropout2d()
self.fc1 = nn.Linear(320, 50)
self.fc2 = nn.Linear(50, 10)
def forward(self, x):
x = F.relu(F.max_pool2d(self.conv1(x), 2))
x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
x = x.view(-1, 320)
x = F.relu(self.fc1(x))
x = F.dropout(x, training=self.training)
x = self.fc2(x)
return F.log_softmax(x, dim=1)
| 40.183908
| 133
| 0.581808
|
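A quick shape check for SimpleMnist above (the import path is taken from this row's file path; the batch is random data, so this only exercises the forward pass, not training or the visdom plotting helpers).

import datetime
import torch
from models.simple import SimpleMnist

model = SimpleMnist(name="mnist", created_time=datetime.datetime.now())
x = torch.randn(4, 1, 28, 28)        # a batch of MNIST-sized images
log_probs = model(x)                 # conv/pool stages flatten to 320 features
print(log_probs.shape)               # torch.Size([4, 10]), log-softmax over classes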
55b70084092e4f4c743f714a24651805a5306335
| 10,750
|
py
|
Python
|
ietf/group/migrations/0003_groupfeatures_data.py
|
omunroe-com/ietfdb2
|
aeaae292fbd55aca1b6043227ec105e67d73367f
|
[
"BSD-3-Clause"
] | 2
|
2021-11-20T03:40:56.000Z
|
2021-11-20T03:40:59.000Z
|
ietf/group/migrations/0003_groupfeatures_data.py
|
omunroe-com/ietfdb2
|
aeaae292fbd55aca1b6043227ec105e67d73367f
|
[
"BSD-3-Clause"
] | null | null | null |
ietf/group/migrations/0003_groupfeatures_data.py
|
omunroe-com/ietfdb2
|
aeaae292fbd55aca1b6043227ec105e67d73367f
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2018-07-10 15:58
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations
import debug # pyflakes:ignore
from ietf.review.utils import active_review_teams
group_type_features = {
u'ag': {
'about_page': 'ietf.group.views.group_about',
'admin_roles': 'chair',
'agenda_type': 'ietf',
'customize_workflow': False,
'default_tab': 'ietf.group.views.group_about',
'has_chartering_process': False,
'has_default_jabber': False,
'has_dependencies': False,
'has_documents': False,
'has_meetings': True,
'has_nonsession_materials': False,
'has_milestones': False,
'has_reviews': False,
'material_types': 'slides'},
u'area': {
'about_page': 'ietf.group.views.group_about',
'admin_roles': 'chair',
'agenda_type': 'ietf',
'customize_workflow': False,
'default_tab': 'ietf.group.views.group_about',
'has_chartering_process': False,
'has_default_jabber': False,
'has_dependencies': False,
'has_documents': False,
'has_meetings': False,
'has_nonsession_materials': False,
'has_milestones': False,
'has_reviews': False,
'material_types': 'slides'},
u'dir': {
'about_page': 'ietf.group.views.group_about',
'admin_roles': 'chair,secr',
'agenda_type': None,
'customize_workflow': False,
'default_tab': 'ietf.group.views.group_about',
'has_chartering_process': False,
'has_default_jabber': False,
'has_dependencies': False,
'has_documents': False,
'has_meetings': False,
'has_nonsession_materials': False,
'has_milestones': False,
'has_reviews': False,
'material_types': 'slides'},
u'review': {
'about_page': 'ietf.group.views.group_about',
'admin_roles': 'chair,secr',
'agenda_type': None,
'customize_workflow': False,
'default_tab': 'ietf.group.views.review_requests',
'has_chartering_process': False,
'has_default_jabber': False,
'has_dependencies': False,
'has_documents': False,
'has_meetings': False,
'has_nonsession_materials': False,
'has_milestones': False,
'has_reviews': True,
'material_types': 'slides'},
u'iab': {
'about_page': 'ietf.group.views.group_about',
'admin_roles': 'chair',
'agenda_type': 'ietf',
'customize_workflow': False,
'default_tab': 'ietf.group.views.group_about',
'has_chartering_process': False,
'has_default_jabber': False,
'has_dependencies': False,
'has_documents': False,
'has_meetings': True,
'has_nonsession_materials': False,
'has_milestones': False,
'has_reviews': False,
'material_types': 'slides'},
u'ietf': {
'about_page': 'ietf.group.views.group_about',
'admin_roles': 'chair',
'agenda_type': 'ietf',
'customize_workflow': False,
'default_tab': 'ietf.group.views.group_about',
'has_chartering_process': False,
'has_default_jabber': False,
'has_dependencies': False,
'has_documents': False,
'has_meetings': True,
'has_nonsession_materials': False,
'has_milestones': False,
'has_reviews': False,
'material_types': 'slides'},
u'individ': {
'about_page': 'ietf.group.views.group_about',
'admin_roles': 'chair',
'agenda_type': None,
'customize_workflow': False,
'default_tab': 'ietf.group.views.group_about',
'has_chartering_process': False,
'has_default_jabber': False,
'has_dependencies': False,
'has_documents': False,
'has_meetings': False,
'has_nonsession_materials': False,
'has_milestones': False,
'has_reviews': False,
'material_types': 'slides'},
u'irtf': {
'about_page': 'ietf.group.views.group_about',
'admin_roles': 'chair',
'agenda_type': 'ietf',
'customize_workflow': False,
'default_tab': 'ietf.group.views.group_about',
'has_chartering_process': False,
'has_default_jabber': False,
'has_dependencies': False,
'has_documents': False,
'has_meetings': False,
'has_nonsession_materials': False,
'has_milestones': False,
'has_reviews': False,
'material_types': 'slides'},
u'isoc': {
'about_page': 'ietf.group.views.group_about',
'admin_roles': 'chair',
'agenda_type': None,
'customize_workflow': False,
'default_tab': 'ietf.group.views.group_about',
'has_chartering_process': False,
'has_default_jabber': False,
'has_dependencies': False,
'has_documents': False,
'has_meetings': False,
'has_nonsession_materials': False,
'has_milestones': False,
'has_reviews': False,
'material_types': 'slides'},
u'nomcom': {
'about_page': 'ietf.group.views.group_about',
'admin_roles': 'chair',
'agenda_type': 'side',
'customize_workflow': False,
'default_tab': 'ietf.group.views.group_about',
'has_chartering_process': False,
'has_default_jabber': False,
'has_dependencies': False,
'has_documents': False,
'has_meetings': False,
'has_nonsession_materials': False,
'has_milestones': False,
'has_reviews': False,
'material_types': 'slides'},
u'program': {
'about_page': 'ietf.group.views.group_about',
'admin_roles': 'lead',
'agenda_type': None,
'customize_workflow': False,
'default_tab': 'ietf.group.views.group_about',
'has_chartering_process': False,
'has_default_jabber': False,
'has_dependencies': False,
'has_documents': True,
'has_meetings': False,
'has_nonsession_materials': False,
'has_milestones': True,
'has_reviews': False,
'material_types': 'slides'},
u'rfcedtyp': {
'about_page': 'ietf.group.views.group_about',
'admin_roles': 'chair',
'agenda_type': 'side',
'customize_workflow': False,
'default_tab': 'ietf.group.views.group_about',
'has_chartering_process': False,
'has_default_jabber': False,
'has_dependencies': False,
'has_documents': False,
'has_meetings': False,
'has_nonsession_materials': False,
'has_milestones': False,
'has_reviews': False,
'material_types': 'slides'},
u'rg': {
'about_page': 'ietf.group.views.group_about',
'admin_roles': 'chair',
'agenda_type': 'ietf',
'customize_workflow': True,
'default_tab': 'ietf.group.views.group_documents',
'has_chartering_process': True,
'has_default_jabber': True,
'has_dependencies': True,
'has_documents': True,
'has_meetings': True,
'has_nonsession_materials': False,
'has_milestones': True,
'has_reviews': False,
'material_types': 'slides'},
u'sdo': {
'about_page': 'ietf.group.views.group_about',
'admin_roles': 'chair',
'agenda_type': None,
'customize_workflow': False,
'default_tab': 'ietf.group.views.group_about',
'has_chartering_process': False,
'has_default_jabber': False,
'has_dependencies': False,
'has_documents': False,
'has_meetings': False,
'has_nonsession_materials': False,
'has_milestones': False,
'has_reviews': False,
'material_types': 'slides'},
u'team': {
'about_page': 'ietf.group.views.group_about',
'admin_roles': 'chair',
'agenda_type': 'ietf',
'customize_workflow': False,
'default_tab': 'ietf.group.views.group_about',
'has_chartering_process': False,
'has_default_jabber': False,
'has_dependencies': False,
'has_documents': False,
'has_meetings': True,
'has_nonsession_materials': True,
'has_milestones': False,
'has_reviews': False,
'material_types': 'slides'},
u'wg': {
'about_page': 'ietf.group.views.group_about',
'admin_roles': 'chair',
'agenda_type': 'ietf',
'customize_workflow': True,
'default_tab': 'ietf.group.views.group_documents',
'has_chartering_process': True,
'has_default_jabber': True,
'has_dependencies': True,
'has_documents': True,
'has_meetings': True,
'has_nonsession_materials': False,
'has_milestones': True,
'has_reviews': False,
'material_types': 'slides'},
}
def forward(apps, schema_editor):
Group = apps.get_model('group', 'Group')
GroupTypeName = apps.get_model('name', 'GroupTypeName')
GroupFeatures = apps.get_model('group', 'GroupFeatures')
AgendaTypeName = apps.get_model('name', 'AgendaTypeName')
for type in group_type_features:
features = group_type_features[type]
features['type_id'] = type
if features['agenda_type']:
features['agenda_type'] = AgendaTypeName.objects.get(slug=features['agenda_type'])
GroupFeatures.objects.create(**features)
dir = GroupTypeName.objects.get(slug='dir')
review = GroupTypeName.objects.create(slug='review', name='Directorate (with reviews)', desc='', used=True, order=0)
review_teams = [ g.acronym for g in active_review_teams() ]
for group in Group.objects.filter(type=dir):
if group.acronym in review_teams:
group.type = review
group.save()
def reverse(apps, schema_editor):
Group = apps.get_model('group', 'Group')
GroupFeatures = apps.get_model('group', 'GroupFeatures')
GroupTypeName = apps.get_model('name', 'GroupTypeName')
dir = GroupTypeName.objects.get(slug='dir')
review = GroupTypeName.objects.get(slug='review')
for group in Group.objects.filter(type=review):
group.type = dir
group.save()
for entry in GroupFeatures.objects.all():
entry.delete()
review.delete()
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('group', '0002_groupfeatures_historicalgroupfeatures'),
('name', '0003_agendatypename_data'),
]
operations = [
migrations.RunPython(forward, reverse),
]
| 35.953177
| 120
| 0.607442
|
e1fcb82f9d4c6883ae6307079bc063645ff6e1c1
| 3,194
|
py
|
Python
|
haiku/_src/integration/jax_transforms_test.py
|
tirkarthi/dm-haiku
|
803671cf6ce5bc35fca7e6af89938579407e12ff
|
[
"Apache-2.0"
] | 1
|
2020-06-25T13:19:17.000Z
|
2020-06-25T13:19:17.000Z
|
haiku/_src/integration/jax_transforms_test.py
|
arita37/dm-haiku
|
66f9c69353a6259a3523875fdc24ca35c5f27131
|
[
"Apache-2.0"
] | null | null | null |
haiku/_src/integration/jax_transforms_test.py
|
arita37/dm-haiku
|
66f9c69353a6259a3523875fdc24ca35c5f27131
|
[
"Apache-2.0"
] | null | null | null |
# Lint as: python3
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for haiku._src.conformance.descriptors."""
import functools
from absl.testing import absltest
from absl.testing import parameterized
import haiku as hk
from haiku._src import test_utils
from haiku._src.integration import descriptors
import jax
import jax.numpy as jnp
import numpy as np
ModuleFn = descriptors.ModuleFn
DEFAULT_ATOL = 1e-5
CUSTOM_ATOL = {hk.nets.ResNet: 0.05, hk.nets.MobileNetV1: 0.05,
hk.nets.VectorQuantizer: 0.05, hk.nets.VectorQuantizerEMA: 0.05,
hk.BatchNorm: 1e-4}
class JaxTransformsTest(parameterized.TestCase):
@test_utils.combined_named_parameters(descriptors.ALL_MODULES)
def test_jit(self, module_fn: ModuleFn, shape, dtype):
rng = jax.random.PRNGKey(42)
if jnp.issubdtype(dtype, jnp.integer):
x = jax.random.randint(rng, shape, 0, np.prod(shape), dtype)
else:
x = jax.random.uniform(rng, shape, dtype)
def g(x):
return module_fn()(x)
f = hk.transform_with_state(g)
atol = CUSTOM_ATOL.get(descriptors.module_type(module_fn), DEFAULT_ATOL)
assert_allclose = functools.partial(np.testing.assert_allclose, atol=atol)
# Ensure initialization under jit is the same.
jax.tree_multimap(assert_allclose,
f.init(rng, x),
jax.jit(f.init)(rng, x))
# Ensure application under jit is the same.
params, state = f.init(rng, x)
jax.tree_multimap(assert_allclose,
f.apply(params, state, rng, x),
jax.jit(f.apply)(params, state, rng, x))
@test_utils.combined_named_parameters(descriptors.OPTIONAL_BATCH_MODULES)
def test_vmap(self, module_fn: ModuleFn, shape, dtype):
batch_size, shape = shape[0], shape[1:]
rng = jax.random.PRNGKey(42)
if jnp.issubdtype(dtype, jnp.integer):
sample = jax.random.randint(rng, shape, 0, np.prod(shape), dtype)
else:
sample = jax.random.uniform(rng, shape, dtype)
batch = jnp.broadcast_to(sample, (batch_size,) + sample.shape)
def g(x):
return module_fn()(x)
f = hk.transform_with_state(g)
# Ensure application under vmap is the same.
params, state = f.init(rng, sample)
v_apply = jax.vmap(f.apply, in_axes=(None, None, None, 0))
jax.tree_multimap(
lambda a, b: np.testing.assert_allclose(a, b, atol=DEFAULT_ATOL),
f.apply(params, state, rng, batch),
v_apply(params, state, rng, batch))
if __name__ == '__main__':
absltest.main()
| 34.717391
| 80
| 0.676581
|
8114ccc251dd026ba3b79dd15522f4abf9727e03
| 4,743
|
py
|
Python
|
evogtk/gui/widgetlib/trayicon.py
|
R3v1L/evogtk
|
9f951a08770e99ffd701a1994ba948aa8014f2af
|
[
"MIT"
] | null | null | null |
evogtk/gui/widgetlib/trayicon.py
|
R3v1L/evogtk
|
9f951a08770e99ffd701a1994ba948aa8014f2af
|
[
"MIT"
] | null | null | null |
evogtk/gui/widgetlib/trayicon.py
|
R3v1L/evogtk
|
9f951a08770e99ffd701a1994ba948aa8014f2af
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
###############################################################################
# Copyright (C) 2008 EVO Sistemas Libres <central@evosistemas.com>
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
###############################################################################
# trayicon
# Tray Icon types helper class
###############################################################################
# Python import
from gettext import lgettext as _
# GTK Imports
import gobject
import gtk
class TrayIcon(gtk.StatusIcon):
"""
Tray Icon widget class
"""
__gtype_name__ = 'TrayIcon'
def __init__(self,icon='pixmaps/icon.png',tooltip=None,menu=None,action=None,menucallback=None,visible=False):
"""
Class constructor
"""
# Parent class initialization
super(gtk.StatusIcon,self).__init__()
# Save menu for later use
self.__menu=menu
# Set icon image
self.set_from_file(icon)
# Set visibility
self.set_visible(visible)
# Set icon tooltip
if tooltip:
self.set_tooltip(tooltip)
# Bind menu callback
if action:
self.connect('activate',action)
if menu and not menucallback:
self.connect('popup-menu',self.__default_menu_callback)
elif menucallback:
self.connect('popup-menu',menucallback)
def __default_menu_callback(self,widget,button,timestamp):
"""
Show status icon menu
"""
if self.__menu.get_sensitive():
self.__menu.popup(None,None,None,1,0)
def __stop_blinking(self):
"""
Stop blinking for tray icon
"""
self.blink(False)
return False
def blink(self,status=True,duration=5000):
"""
Set blinking status of tray icon
"""
self.set_blinking(status)
if status and duration:
gobject.timeout_add(duration,self.__stop_blinking,priority=gobject.PRIORITY_HIGH)
def show(self):
"""
Shows tray icon
"""
self.set_visible(True)
def hide(self):
"""
Hide tray icon
"""
self.set_visible(False)
# Check appindicator support
try:
import appindicator
class AppIndicator(appindicator.Indicator):
"""
App indicator widget class
"""
def __init__(self,name,category=appindicator.CATEGORY_APPLICATION_STATUS,icon='stock_unknown',attention_icon='error',menu=None,iconpath=None):
"""
Class initialization
"""
super(AppIndicator,self).__init__(name,icon,category)
self.set_status (appindicator.STATUS_ACTIVE)
self.set_attention_icon(attention_icon)
if iconpath:
self.set_icon_theme_path(iconpath)
if menu:
self.set_menu(menu)
def __stop_blinking(self):
"""
Stop blinking for tray icon
"""
self.blink(False)
return False
def blink(self,status=True,duration=5000):
"""
Set attention status on app indicator
"""
if status:
self.set_status(appindicator.STATUS_ATTENTION)
if duration:
gobject.timeout_add(duration,self.__stop_blinking,priority=gobject.PRIORITY_HIGH)
else:
self.set_status(appindicator.STATUS_ACTIVE)
def show(self):
"""
Set active status on app indicator
"""
self.set_status(appindicator.STATUS_ACTIVE)
def hide(self):
"""
Set passive status on app indicator
"""
self.set_status(appindicator.STATUS_PASSIVE)
except:
class AppIndicator(object):
def __init__(self,*args,**kwargs):
raise Exception(_('EVOGTK: Can\'t load application indicator library (maybe python-appindicator is not installed)'))
| 32.486301
| 150
| 0.580645
|
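A minimal PyGTK sketch of the TrayIcon class above (the import path mirrors this row's file path; the icon path and callback are placeholders). blink() turns blinking on and schedules it to stop after the given duration.

import gtk
from evogtk.gui.widgetlib.trayicon import TrayIcon

def on_activate(status_icon):
    print('tray icon activated')

tray = TrayIcon(icon='pixmaps/icon.png', tooltip='Example application',
                action=on_activate, visible=True)
tray.blink(True, duration=3000)   # stop blinking automatically after ~3 s
gtk.main()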
e564a33c647273c0ee7c92c5854d0ada943c2871
| 11,971
|
py
|
Python
|
plugins/xafs/xafsft.py
|
bruceravel/xraylarch
|
a8179208872d43bd23453fa0c64680e11bc2b5ed
|
[
"BSD-3-Clause"
] | null | null | null |
plugins/xafs/xafsft.py
|
bruceravel/xraylarch
|
a8179208872d43bd23453fa0c64680e11bc2b5ed
|
[
"BSD-3-Clause"
] | null | null | null |
plugins/xafs/xafsft.py
|
bruceravel/xraylarch
|
a8179208872d43bd23453fa0c64680e11bc2b5ed
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
"""
XAFS Fourier transforms
"""
import numpy as np
from numpy import (pi, arange, zeros, ones, sin, cos,
exp, log, sqrt, where, interp, linspace)
# from numpy.fft import fft, ifft
from scipy.fftpack import fft, ifft
from scipy.special import i0 as bessel_i0
from larch import (Group, ValidateLarchPlugin, Make_CallArgs,
parse_group_args)
from larch_plugins.math import complex_phase
from larch_plugins.xafs import set_xafsGroup
MODNAME = '_xafs'
VALID_WINDOWS = ['han', 'fha', 'gau', 'kai', 'par', 'wel', 'sin', 'bes']
def ftwindow(x, xmin=None, xmax=None, dx=1, dx2=None,
window='hanning', _larch=None, **kws):
"""
create a Fourier transform window array.
Parameters:
-------------
x: 1-d array array to build window on.
xmin: starting x for FT Window
xmax: ending x for FT Window
dx: tapering parameter for FT Window
dx2: second tapering parameter for FT Window (=dx)
window: name of window type
Returns:
----------
1-d window array.
Notes:
-------
Valid Window names:
hanning cosine-squared taper
parzen linear taper
welch quadratic taper
gaussian Gaussian (normal) function window
sine sine function window
kaiser Kaiser-Bessel function-derived window
"""
if window is None:
window = VALID_WINDOWS[0]
nam = window.strip().lower()[:3]
if nam not in VALID_WINDOWS:
raise RuntimeError("invalid window name %s" % window)
dx1 = dx
if dx2 is None: dx2 = dx1
if xmin is None: xmin = min(x)
if xmax is None: xmax = max(x)
xstep = (x[-1] - x[0]) / (len(x)-1)
xeps = 1.e-4 * xstep
x1 = max(min(x), xmin - dx1 / 2.0)
x2 = xmin + dx1 / 2.0 + xeps
x3 = xmax - dx2 / 2.0 - xeps
x4 = min(max(x), xmax + dx2 / 2.0)
if nam == 'fha':
if dx1 < 0: dx1 = 0
if dx2 > 1: dx2 = 1
x2 = x1 + xeps + dx1*(xmax-xmin)/2.0
x3 = x4 - xeps - dx2*(xmax-xmin)/2.0
elif nam == 'gau':
dx1 = max(dx1, xeps)
def asint(val): return int((val+xeps)/xstep)
i1, i2, i3, i4 = asint(x1), asint(x2), asint(x3), asint(x4)
i1, i2 = max(0, i1), max(0, i2)
i3, i4 = min(len(x)-1, i3), min(len(x)-1, i4)
if i2 == i1: i1 = max(0, i2-1)
if i4 == i3: i3 = max(i2, i4-1)
x1, x2, x3, x4 = x[i1], x[i2], x[i3], x[i4]
if x1 == x2: x2 = x2+xeps
if x3 == x4: x4 = x4+xeps
# initial window
fwin = zeros(len(x))
if i3 > i2:
fwin[i2:i3] = ones(i3-i2)
# now finish making window
if nam in ('han', 'fha'):
fwin[i1:i2+1] = sin((pi/2)*(x[i1:i2+1]-x1) / (x2-x1))**2
fwin[i3:i4+1] = cos((pi/2)*(x[i3:i4+1]-x3) / (x4-x3))**2
elif nam == 'par':
fwin[i1:i2+1] = (x[i1:i2+1]-x1) / (x2-x1)
fwin[i3:i4+1] = 1 - (x[i3:i4+1]-x3) / (x4-x3)
elif nam == 'wel':
fwin[i1:i2+1] = 1 - ((x[i1:i2+1]-x2) / (x2-x1))**2
fwin[i3:i4+1] = 1 - ((x[i3:i4+1]-x3) / (x4-x3))**2
elif nam in ('kai', 'bes'):
cen = (x4+x1)/2
wid = (x4-x1)/2
arg = 1 - (x-cen)**2 / (wid**2)
arg[where(arg<0)] = 0
if nam == 'bes': # 'bes' : ifeffit 1.0 implementation of kaiser-bessel
fwin = bessel_i0(dx* sqrt(arg)) / bessel_i0(dx)
fwin[where(x<=x1)] = 0
fwin[where(x>=x4)] = 0
else: # better version
scale = max(1.e-10, bessel_i0(dx)-1)
fwin = (bessel_i0(dx * sqrt(arg)) - 1) / scale
elif nam == 'sin':
fwin[i1:i4+1] = sin(pi*(x4-x[i1:i4+1]) / (x4-x1))
elif nam == 'gau':
cen = (x4+x1)/2
fwin = exp(-(((x - cen)**2)/(2*dx1*dx1)))
return fwin
@ValidateLarchPlugin
@Make_CallArgs(["r", "chir"])
def xftr(r, chir=None, group=None, rmin=0, rmax=20, with_phase=False,
dr=1, dr2=None, rw=0, window='kaiser', qmax_out=None,
nfft=2048, kstep=0.05, _larch=None, **kws):
"""
reverse XAFS Fourier transform, from chi(R) to chi(q).
calculate reverse XAFS Fourier transform
This assumes that chir_re and (optional chir_im are
on a uniform r-grid given by r.
Parameters:
------------
r: 1-d array of distance, or group.
chir: 1-d array of chi(R)
group: output Group
qmax_out: highest *k* for output data (30 Ang^-1)
rweight: exponent for weighting spectra by r^rweight (0)
rmin: starting *R* for FT Window
rmax: ending *R* for FT Window
dr: tapering parameter for FT Window
dr2: second tapering parameter for FT Window
window: name of window type
nfft: value to use for N_fft (2048).
kstep: value to use for delta_k (0.05).
with_phase: output the phase as well as magnitude, real, imag [False]
Returns:
---------
None -- outputs are written to supplied group.
Notes:
-------
Arrays written to output group:
rwin window Omega(R) (length of input chi(R)).
q uniform array of k, out to qmax_out.
chiq complex array of chi(k).
chiq_mag magnitude of chi(k).
chiq_re real part of chi(k).
chiq_im imaginary part of chi(k).
chiq_pha phase of chi(k) if with_phase=True
                (a noticeable performance hit)
Supports First Argument Group convention (with group member names 'r' and 'chir')
"""
if 'rweight' in kws:
rw = kws['rweight']
r, chir, group = parse_group_args(r, members=('r', 'chir'),
defaults=(chir,), group=group,
fcn_name='xftr')
rstep = r[1] - r[0]
kstep = pi/(rstep*nfft)
scale = 1.0
cchir = zeros(nfft, dtype='complex128')
r_ = rstep * arange(nfft, dtype='float64')
cchir[0:len(chir)] = chir
if chir.dtype == np.dtype('complex128'):
scale = 0.5
win = ftwindow(r_, xmin=rmin, xmax=rmax, dx=dr, dx2=dr2, window=window)
out = scale * xftr_fast( cchir*win * r_**rw, kstep=kstep, nfft=nfft)
if qmax_out is None: qmax_out = 30.0
q = linspace(0, qmax_out, int(1.05 + qmax_out/kstep))
nkpts = len(q)
group = set_xafsGroup(group, _larch=_larch)
group.q = q
mag = sqrt(out.real**2 + out.imag**2)
group.rwin = win[:len(chir)]
group.chiq = out[:nkpts]
group.chiq_mag = mag[:nkpts]
group.chiq_re = out.real[:nkpts]
group.chiq_im = out.imag[:nkpts]
if with_phase:
group.chiq_pha = complex_phase(out[:nkpts])
@ValidateLarchPlugin
@Make_CallArgs(["k", "chi"])
def xftf(k, chi=None, group=None, kmin=0, kmax=20, kweight=0,
dk=1, dk2=None, with_phase=False, window='kaiser', rmax_out=10,
nfft=2048, kstep=0.05, _larch=None, **kws):
"""
forward XAFS Fourier transform, from chi(k) to chi(R), using
common XAFS conventions.
Parameters:
-----------
k: 1-d array of photo-electron wavenumber in Ang^-1 or group
chi: 1-d array of chi
group: output Group
rmax_out: highest R for output data (10 Ang)
kweight: exponent for weighting spectra by k**kweight
kmin: starting k for FT Window
kmax: ending k for FT Window
dk: tapering parameter for FT Window
dk2: second tapering parameter for FT Window
window: name of window type
nfft: value to use for N_fft (2048).
kstep: value to use for delta_k (0.05 Ang^-1).
with_phase: output the phase as well as magnitude, real, imag [False]
Returns:
---------
None -- outputs are written to supplied group.
Notes:
-------
Arrays written to output group:
kwin window function Omega(k) (length of input chi(k)).
r uniform array of R, out to rmax_out.
chir complex array of chi(R).
chir_mag magnitude of chi(R).
chir_re real part of chi(R).
chir_im imaginary part of chi(R).
chir_pha phase of chi(R) if with_phase=True
                (a noticeable performance hit)
Supports First Argument Group convention (with group member names 'k' and 'chi')
"""
# allow kweight keyword == kw
if 'kw' in kws:
kweight = kws['kw']
k, chi, group = parse_group_args(k, members=('k', 'chi'),
defaults=(chi,), group=group,
fcn_name='xftf')
cchi, win = xftf_prep(k, chi, kmin=kmin, kmax=kmax, kweight=kweight,
dk=dk, dk2=dk2, nfft=nfft, kstep=kstep,
window=window, _larch=_larch)
out = xftf_fast(cchi*win, kstep=kstep, nfft=nfft)
rstep = pi/(kstep*nfft)
irmax = min(nfft/2, int(1.01 + rmax_out/rstep))
group = set_xafsGroup(group, _larch=_larch)
r = rstep * arange(irmax)
mag = sqrt(out.real**2 + out.imag**2)
group.kwin = win[:len(chi)]
group.r = r[:irmax]
group.chir = out[:irmax]
group.chir_mag = mag[:irmax]
group.chir_re = out.real[:irmax]
group.chir_im = out.imag[:irmax]
if with_phase:
group.chir_pha = complex_phase(out[:irmax])
@ValidateLarchPlugin
def xftf_prep(k, chi, kmin=0, kmax=20, kweight=2, dk=1, dk2=None,
window='kaiser', nfft=2048, kstep=0.05, _larch=None):
"""
calculate weighted chi(k) on uniform grid of len=nfft, and the
ft window.
Returns weighted chi, window function which can easily be multiplied
and used in xftf_fast.
"""
if dk2 is None: dk2 = dk
npts = int(1.01 + max(k)/kstep)
k_max = max(max(k), kmax+dk2)
k_ = kstep * np.arange(int(1.01+k_max/kstep), dtype='float64')
chi_ = interp(k_, k, chi)
win = ftwindow(k_, xmin=kmin, xmax=kmax, dx=dk, dx2=dk2, window=window)
return ((chi_[:npts] *k_[:npts]**kweight), win[:npts])
def xftf_fast(chi, nfft=2048, kstep=0.05, _larch=None, **kws):
"""
calculate forward XAFS Fourier transform. Unlike xftf(),
this assumes that:
1. data is already on a uniform grid
2. any windowing and/or kweighting has been applied.
and simply returns the complex chi(R), not setting any larch data.
This is useful for repeated FTs, as inside loops.
Parameters:
------------
chi: 1-d array of chi to be transformed
nfft: value to use for N_fft (2048).
kstep: value to use for delta_k (0.05).
Returns:
--------
complex 1-d array chi(R)
"""
cchi = zeros(nfft, dtype='complex128')
cchi[0:len(chi)] = chi
return (kstep / sqrt(pi)) * fft(cchi)[:int(nfft/2)]
def xftr_fast(chir, nfft=2048, kstep=0.05, _larch=None, **kws):
"""
calculate reverse XAFS Fourier transform, from chi(R) to
chi(q), using common XAFS conventions. This version demands
chir be the complex chi(R) as created from xftf().
It returns the complex array of chi(q) without putting any
values into an output group.
Parameters:
-------------
chir: 1-d array of chi(R) to be transformed
nfft: value to use for N_fft (2048).
kstep: value to use for delta_k (0.05).
Returns:
----------
complex 1-d array for chi(q).
This is useful for repeated FTs, as inside loops.
"""
cchi = zeros(nfft, dtype='complex128')
cchi[0:len(chir)] = chir
return (4*sqrt(pi)/kstep) * ifft(cchi)[:int(nfft/2)]
def registerLarchPlugin():
return (MODNAME, {'xftf': xftf,
'xftr': xftr,
'xftf_prep': xftf_prep,
'xftf_fast': xftf_fast,
'xftr_fast': xftr_fast,
'ftwindow': ftwindow,
})
| 33.721127
| 85
| 0.558516
|
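A small numerical sketch of the window and fast-FT helpers above (the module path mirrors this row's file path and assumes larch and its plugins are installed; the synthetic chi(k) is a single damped sine, which should transform to a peak near R = 4 Ang).

import numpy as np
from larch_plugins.xafs.xafsft import ftwindow, xftf_fast

kstep, nfft = 0.05, 2048
k = kstep * np.arange(401)                        # 0 .. 20 Ang^-1
chi = np.sin(2 * 4.0 * k) * np.exp(-0.02 * k**2)  # toy single shell at R ~ 4 Ang

win = ftwindow(k, xmin=3, xmax=15, dx=2, window='hanning')
chir = xftf_fast(chi * k**2 * win, nfft=nfft, kstep=kstep)

r = (np.pi / (kstep * nfft)) * np.arange(len(chir))
print(r[np.argmax(np.abs(chir))])                 # peaks close to 4 Ang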