column                type            min     max
max_stars_repo_path   stringlengths   3       269
max_stars_repo_name   stringlengths   4       119
max_stars_count       int64           0       191k
id                    stringlengths   1       7
content               stringlengths   6       1.05M
score                 float64         0.23    5.13
int_score             int64           0       5
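Each row below follows this schema: one source file per row, with its repository path and name, star count, a sample id, the raw file content, and a float/int quality score pair. As a minimal sketch of how such rows could be consumed, the snippet below assumes the table has been exported as a JSON-lines file named samples.jsonl; that file name, the score threshold, and the JSON-lines layout are illustrative assumptions, not part of the dataset description itself.

import json

def iter_high_quality_samples(path="samples.jsonl", min_int_score=3):
    # Walk the rows of the column schema above and keep higher-quality Python
    # files. The file name, threshold, and JSON-lines layout are assumptions
    # made for this example only.
    with open(path, encoding="utf-8") as handle:
        for line in handle:
            row = json.loads(line)
            if row["int_score"] >= min_int_score and row["max_stars_repo_path"].endswith(".py"):
                yield row["id"], row["max_stars_repo_name"], row["content"]

if __name__ == "__main__":
    for sample_id, repo, content in iter_high_quality_samples():
        print(sample_id, repo, len(content))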
server/dao/messageDao.py
ZibingZhang/Level-Up
0
12400
<gh_stars>0
from constants import cursor


def add_message(player_name, message):
    cursor.execute(
        "INSERT INTO levelup.messages ("
        "SENDER, MESSAGE"
        ") VALUES ("
        "%s, %s"
        ")",
        (player_name, message)
    )


def reset():
    cursor.execute(
        "DELETE FROM levelup.messages"
    )
    cursor.execute(
        "ALTER TABLE levelup.messages AUTO_INCREMENT=1"
    )


def get_largest_id():
    cursor.execute(
        "SELECT MAX(ID) FROM levelup.messages"
    )
    id = cursor.fetchall()[0][0]
    return int(id) if id is not None else 0


def get_next_messages(message_id):
    cursor.execute(
        "SELECT SENDER, MESSAGE FROM levelup.messages WHERE id>%s ORDER BY ID ASC",
        (message_id, )
    )
    return cursor.fetchall()
2.59375
3
skeletrack/bbox.py
mpeven/skeletal-tracker
0
12401
import numpy as np
import shapely.geometry as geom


class Bbox:
    def __init__(self, name, part_id, depth_image, xyz, box_size, projection):
        if not isinstance(xyz, np.ndarray):
            raise ValueError("xyz must be an np.ndarray")
        self.name = name
        self.id = part_id
        self.center = np.array([xyz[0], xyz[1]])
        self.z = xyz[2]
        self.im_d = depth_image
        self.im_d[self.im_d == 0] = 255
        x_delta_scaled = box_size[0]/2
        self.weight = 1.0
        y_delta_scaled = box_size[1]/2
        self.xmin, self.xmax = xyz[0]-x_delta_scaled, xyz[0]+x_delta_scaled
        self.ymin, self.ymax = xyz[1]-y_delta_scaled, xyz[1]+y_delta_scaled
        self.poly = geom.box(self.xmin, self.ymin, self.xmax, self.ymax)
        self.color_min = (int(projection['fx']*self.xmin/xyz[2] + projection['cx']),
                          int(projection['fy']*self.ymin/xyz[2] + projection['cy']))
        self.color_max = (int(projection['fx']*self.xmax/xyz[2] + projection['cx']),
                          int(projection['fy']*self.ymax/xyz[2] + projection['cy']))
        self.depth_min = (int(projection['fx_d']*self.xmin/xyz[2] + projection['cx_d']),
                          int(projection['fy_d']*self.ymin/xyz[2] + projection['cy_d']))
        self.depth_max = (int(projection['fx_d']*self.xmax/xyz[2] + projection['cx_d']),
                          int(projection['fy_d']*self.ymax/xyz[2] + projection['cy_d']))

    def __str__(self):
        return "{{{: 1.4f},{: 1.4f}}}, {{{: 1.4f},{: 1.4f}}}".format(self.xmin, self.ymin, self.xmax, self.ymax)

    def __repr__(self):
        return "(bbox: {{{: 1.4f},{: 1.4f}}}, {{{: 1.4f},{: 1.4f}}})".format(self.xmin, self.ymin, self.xmax, self.ymax)

    def size(self):
        return (self.xmax - self.xmin) * (self.ymax - self.ymin)

    def get_bb_depth_matrix(self):
        """ Get the portion of the depth image inside the bounding box """
        min_x, max_x = sorted((self.depth_min[0], self.depth_max[0]))
        min_y, max_y = sorted((self.depth_min[1], self.depth_max[1]))
        bounded_im = self.im_d[min_y: max_y+1, min_x: max_x+1]
        return bounded_im

    def overlap(self, bb2):
        dx = min(self.xmax, bb2.xmax) - max(self.xmin, bb2.xmin)
        dy = min(self.ymax, bb2.ymax) - max(self.ymin, bb2.ymin)
        if (dx>=0) and (dy>=0):
            return dx*dy
        return 0

    def p_over(self, bb2):
        return self.overlap(bb2)/(min(self.size(), bb2.size()))

    def p_depth(self, bb2):
        bounded_im1 = self.get_bb_depth_matrix()
        bounded_im2 = bb2.get_bb_depth_matrix()
        print(bounded_im1.empty or bounded_im2.empty)
        mean1 = np.mean(bounded_im1)
        mean2 = np.mean(bounded_im2)
        stdev1 = np.std(bounded_im1)
        stdev2 = np.std(bounded_im2)
        half_negative_square_of_mean_difference = -1/2 * (mean1 - mean2) ** 2
        term1_power = half_negative_square_of_mean_difference / (stdev1 ** 2)
        term2_power = half_negative_square_of_mean_difference / (stdev2 ** 2)
        out = (np.exp(term1_power) + np.exp(term2_power))/2
        return out

    def prob(self, bb2, alpha):
        return alpha * self.p_over(bb2) + (1-alpha) * self.p_depth(bb2)
2.40625
2
apps/snippet/admin.py
AniPython/ani
0
12402
<gh_stars>0
from django.contrib import admin

from .models import Tag, Article


@admin.register(Tag)
class TagAdmin(admin.ModelAdmin):
    list_display = ('name', 'order')
    list_editable = ('order',)


@admin.register(Article)
class ArticleAdmin(admin.ModelAdmin):
    list_display = ['title', 'author']
    readonly_fields = ['create_time', 'update_time']
1.890625
2
test/functional/test_framework/script_util.py
TopoX84/newlux
1,389
12403
#!/usr/bin/env python3
# Copyright (c) 2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Useful Script constants and utils."""
from test_framework.script import CScript

# To prevent a "tx-size-small" policy rule error, a transaction has to have a
# non-witness size of at least 82 bytes (MIN_STANDARD_TX_NONWITNESS_SIZE in
# src/policy/policy.h). Considering a Tx with the smallest possible single
# input (blank, empty scriptSig), and with an output omitting the scriptPubKey,
# we get to a minimum size of 60 bytes:
#
# Tx Skeleton: 4 [Version] + 1 [InCount] + 1 [OutCount] + 4 [LockTime] = 10 bytes
# Blank Input: 32 [PrevTxHash] + 4 [Index] + 1 [scriptSigLen] + 4 [SeqNo] = 41 bytes
# Output: 8 [Amount] + 1 [scriptPubKeyLen] = 9 bytes
#
# Hence, the scriptPubKey of the single output has to have a size of at
# least 22 bytes, which corresponds to the size of a P2WPKH scriptPubKey.
# The following script constant consists of a single push of 21 bytes of 'a':
# <PUSH_21> <21-bytes of 'a'>
# resulting in a 22-byte size. It should be used whenever (small) fake
# scriptPubKeys are needed, to guarantee that the minimum transaction size is
# met.
DUMMY_P2WPKH_SCRIPT = CScript([b'a' * 21])
2
2
linux-distro/package/nuxleus/Source/Vendor/Microsoft/IronPython-2.0.1/Lib/Kamaelia/Protocol/Torrent/TorrentIPC.py
mdavid/nuxleus
1
12404
<gh_stars>1-10
#!/usr/bin/env python
#
# Copyright (C) 2006 British Broadcasting Corporation and Kamaelia Contributors(1)
# All Rights Reserved.
#
# You may only modify and redistribute this under the terms of any of the
# following licenses(2): Mozilla Public License, V1.1, GNU General
# Public License, V2.0, GNU Lesser General Public License, V2.1
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
#     http://kamaelia.sourceforge.net/AUTHORS - please extend this file,
#     not this notice.
# (2) Reproduced in the COPYING file, and at:
#     http://kamaelia.sourceforge.net/COPYING
# Under section 3.5 of the MPL, we are using this text since we deem the MPL
# notice inappropriate for this file. As per MPL/GPL/LGPL removal of this
# notice is prohibited.
#
# Please contact us via: <EMAIL>
# to discuss alternative licensing.
# -------------------------------------------------------------------------
# Licensed to the BBC under a Contributor Agreement: RJL

"""(Bit)Torrent IPC messages"""

from Kamaelia.BaseIPC import IPC

# ====================== Messages to send to TorrentMaker =======================
class TIPCMakeTorrent(IPC):
    "Create a .torrent file"
    Parameters = [ "trackerurl", "log2piecesizebytes", "title", "comment", "srcfile" ]
    #Parameters:
    # trackerurl - the URL of the BitTorrent tracker that will be used
    # log2piecesizebytes - log base 2 of the hash-piece-size, sensible value: 18
    # title - name of the torrent
    # comment - a field that can be read by users when they download the torrent
    # srcfile - the file that the .torrent file will have metainfo about

# ========= Messages for TorrentPatron to send to TorrentService ================
# a message for TorrentClient (i.e. to be passed on by TorrentService)
class TIPCServicePassOn(IPC):
    "Add a client to TorrentService"
    Parameters = [ "replyService", "message" ]
    #Parameters: replyService, message

# request to add a TorrentPatron to a TorrentService's list of clients
class TIPCServiceAdd(IPC):
    "Add a client to TorrentService"
    Parameters = [ "replyService" ]
    #Parameters: replyService

# request to remove a TorrentPatron from a TorrentService's list of clients
class TIPCServiceRemove(IPC):
    "Remove a client from TorrentService"
    Parameters = [ "replyService" ]
    #Parameters: replyService

# ==================== Messages for TorrentClient to produce ====================
# a new torrent has been added with id torrentid
class TIPCNewTorrentCreated(IPC):
    "New torrent %(torrentid)d created in %(savefolder)s"
    Parameters = [ "torrentid", "savefolder" ]
    #Parameters: torrentid, savefolder

# the torrent you requested me to download is already being downloaded as torrentid
class TIPCTorrentAlreadyDownloading(IPC):
    "That torrent is already downloading!"
    Parameters = [ "torrentid" ]
    #Parameters: torrentid

# for some reason the torrent could not be started
class TIPCTorrentStartFail(object):
    "Torrent failed to start!"
    Parameters = []
    #Parameters: (none)

# message containing the current status of a particular torrent
class TIPCTorrentStatusUpdate(IPC):
    "Current status of a single torrent"
    def __init__(self, torrentid, statsdictionary):
        super(TIPCTorrentStatusUpdate, self).__init__()
        self.torrentid = torrentid
        self.statsdictionary = statsdictionary

    def __str__(self):
        return "Torrent %d status : %s" % (self.torrentid, str(int(self.statsdictionary.get("fractionDone",0) * 100)) + "%")

# ====================== Messages to send to TorrentClient ======================
# create a new torrent (a new download session) from a .torrent file's binary contents
class TIPCCreateNewTorrent(IPC):
    "Create a new torrent"
    Parameters = [ "rawmetainfo" ]
    #Parameters: rawmetainfo - the contents of a .torrent file

# close a running torrent
class TIPCCloseTorrent(IPC):
    "Close torrent %(torrentid)d"
    Parameters = [ "torrentid" ]
    #Parameters: torrentid
1.609375
2
srcf/database/schema.py
danielchriscarter/srcf-python
0
12405
from __future__ import print_function, unicode_literals from binascii import unhexlify from enum import Enum import os import pwd import six from sqlalchemy import Column, Integer, String, Boolean, DateTime, Text, Enum as SQLAEnum, Numeric from sqlalchemy import event from sqlalchemy.dialects.postgresql import HSTORE from sqlalchemy.schema import Table, FetchedValue, CheckConstraint, ForeignKey, DDL from sqlalchemy.orm import relationship, backref from sqlalchemy.ext.declarative import declarative_base from sqlalchemy.ext.hybrid import hybrid_property from sqlalchemy.ext.mutable import MutableDict from .compat import MemberCompat, SocietyCompat, AdminsSetCompat __all__ = ["Member", "Society", "PendingAdmin", "POSTGRES_USER", "RESTRICTED"] # Should we make the notes & danger flags, and pending-admins # tables available? # These postgres roles have special permissions / are mentioned # in the schema. Everyone else should connect as 'nobody' schema_users = ("root", "srcf-admin", "hades") # When connecting over a unix socket, postgres uses `getpeereid` # for authentication; this is the number that matters: euid_name = pwd.getpwuid(os.geteuid()).pw_name if euid_name in schema_users or euid_name.endswith("-adm"): POSTGRES_USER = euid_name else: POSTGRES_USER = "nobody" is_root = POSTGRES_USER == "root" or POSTGRES_USER.endswith("-adm") is_webapp = POSTGRES_USER == "srcf-admin" is_hades = POSTGRES_USER == "hades" RESTRICTED = not is_root def _hexdump(raw): rendered = "".join(chr(x) if len(repr(chr(x))) == 3 else "." for x in range(256)) safe = [] for pos in range(0, len(raw), 16): line = raw[pos:pos + 16] hex_ = " ".join("{:02x}".format(c) for c in line) if len(line) > 8: hex_ = "{} {}".format(hex_[:24], hex_[24:]) chars = "".join(rendered[c] if c < len(rendered) else "." for c in line) safe.append("{:08x} {:48s} |{}|".format(pos, hex_, chars)) return "\n".join(safe) CRSID_TYPE = String(7) SOCIETY_TYPE = String(16) Base = declarative_base() class MailHandler(Enum): """ Choices for handling of email sent to `@srcf.net` addresses. """ forward = 1 """ Forward emails to the user's registered contact address. """ pip = 2 """ Process emails using Exim. """ hades = 3 """ Deliver emails to the user's Hades mailbox. 
""" class Member(Base, MemberCompat): __tablename__ = 'members' crsid = Column(CRSID_TYPE, CheckConstraint('crsid = lower(crsid)'), primary_key=True) surname = Column(String(100)) preferred_name = Column(String(100)) member = Column(Boolean, nullable=False) user = Column(Boolean, nullable=False) disk_quota_gb = Column(Integer, FetchedValue()) disk_usage_gb = Column(Numeric, FetchedValue()) disk_usage_updated = Column(DateTime(timezone=True), FetchedValue()) if is_root or is_webapp: uid = Column(Integer, FetchedValue()) gid = Column(Integer, FetchedValue()) email = Column(String(100), CheckConstraint("email ~ E'@'"), unique=True) # FetchedValue: these columns are set by triggers (see below) joined = Column(DateTime(timezone=True), FetchedValue()) modified = Column(DateTime(timezone=True), FetchedValue()) danger = Column(Boolean, nullable=False, server_default='f') notes = Column(Text, nullable=False, server_default='') domains = relationship("Domain", primaryjoin="foreign(Domain.owner) == Member.crsid") if is_root or is_webapp or is_hades: mail_handler = Column(SQLAEnum(*(handler.name for handler in MailHandler)), nullable=False, server_default='pip') __table_args__ = ( CheckConstraint(""" (NOT member OR (surname IS NOT NULL AND preferred_name IS NOT NULL AND email IS NOT NULL AND joined IS NOT NULL)) """, name="members_must_have_details"), CheckConstraint('member OR NOT "user"', name="users_must_be_members"), ) def __str__(self): return self.crsid def __repr__(self): if is_root or is_webapp: m = ' member' if self.member else ' ex-member' u = ' user' if self.user else '' flags = m + u r = '<Member {0} {1} {2}{3}>'.format(self.crsid, self.name, self.email, flags) else: r = '<Member {0} {1}>'.format(self.crsid, self.name) if not six.PY3: r = r.encode("utf8") return r def __eq__(self, other): if not isinstance(other, Member): return False else: return self.crsid == other.crsid def __hash__(self): return hash(self.crsid) @hybrid_property def name(self): """Joins :attr:`preferred_name` and :attr:`surname`""" if self.preferred_name and self.surname: return self.preferred_name + " " + self.surname else: return self.preferred_name or self.surname or None society_admins = Table( 'society_admins', Base.metadata, Column('crsid', CRSID_TYPE, ForeignKey('members.crsid'), primary_key=True), Column('society', SOCIETY_TYPE, ForeignKey('societies.society'), primary_key=True), ) class Society(Base, SocietyCompat): __tablename__ = "societies" society = Column(SOCIETY_TYPE, CheckConstraint('society = lower(society)'), primary_key=True) description = Column(String(100), nullable=False) disk_quota_gb = Column(Integer, FetchedValue()) disk_usage_gb = Column(Numeric, FetchedValue()) disk_usage_updated = Column(DateTime(timezone=True), FetchedValue()) if is_root or is_webapp: uid = Column(Integer, FetchedValue()) gid = Column(Integer, FetchedValue()) joined = Column(DateTime(timezone=True), FetchedValue()) modified = Column(DateTime(timezone=True), FetchedValue()) role_email = Column(String(100), CheckConstraint("email ~ E'@'")) danger = Column(Boolean, nullable=False, server_default='f') notes = Column(Text, nullable=False, server_default='') admins = relationship("Member", secondary=society_admins, collection_class=AdminsSetCompat, backref=backref("societies", collection_class=set)) if is_root or is_webapp: pending_admins = relationship("PendingAdmin", backref=backref("society")) domains = relationship("Domain", primaryjoin="foreign(Domain.owner) == Society.society") def __str__(self): return self.society def 
__repr__(self): orphaned = '' if self.admins else ' orphaned' return '<Society {0}{1}>'.format(self.society, orphaned) def __eq__(self, other): if not isinstance(other, Society): return False else: return self.society == other.society def __hash__(self): return hash(self.society) def __contains__(self, other): if isinstance(other, Member): return other in self.admins elif isinstance(other, six.string_types): return other in self.admin_crsids else: return False @property def admin_crsids(self): """:attr:`admins`, as a set of strings (crsids)""" return frozenset(m.crsid for m in self.admins) @hybrid_property def email(self): """<EMAIL> address""" return self.society + <EMAIL>" if is_root or is_webapp: class PendingAdmin(Base): __tablename__ = "pending_society_admins" # There is no ForeignKey constraint here because this table exists to # reference users that don't exist yet. crsid = Column(CRSID_TYPE, CheckConstraint('crsid = lower(crsid)'), primary_key=True) society_society = Column(SOCIETY_TYPE, ForeignKey('societies.society'), name="society", primary_key=True) def __str__(self): return "{0} {1}".format(self.crsid, self.society.society) def __repr__(self): return '<PendingAdmin {0} {1}>'.format(self.crsid, self.society.society) class Domain(Base): __tablename__ = "domains" id = Column(Integer, primary_key=True) class_ = Column("class", String(7), nullable=False) owner = Column(String(16), nullable=False) domain = Column(String(256), nullable=False) root = Column(String(256)) wild = Column(Boolean, nullable=False, server_default='f') danger = Column(Boolean, nullable=False, server_default='f') last_good = Column(DateTime(timezone=True)) def __str__(self): return self.domain def __repr__(self): return "<{}: {} ({} {}){}{}>".format(self.__class__.__name__, self.domain, self.class_, self.owner, " @ {}".format(repr(self.root)) if self.root else "", " wild" if self.wild else "") class HTTPSCert(Base): __tablename__ = "https_certs" id = Column(Integer, primary_key=True) domain = Column(String(256), nullable=False) name = Column(String(32)) def __str__(self): return self.domain def __repr__(self): return "<{}: {} ({})>".format(self.__class__.__name__, self.domain, self.name) JobState = SQLAEnum('unapproved', 'queued', 'running', 'done', 'failed', 'withdrawn', name='job_state') LogType = SQLAEnum('created', 'started', 'progress', 'output', 'done', 'failed', 'note', name='log_type') LogLevel = SQLAEnum('debug', 'info', 'warning', 'error', 'critical', name='log_level') event.listen( Base.metadata, "before_create", DDL("CREATE EXTENSION hstore") ) class Job(Base): __tablename__ = 'jobs' job_id = Column(Integer, primary_key=True) owner_crsid = Column(CRSID_TYPE, ForeignKey("members.crsid")) owner = relationship("Member") state = Column(JobState, nullable=False, server_default='unapproved') state_message = Column(Text) created_at = Column(DateTime) type = Column(String(100), nullable=False) args = Column(MutableDict.as_mutable(HSTORE), nullable=False) environment = Column(Text) class JobLog(Base): __tablename__ = 'job_log' log_id = Column(Integer, primary_key=True) job_id = Column(Integer, ForeignKey("jobs.job_id")) time = Column(DateTime) type = Column(LogType) level = Column(LogLevel) message = Column(Text) raw = Column(Text) @property def raw_safe(self): if not self.raw.startswith("\\x"): return self.raw raw = unhexlify(self.raw[2:]) try: return raw.decode("utf-8") except UnicodeDecodeError: return "[Could not decode output as UTF-8]\n{}".format(_hexdump(raw)) else: PendingAdmin = None LogLevel = 
None Domain = None HTTPSCert = None JobState = None Job = None JobLog = None def dump_schema(): from sqlalchemy import create_engine import os.path directory = os.path.dirname(__file__) with open(os.path.join(directory, "triggers.sql")) as f: triggers = f.read() with open(os.path.join(directory, "grants.sql")) as f: grants = f.read() event.listen( Base.metadata, "after_create", DDL(triggers) ) event.listen( Base.metadata, "after_create", DDL(grants) ) def dump(sql, *multiparams, **params): print(sql.compile(dialect=engine.dialect), ";") engine = create_engine('postgresql://', strategy='mock', executor=dump) Base.metadata.create_all(engine, checkfirst=False) if __name__ == "__main__": dump_schema()
2.109375
2
tests/test_grid.py
ascillitoe/pyvista
0
12406
import os import numpy as np import pytest import vtk import pyvista from pyvista import examples from pyvista.plotting import system_supports_plotting beam = pyvista.UnstructuredGrid(examples.hexbeamfile) # create structured grid x = np.arange(-10, 10, 2) y = np.arange(-10, 10, 2) z = np.arange(-10, 10, 2) x, y, z = np.meshgrid(x, y, z) sgrid = pyvista.StructuredGrid(x, y, z) try: test_path = os.path.dirname(os.path.abspath(__file__)) test_data_path = os.path.join(test_path, 'test_data') except: test_path = '/home/alex/afrl/python/source/pyvista/tests' def test_volume(): assert beam.volume > 0.0 @pytest.mark.skipif(not system_supports_plotting(), reason="Requires system to support plotting") def test_struct_example(): # create and plot structured grid grid = examples.load_structured() cpos = grid.plot(off_screen=True) # basic plot assert isinstance(cpos, pyvista.CameraPosition) # Plot mean curvature cpos_curv = grid.plot_curvature(off_screen=True) assert isinstance(cpos_curv, pyvista.CameraPosition) def test_init_from_structured(): unstruct_grid = pyvista.UnstructuredGrid(sgrid) assert unstruct_grid.points.shape[0] == x.size assert np.all(unstruct_grid.celltypes == 12) def test_init_from_unstructured(): grid = pyvista.UnstructuredGrid(beam, deep=True) grid.points += 1 assert not np.any(grid.points == beam.points) def test_init_bad_input(): with pytest.raises(Exception): unstruct_grid = pyvista.UnstructuredGrid(np.array(1)) with pytest.raises(Exception): unstruct_grid = pyvista.UnstructuredGrid(np.array(1), np.array(1), np.array(1), 'woa') def test_init_from_arrays(): offset = np.array([0, 9], np.int8) cells = np.array([8, 0, 1, 2, 3, 4, 5, 6, 7, 8, 8, 9, 10, 11, 12, 13, 14, 15]) cell_type = np.array([vtk.VTK_HEXAHEDRON, vtk.VTK_HEXAHEDRON], np.int32) cell1 = np.array([[0, 0, 0], [1, 0, 0], [1, 1, 0], [0, 1, 0], [0, 0, 1], [1, 0, 1], [1, 1, 1], [0, 1, 1]]) cell2 = np.array([[0, 0, 2], [1, 0, 2], [1, 1, 2], [0, 1, 2], [0, 0, 3], [1, 0, 3], [1, 1, 3], [0, 1, 3]]) points = np.vstack((cell1, cell2)).astype(np.int32) grid = pyvista.UnstructuredGrid(offset, cells, cell_type, points) assert grid.n_cells == 2 assert np.allclose(grid.offset, offset) def test_surface_indices(): surf = beam.extract_surface() surf_ind = surf.point_arrays['vtkOriginalPointIds'] assert np.allclose(surf_ind, beam.surface_indices()) def test_extract_feature_edges(): edges = beam.extract_feature_edges(90) assert edges.n_points edges = beam.extract_feature_edges(180) assert not edges.n_points @pytest.mark.parametrize('binary', [True, False]) @pytest.mark.parametrize('extension', ['vtu', 'vtk']) def test_save(extension, binary, tmpdir): filename = str(tmpdir.mkdir("tmpdir").join('tmp.%s' % extension)) beam.save(filename, binary) grid = pyvista.UnstructuredGrid(filename) assert grid.cells.shape == beam.cells.shape assert grid.points.shape == beam.points.shape grid = pyvista.read(filename) assert grid.cells.shape == beam.cells.shape assert grid.points.shape == beam.points.shape assert isinstance(grid, pyvista.UnstructuredGrid) def test_init_bad_filename(): filename = os.path.join(test_path, 'test_grid.py') with pytest.raises(Exception): grid = pyvista.UnstructuredGrid(filename) with pytest.raises(Exception): grid = pyvista.UnstructuredGrid('not a file') def test_save_bad_extension(): with pytest.raises(Exception): grid = pyvista.UnstructuredGrid('file.abc') def test_linear_copy(): # need a grid with quadratic cells lgrid = beam.linear_copy() assert np.all(lgrid.celltypes < 20) def test_extract_cells(): ind = [1, 2, 3] 
part_beam = beam.extract_cells(ind) assert part_beam.n_cells == len(ind) assert part_beam.n_points < beam.n_points mask = np.zeros(beam.n_cells, np.bool) mask[:3] = True part_beam = beam.extract_cells(mask) assert part_beam.n_cells == len(ind) assert part_beam.n_points < beam.n_points def test_merge(): grid = beam.copy() grid.points[:, 0] += 1 unmerged = grid.merge(beam, inplace=False, merge_points=False) grid.merge(beam, inplace=True, merge_points=True) assert grid.n_points > beam.n_points assert grid.n_points < unmerged.n_points def test_merge_not_main(): grid = beam.copy() grid.points[:, 0] += 1 unmerged = grid.merge(beam, inplace=False, merge_points=False, main_has_priority=False) grid.merge(beam, inplace=True, merge_points=True) assert grid.n_points > beam.n_points assert grid.n_points < unmerged.n_points def test_merge_list(): grid_a = beam.copy() grid_a.points[:, 0] += 1 grid_b = beam.copy() grid_b.points[:, 1] += 1 grid_a.merge([beam, grid_b], inplace=True, merge_points=True) assert grid_a.n_points > beam.n_points def test_init_structured(): xrng = np.arange(-10, 10, 2) yrng = np.arange(-10, 10, 2) zrng = np.arange(-10, 10, 2) x, y, z = np.meshgrid(xrng, yrng, zrng) grid = pyvista.StructuredGrid(x, y, z) assert np.allclose(sgrid.x, x) assert np.allclose(sgrid.y, y) assert np.allclose(sgrid.z, z) grid_a = pyvista.StructuredGrid(grid) assert np.allclose(grid_a.points, grid.points) def test_invalid_init_structured(): xrng = np.arange(-10, 10, 2) yrng = np.arange(-10, 10, 2) zrng = np.arange(-10, 10, 2) x, y, z = np.meshgrid(xrng, yrng, zrng) z = z[:, :, :2] with pytest.raises(Exception): grid = pyvista.StructuredGrid(x, y, z) @pytest.mark.parametrize('binary', [True, False]) @pytest.mark.parametrize('extension', ['vts', 'vtk']) def test_save_structured(extension, binary, tmpdir): filename = str(tmpdir.mkdir("tmpdir").join('tmp.%s' % extension)) sgrid.save(filename, binary) grid = pyvista.StructuredGrid(filename) assert grid.x.shape == sgrid.y.shape assert grid.n_cells assert grid.points.shape == sgrid.points.shape grid = pyvista.read(filename) assert grid.x.shape == sgrid.y.shape assert grid.n_cells assert grid.points.shape == sgrid.points.shape assert isinstance(grid, pyvista.StructuredGrid) def test_load_structured_bad_filename(): with pytest.raises(Exception): pyvista.StructuredGrid('not a file') filename = os.path.join(test_path, 'test_grid.py') with pytest.raises(Exception): grid = pyvista.StructuredGrid(filename) def test_create_rectilinear_grid_from_specs(): # 3D example xrng = np.arange(-10, 10, 2) yrng = np.arange(-10, 10, 5) zrng = np.arange(-10, 10, 1) grid = pyvista.RectilinearGrid(xrng) assert grid.n_cells == 9 assert grid.n_points == 10 grid = pyvista.RectilinearGrid(xrng, yrng) assert grid.n_cells == 9*3 assert grid.n_points == 10*4 grid = pyvista.RectilinearGrid(xrng, yrng, zrng) assert grid.n_cells == 9*3*19 assert grid.n_points == 10*4*20 assert grid.bounds == [-10.0,8.0, -10.0,5.0, -10.0,9.0] # 2D example cell_spacings = np.array([1., 1., 2., 2., 5., 10.]) x_coordinates = np.cumsum(cell_spacings) y_coordinates = np.cumsum(cell_spacings) grid = pyvista.RectilinearGrid(x_coordinates, y_coordinates) assert grid.n_cells == 5*5 assert grid.n_points == 6*6 assert grid.bounds == [1.,21., 1.,21., 0.,0.] 
def test_create_rectilinear_after_init(): x = np.array([0,1,2]) y = np.array([0,5,8]) z = np.array([3,2,1]) grid = pyvista.RectilinearGrid() grid.x = x assert grid.dimensions == [3, 1, 1] grid.y = y assert grid.dimensions == [3, 3, 1] grid.z = z assert grid.dimensions == [3, 3, 3] assert np.allclose(grid.x, x) assert np.allclose(grid.y, y) assert np.allclose(grid.z, z) def test_create_rectilinear_grid_from_file(): grid = examples.load_rectilinear() assert grid.n_cells == 16146 assert grid.n_points == 18144 assert grid.bounds == [-350.0,1350.0, -400.0,1350.0, -850.0,0.0] assert grid.n_arrays == 1 def test_read_rectilinear_grid_from_file(): grid = pyvista.read(examples.rectfile) assert grid.n_cells == 16146 assert grid.n_points == 18144 assert grid.bounds == [-350.0,1350.0, -400.0,1350.0, -850.0,0.0] assert grid.n_arrays == 1 def test_cast_rectilinear_grid(): grid = pyvista.read(examples.rectfile) structured = grid.cast_to_structured_grid() assert isinstance(structured, pyvista.StructuredGrid) assert structured.n_points == grid.n_points assert structured.n_cells == grid.n_cells assert np.allclose(structured.points, grid.points) for k, v in grid.point_arrays.items(): assert np.allclose(structured.point_arrays[k], v) for k, v in grid.cell_arrays.items(): assert np.allclose(structured.cell_arrays[k], v) def test_create_uniform_grid_from_specs(): # create UniformGrid dims = [10, 10, 10] grid = pyvista.UniformGrid(dims) # Using default spacing and origin assert grid.dimensions == [10, 10, 10] assert grid.extent == [0, 9, 0, 9, 0, 9] assert grid.origin == [0.0, 0.0, 0.0] assert grid.spacing == [1.0, 1.0, 1.0] spacing = [2, 1, 5] grid = pyvista.UniformGrid(dims, spacing) # Using default origin assert grid.dimensions == [10, 10, 10] assert grid.origin == [0.0, 0.0, 0.0] assert grid.spacing == [2.0, 1.0, 5.0] origin = [10, 35, 50] grid = pyvista.UniformGrid(dims, spacing, origin) # Everything is specified assert grid.dimensions == [10, 10, 10] assert grid.origin == [10.0, 35.0, 50.0] assert grid.spacing == [2.0, 1.0, 5.0] assert grid.dimensions == [10, 10, 10] def test_uniform_setters(): grid = pyvista.UniformGrid() grid.dimensions = [10, 10, 10] assert grid.GetDimensions() == (10, 10, 10) assert grid.dimensions == [10, 10, 10] grid.spacing = [5, 2, 1] assert grid.GetSpacing() == (5, 2, 1) assert grid.spacing == [5, 2, 1] grid.origin = [6, 27.7, 19.8] assert grid.GetOrigin() == (6, 27.7, 19.8) assert grid.origin == [6, 27.7, 19.8] def test_create_uniform_grid_from_file(): grid = examples.load_uniform() assert grid.n_cells == 729 assert grid.n_points == 1000 assert grid.bounds == [0.0,9.0, 0.0,9.0, 0.0,9.0] assert grid.n_arrays == 2 assert grid.dimensions == [10, 10, 10] def test_read_uniform_grid_from_file(): grid = pyvista.read(examples.uniformfile) assert grid.n_cells == 729 assert grid.n_points == 1000 assert grid.bounds == [0.0,9.0, 0.0,9.0, 0.0,9.0] assert grid.n_arrays == 2 assert grid.dimensions == [10, 10, 10] def test_cast_uniform_to_structured(): grid = examples.load_uniform() structured = grid.cast_to_structured_grid() assert structured.n_points == grid.n_points assert structured.n_arrays == grid.n_arrays assert structured.bounds == grid.bounds def test_cast_uniform_to_rectilinear(): grid = examples.load_uniform() rectilinear = grid.cast_to_rectilinear_grid() assert rectilinear.n_points == grid.n_points assert rectilinear.n_arrays == grid.n_arrays assert rectilinear.bounds == grid.bounds @pytest.mark.parametrize('binary', [True, False]) @pytest.mark.parametrize('extension', ['vtr', 'vtk']) 
def test_save_rectilinear(extension, binary, tmpdir): filename = str(tmpdir.mkdir("tmpdir").join('tmp.%s' % extension)) ogrid = examples.load_rectilinear() ogrid.save(filename, binary) grid = pyvista.RectilinearGrid(filename) assert grid.n_cells == ogrid.n_cells assert np.allclose(grid.x, ogrid.x) assert np.allclose(grid.y, ogrid.y) assert np.allclose(grid.z, ogrid.z) assert grid.dimensions == ogrid.dimensions grid = pyvista.read(filename) assert isinstance(grid, pyvista.RectilinearGrid) assert grid.n_cells == ogrid.n_cells assert np.allclose(grid.x, ogrid.x) assert np.allclose(grid.y, ogrid.y) assert np.allclose(grid.z, ogrid.z) assert grid.dimensions == ogrid.dimensions @pytest.mark.parametrize('binary', [True, False]) @pytest.mark.parametrize('extension', ['vti', 'vtk']) def test_save_uniform(extension, binary, tmpdir): filename = str(tmpdir.mkdir("tmpdir").join('tmp.%s' % extension)) ogrid = examples.load_uniform() ogrid.save(filename, binary) grid = pyvista.UniformGrid(filename) assert grid.n_cells == ogrid.n_cells assert grid.origin == ogrid.origin assert grid.spacing == ogrid.spacing assert grid.dimensions == ogrid.dimensions grid = pyvista.read(filename) assert isinstance(grid, pyvista.UniformGrid) assert grid.n_cells == ogrid.n_cells assert grid.origin == ogrid.origin assert grid.spacing == ogrid.spacing assert grid.dimensions == ogrid.dimensions def test_grid_points(): """Test the points methods on UniformGrid and RectilinearGrid""" points = np.array([[0, 0, 0], [1, 0, 0], [1, 1, 0], [0, 1, 0], [0, 0, 1], [1, 0, 1], [1, 1, 1], [0, 1, 1]]) grid = pyvista.UniformGrid() grid.points = points assert grid.dimensions == [2, 2, 2] assert grid.spacing == [1, 1, 1] assert grid.origin == [0., 0., 0.] assert np.allclose(np.unique(grid.points, axis=0), np.unique(points, axis=0)) opts = np.c_[grid.x, grid.y, grid.z] assert np.allclose(np.unique(opts, axis=0), np.unique(points, axis=0)) # Now test rectilinear grid del grid grid = pyvista.RectilinearGrid() grid.points = points assert grid.dimensions == [2, 2, 2] assert np.allclose(np.unique(grid.points, axis=0), np.unique(points, axis=0)) def test_grid_extract_selection_points(): grid = pyvista.UnstructuredGrid(sgrid) sub_grid = grid.extract_selection_points([0]) assert sub_grid.n_cells == 1 sub_grid = grid.extract_selection_points(range(100)) assert sub_grid.n_cells > 1 def test_gaussian_smooth(): uniform = examples.load_uniform() active = uniform.active_scalars_name values = uniform.active_scalars uniform = uniform.gaussian_smooth(scalars=active) assert uniform.active_scalars_name == active assert uniform.active_scalars.shape == values.shape assert not np.all(uniform.active_scalars == values) values = uniform.active_scalars uniform = uniform.gaussian_smooth(radius_factor=5, std_dev=1.3) assert uniform.active_scalars_name == active assert uniform.active_scalars.shape == values.shape assert not np.all(uniform.active_scalars == values)
2.265625
2
Server/src/quadradiusr_server/server.py
kjarosh/QuadradiusR
0
12407
<reponame>kjarosh/QuadradiusR import asyncio import logging from collections import defaultdict from typing import Optional, List, Dict from aiohttp import web from aiohttp.web_runner import AppRunner, TCPSite from quadradiusr_server.auth import Auth from quadradiusr_server.config import ServerConfig from quadradiusr_server.cron import Cron, SetupService from quadradiusr_server.db.base import Game, Lobby from quadradiusr_server.db.database_engine import DatabaseEngine from quadradiusr_server.db.repository import Repository from quadradiusr_server.game import GameInProgress from quadradiusr_server.lobby import LiveLobby from quadradiusr_server.notification import NotificationService from quadradiusr_server.utils import import_submodules routes = web.RouteTableDef() class ServerNotStartedException(Exception): pass class QuadradiusRServer: def __init__(self, config: ServerConfig) -> None: self.config: ServerConfig = config self.notification_service = NotificationService() self.database = DatabaseEngine(config.database) self.repository = Repository(self.database) self.auth = Auth(config.auth, self.repository) self.cron = Cron(config.cron, self.repository, self.notification_service) self.setup_service = SetupService(self.repository) self.app = web.Application() self.app['server'] = self self.app['auth'] = self.auth self.app['database'] = self.database self.app['repository'] = self.repository self.app['notification'] = self.notification_service self.app.add_routes(routes) if config.static.redirect_root: async def root_handler(request): raise web.HTTPFound(config.static.redirect_root) self.app.router.add_route('GET', '', root_handler) if config.static.serve_path: self.app.router.add_static('/', config.static.serve_path) self.runner: Optional[AppRunner] = None self.site: Optional[TCPSite] = None self.lobbies: Dict[str, LiveLobby] = dict() self.games: Dict[str, GameInProgress] = dict() self.gateway_connections: Dict[str, List[object]] = \ defaultdict(lambda: []) def _ensure_started(self): if not self.site: raise ServerNotStartedException() @property def is_secure(self) -> bool: self._ensure_started() return True if self.site._ssl_context else False @property def address(self) -> (str, int): self._ensure_started() return self.site._server.sockets[0].getsockname() def _get_scheme(self, protocol): if protocol == 'http': scheme = 'https' if self.is_secure else 'http' elif protocol == 'ws': scheme = 'wss' if self.is_secure else 'ws' else: raise ValueError(f'Unknown protocol {protocol}') return scheme def get_url(self, protocol: str = 'http') -> str: # TCPSite.name is not implemented properly self._ensure_started() addr = self.address scheme = self._get_scheme(protocol) return f'{scheme}://{addr[0]}:{addr[1]}' def get_href(self, protocol: str = 'http') -> str: if self.config.href: return f'{self._get_scheme(protocol)}://{self.config.href}' else: return self.get_url(protocol) async def start(self): await self.database.initialize() self.runner = AppRunner(self.app) await self.runner.setup() cfg = self.config logging.info('Starting server') self.site = TCPSite( runner=self.runner, host=cfg.host, port=cfg.port, shutdown_timeout=cfg.shutdown_timeout, backlog=cfg.backlog, reuse_address=cfg.reuse_address, reuse_port=cfg.reuse_port, # TODO ssl_context=ssl_context, ) await self.setup_service.run_setup_jobs() await self.cron.register() await self.site.start() logging.info(f'Server started at {cfg.host}:{cfg.port}') async def shutdown(self): logging.info('Server shutdown initiated') if self.runner: await 
self.runner.cleanup() if self.database: await self.database.dispose() logging.info('Server shutdown finished') async def _run_async(self): await self.start() while True: await asyncio.sleep(3600) def run(self) -> int: loop = asyncio.new_event_loop() try: loop.run_until_complete(self._run_async()) return 0 except KeyboardInterrupt: logging.info('Interrupted') loop.run_until_complete(self.shutdown()) return -1 finally: loop.close() def register_gateway(self, gateway): user_id = gateway.user_id self.gateway_connections[user_id].append(gateway) def unregister_gateway(self, gateway): user_id = gateway.user_id self.gateway_connections[user_id].remove(gateway) def start_lobby(self, lobby: Lobby) -> LiveLobby: if lobby.id_ not in self.lobbies.keys(): self.lobbies[lobby.id_] = LiveLobby( lobby.id_, self.repository, self.notification_service) return self.lobbies[lobby.id_] def start_game(self, game: Game) -> GameInProgress: if game.id_ not in self.games.keys(): self.games[game.id_] = GameInProgress( game, self.repository, self.config.game) return self.games[game.id_] # importing submodules automatically registers endpoints import quadradiusr_server.rest import_submodules(quadradiusr_server.rest)
2.09375
2
12_find the output/03_In Python/01_GeeksForGeeks/05_Set Five/problem_4.py
Magdyedwar1996/python-level-one-codes
1
12408
def gfg(x,l = []):
    for i in range(x):
        l.append(i*i)
    print(l)


gfg(2)
gfg(3,[3,2,1])
gfg(3)
3.40625
3
duck/utils/cal_ints.py
galaxycomputationalchemistry/duck
1
12409
<gh_stars>1-10
import json, pickle, sys, os
from parmed.geometry import distance2
from parmed.topologyobjects import Atom
import operator
import parmed
import math


def check_same(atom, chain, res_name, res_number, atom_name):
    if atom.residue.name == res_name:
        if atom.residue.number == res_number:
            if atom.name == atom_name:
                if atom.residue.chain == chain:
                    return True
    return False


def is_lig(atom):
    # Non-hydrogen
    if atom.residue.name == "UNL" and atom.atomic_number > 1:
        return True


def find_atom(res_atom=None, prot_file=None, combined_pmd=None):
    # Parse the input data like this -> "A_LYS_311_N"
    chain = res_atom.split("_")[0]
    res_name = res_atom.split("_")[1]
    res_number = int(res_atom.split("_")[2])
    atom_name = res_atom.split("_")[3]
    # Read the original PDB File and find the atom coords
    protein = parmed.load_file(prot_file)
    for atom in protein.atoms:
        if check_same(atom, chain, res_name, res_number, atom_name):
            prot_atom = atom
            break
    distance_atom_1 = [(x.idx, distance2(x, prot_atom)) for x in combined_pmd.atoms]
    distance_atom_1.sort(key=operator.itemgetter(1))
    return distance_atom_1, prot_atom


def find_result(res_atom=None, prot_file=None, combined_pmd=None):
    # Find the
    distance_atom_1, prot_atom = find_atom(res_atom, prot_file, combined_pmd)
    # Now find the one nearest
    distance_atom_2 = [
        (x.idx, distance2(x, prot_atom)) for x in combined_pmd.atoms if is_lig(x)
    ]
    distance_atom_2.sort(key=operator.itemgetter(1))
    # These are the interactions to find
    index_one = distance_atom_1[0][0]
    # The ligand one
    index_two = distance_atom_2[0][0]
    out_res = [index_one, index_two, math.sqrt(distance_atom_2[0][1])]
    return index_one, index_two, out_res, distance_atom_2[0][1]


def find_interaction(res_atom=None, prot_file=None):
    output_file = "indice.text"
    if not res_atom or prot_file:
        if os.path.isfile(output_file):
            return json.load(open(output_file))
    # Read files
    print("loading pickle")
    pickle_in = open("complex_system.pickle", "rb")
    combined_pmd = pickle.load(pickle_in)[0]
    pickle_in.close()
    index_one, index_two, out_res, dist = find_result(res_atom, prot_file, combined_pmd)
    out_f = open(output_file, "w")
    out_f.write(json.dumps(out_res))
    out_f.close()
    return [index_one, index_two, math.sqrt(dist)]


if __name__ == "__main__":
    # Define the input
    res_atom = sys.argv[1]
    prot_file = sys.argv[2]
    find_interaction(res_atom, prot_file)
2.625
3
snakes/help_info.py
japinol7/snakes
12
12410
<filename>snakes/help_info.py<gh_stars>10-100
"""Module help_info."""

__author__ = '<NAME> (japinol)'


class HelpInfo:
    """Manages information used for help purposes."""

    def print_help_keys(self):
        print(' F1: \t show a help screen while playing the game'
              ' t: \t stats on/off\n'
              ' L_Ctrl + R_Alt + g: grid\n'
              ' p: \t pause\n'
              ' ESC: exit game\n'
              ' ^m: \t pause/resume music\n'
              ' ^s: \t sound effects on/off\n'
              ' Alt + Enter: change full screen / normal screen mode\n'
              ' ^h: \t shows this help\n'
              ' \t left, a: move snake to the left\n'
              ' \t right, d: move snake to the right\n'
              ' \t up, w: move snake up\n'
              ' \t down, s: move snake down\n'
              ' \t u 4: fire a light shot\n'
              ' \t i 5: fire a medium shot\n'
              ' \t j 1: fire a strong shot\n'
              ' \t k 2: fire a heavy shot\n'
              )
2.546875
3
run_minprop_PD.py
kztakemoto/network_propagation
3
12411
import warnings
warnings.simplefilter('ignore')
import argparse
import pickle
import numpy as np
import pandas as pd
import networkx as nx
import scipy.sparse as sp
from network_propagation_methods import minprop_2
from sklearn.metrics import roc_auc_score, auc
import matplotlib.pyplot as plt

#### Parameters #############
parser = argparse.ArgumentParser(description='Runs MINProp')
parser.add_argument('--alphaP', type=float, default=0.25, help='diffusion parameter for the protein-protein interaction network')
parser.add_argument('--alphaD', type=float, default=0.25, help='diffusion parameter for the disease similarity network')
parser.add_argument('--max_iter', type=int, default=1000, help='maximum number of iterations')
parser.add_argument('--eps', type=float, default=1.0e-6, help='convergence threshold')
parser.add_argument('--dir_data', type=str, default='./data/', help='directory of pickled network data')
args = parser.parse_args()

#### load data ############
### protein-protein interaction network
with open(args.dir_data + 'norm_adj_networkP.pickle', mode='rb') as f:
    norm_adj_networkP = pickle.load(f)
nb_proteins = norm_adj_networkP.shape[0]

### disease similarity network
with open(args.dir_data + 'adj_networkD.pickle', mode='rb') as f:
    adj_networkD = pickle.load(f)
nb_diseases = adj_networkD.shape[0]
# normalized adjacency matrix
deg_networkD = np.sum(adj_networkD, axis=0)
norm_adj_networkD = sp.csr_matrix(adj_networkD / np.sqrt(np.dot(deg_networkD.T, deg_networkD)), dtype=np.float64)
del(adj_networkD)
del(deg_networkD)

### protein-disease network (data used in PRINCE study)
with open(args.dir_data + 'biadj_networkPD.pickle', mode='rb') as f:
    biadj_networkPD = pickle.load(f)
# get the list of protein-disease pairs
PD_pairs = biadj_networkPD.nonzero()
# number of protein-disease pairs
nb_PD_pairs = len(PD_pairs[0])

#### Network propagation MINProp ###########################
roc_value_set = np.array([], dtype=np.float64)
rankings = np.array([], dtype=np.int64)
for i in range(nb_PD_pairs):
    # leave-one-out validation
    # remove a protein-disease association
    idx_P = PD_pairs[0][i]
    idx_D = PD_pairs[1][i]
    biadj_networkPD[idx_P, idx_D] = 0.0
    biadj_networkPD.eliminate_zeros()
    # normalized biadjacency matrix (ToDo: faster implementation)
    degP = np.sum(biadj_networkPD, axis=1)
    degD = np.sum(biadj_networkPD, axis=0)
    norm_biadj_networkPD = sp.csr_matrix(biadj_networkPD / np.sqrt(np.dot(degP, degD)), dtype=np.float64)
    norm_biadj_networkPD.data[np.isnan(norm_biadj_networkPD.data)] = 0.0
    norm_biadj_networkPD.eliminate_zeros()
    # set initial label
    yP = np.zeros(nb_proteins, dtype=np.float64)
    yD = np.zeros(nb_diseases, dtype=np.float64)
    yD[idx_D] = 1.0
    # propagation
    fP, fD, convergent = minprop_2(norm_adj_networkP, norm_adj_networkD, norm_biadj_networkPD, yP, yD, args.alphaP, args.alphaD, args.eps, args.max_iter)
    # ranking
    labels_real = np.zeros(nb_proteins)
    labels_real[idx_P] = 1
    rank = int(np.where(labels_real[np.argsort(-fP)]==1)[0]) + 1
    rankings = np.append(rankings, rank)
    # get AUC value
    roc_value = roc_auc_score(labels_real, fP)
    print(i, "AUC:", roc_value, convergent)
    roc_value_set = np.append(roc_value_set, roc_value)
    # reassign the protein-disease association
    biadj_networkPD[idx_P, idx_D] = 1.0

print("Average AUC", np.mean(roc_value_set))

# compute sensitivity and top rate (ROC-like curve)
# ToDo: faster implementation
sen_set = np.array([], dtype=np.float64)
top_rate_set = np.array([], dtype=np.float64)
for k in range(nb_proteins):
    # sensitibity
    sen = (rankings <= (k+1)).sum() / nb_PD_pairs
    # top rate
    top_rate = (k + 1) / nb_proteins
    sen_set = np.append(sen_set, sen)
    top_rate_set = np.append(top_rate_set, top_rate)

# get AUC value
print("Summarized AUC", auc(top_rate_set, sen_set))

# plot ROC-like curve
plt.scatter(top_rate_set, sen_set)
plt.show()
2.21875
2
Exercicios em Python/ex080.py
Raphael-Azevedo/Exercicios_Python
0
12412
<gh_stars>0
n = []
i = 0
for c in range(0, 5):
    n1 = int(input('Digite um valor: '))
    if c == 0 or n1 > n[-1]:
        n.append(n1)
        print(f'Adicionado na posição {c} da lista...')
    else:
        pos = 0
        while pos < len(n):
            if n1 <= n[pos]:
                n.insert(pos, n1)
                print(f'Adicionado na posição {pos} da lista...')
                break
            pos += 1
print(f'Os valores digitados em ordem foram {n}')
3.765625
4
rawal_stuff/src/demo.py
rawalkhirodkar/traffic_light_detection
0
12413
<reponame>rawalkhirodkar/traffic_light_detection import cv2 import numpy as np import random import copy import dlib from keras.models import Sequential from keras.optimizers import SGD from keras.datasets import cifar10 from keras.preprocessing.image import ImageDataGenerator from keras.models import Sequential from keras.layers import Dense, Dropout, Activation, Flatten from keras.layers import Convolution2D, MaxPooling2D from keras.utils import np_utils from keras.models import load_model from convnetskeras.convnets import preprocess_image_batch, convnet from convnetskeras.imagenet_tool import synset_to_dfs_ids np.set_printoptions(threshold=np.inf) #----------------------------Globals------------------------------------------------------------ MIN_AREA = 20 MAX_AREA = 500 MIN_RED_DENSITY = 0.4 MIN_BLACk_DENSITY_BELOW = 0 MIN_POLYAPPROX = 3 WIDTH_HEIGHT_RATIO = [0.333, 1.5] #range #------------------------------------------------------------------------------------------------ tracker_list = [] TRACK_FRAME = 10 VOTE_FRAME = 3 frame0_detections = [] frame1_detections = [] frame2_detections = [] frame_detections = [] RADIAL_DIST = 10 #------------------------------------------------------------------------------------------------ def dist(x1,y1,x2,y2): a = np.array((x1 ,y1)) b = np.array((x2, y2)) return np.linalg.norm(a-b) #------------------------------------------------------------------------------------------------ BOUNDING_BOX = [0,0,0,0] #x1, y1, x2, y2 #------------------------------------------------------------------------------------------------ def prune_detection(detections): ans = [] size = len(detections) for i in range(0,size): (x,y,w,h) = detections[i] found = -1 for j in range(i+1,size): (x1,y1,w1,h1) = detections[j] if(dist(x,y,x1,y1) < RADIAL_DIST): found = 1 break if found == -1: ans.append(detections[i]) return ans #------------------------------------------------------------------------------------------------ #------------------------------------------------------------------------------------------------ def inside(p): (x,y) = p if(x < BOUNDING_BOX[2] and x > BOUNDING_BOX[0] and y < BOUNDING_BOX[3] and y > BOUNDING_BOX[1]): return True return False #------------------------------------------------------------------------------------------------ #------------------------------------------------------------------------------------------------ def is_violation(frame_detections): for (x,y,w,h) in frame_detections: p1 = (x,y) p2 = (x+w,y) p3 = (x,y+h) p4 = (x+w,y+h) if(inside(p1) and inside(p2) and inside(p3) and inside(p4)): continue elif(not(inside(p1)) and not(inside(p2)) and not(inside(p3)) and not(inside(p4))): continue else: return True return False #------------------------------------------------------------------------------------------------ #------------------------------------------------------------------------------------------------ def create_model(): nb_classes = 2 # Create the model model = Sequential() model.add(Convolution2D(32, 3, 3, input_shape=(3, 128, 128), border_mode='same')) model.add(Activation('relu')) model.add(Convolution2D(32, 3, 3)) model.add(Activation('relu')) model.add(MaxPooling2D(pool_size=(2,3))) model.add(Dropout(0.25)) model.add(Flatten()) model.add(Dense(128)) model.add(Activation('relu')) model.add(Dropout(0.5)) model.add(Dense(nb_classes)) model.add(Activation('softmax')) return model #------------------------------------------------------------------------------------------------ print "Loading model" model = create_model() 
model.load_weights("../model/traffic_light_weights.h5") #------------------------------------------------------------------------------------------------ sgd = SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True) model_heatmap = convnet('vgg_19',weights_path="../model/weights/vgg19_weights.h5", heatmap=True) model_heatmap.compile(optimizer=sgd, loss='mse') traffic_light_synset = "n06874185" ids = synset_to_dfs_ids(traffic_light_synset) #------------------------------------------------------------------------------------------------ #------------------------------------------------------------------------------------------------ clipnum = raw_input("Enter Clip number:\n") f=open('../../dayTrain/dayClip'+str(clipnum)+'/frameAnnotationsBULB.csv','r') inputs=f.read() f.close(); inputs=inputs.split() inputs=[i.split(";") for i in inputs] for i in range(21): inputs.pop(0) # fourcc = cv2.VideoWriter_fourcc(*'XVID') fourcc = cv2.cv.CV_FOURCC(*'XVID') out = cv2.VideoWriter('output'+str(clipnum)+'.avi',fourcc, 20.0, (1280,960)) #------------------------------------------------------------------------------------------------ frame_num = -1 VIOLATION = -1 for i in inputs: if i[1]=="stop": filename="../../dayTrain/dayClip"+str(clipnum)+"/frames/"+i[0][12:len(i[0])] original_img=cv2.imread(filename) img=copy.copy(original_img) height, width, channels = img.shape if(frame_num == -1): center_x = width/2 center_y = height/2 BB_width = width/4 BB_height = height/4 BOUNDING_BOX = [center_x-BB_width,center_y-BB_height,center_x + BB_width, center_y + BB_height ] frame_num += 1 #------------------detection begins-------------------------------------------------------- if(frame_num % TRACK_FRAME < VOTE_FRAME): #VOTE_FRAME = 3, then 0,1,2 allowed #------------------reset------------------------ if(frame_num % TRACK_FRAME == 0): tracker_list = [] frame0_detections = [] frame1_detections = [] frame2_detections = [] #------------------reset------------------------ #-----------preprocess------------------------------------ img = cv2.medianBlur(img,3) # Median Blur to Remove Noise img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV) b,g,r = cv2.split(img) clahe = cv2.createCLAHE(clipLimit=7.0, tileGridSize=(8,8)) # Adaptive histogram equilization clahe = clahe.apply(r) img = cv2.merge((b,g,clahe)) #---------------------------------------------------------- #----------red threshold the HSV image-------------------- img1 = cv2.inRange(img, np.array([0, 100, 100]), np.array([10,255,255])) #lower red hue img2 = cv2.inRange(img, np.array([160, 100, 100]), np.array([179,255,255])) #upper red hue img3 = cv2.inRange(img, np.array([160, 40, 60]), np.array([180,70,80])) img4 = cv2.inRange(img, np.array([0, 150, 40]), np.array([20,190,75])) img5 = cv2.inRange(img, np.array([145, 35, 65]), np.array([170,65,90])) img = cv2.bitwise_or(img1,img3) img = cv2.bitwise_or(img,img2) img = cv2.bitwise_or(img,img4) img = cv2.bitwise_or(img,img5) cv2.medianBlur(img,7) ret,thresh = cv2.threshold(img,127,255,0) #---------------------------------------------------------- #--------------------Heatmap------------------------------------ im_heatmap = preprocess_image_batch([filename], color_mode="bgr") out_heatmap = model_heatmap.predict(im_heatmap) heatmap = out_heatmap[0,ids].sum(axis=0) my_range = np.max(heatmap) - np.min(heatmap) heatmap = heatmap / my_range heatmap = heatmap * 255 heatmap = cv2.resize(heatmap,(width,height)) cv2.imwrite("heatmap.png",heatmap) cv2.imwrite("image.png",original_img) heatmap[heatmap < 128] = 0 # Black heatmap[heatmap >= 
128] = 255 # White heatmap = np.asarray(heatmap,dtype=np.uint8) #---------------------------------------------------------- thresh = cv2.bitwise_and(thresh,heatmap) #---------------------------------------------------------- contours, hierarchy = cv2.findContours(thresh,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_NONE) for cnt in contours: area = cv2.contourArea(cnt) x,y,w,h = cv2.boundingRect(cnt) red_density = (area*1.0)/(w*h) width_height_ratio = (w*1.0)/h perimeter = cv2.arcLength(cnt, True) approx = cv2.approxPolyDP(cnt, 0.04 * perimeter, True) temp=cv2.cvtColor(original_img[y+h:y+2*h,x:x+w], cv2.COLOR_RGB2GRAY) (thresh, temp) = cv2.threshold(temp, 128, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU) black_density_below = ((w*h - cv2.countNonZero(temp))*1.0)/(w*h) if area>MIN_AREA and area<MAX_AREA and len(approx) > MIN_POLYAPPROX and red_density > MIN_RED_DENSITY and width_height_ratio < WIDTH_HEIGHT_RATIO[1] and width_height_ratio > WIDTH_HEIGHT_RATIO[0] and black_density_below > MIN_BLACk_DENSITY_BELOW: try: r_x1=x-50 r_y1=y-50 r_x2=x+w+50 r_y2=y+h+50 temp=original_img[r_y1:r_y2,r_x1:r_x2] xx=cv2.resize(temp,(128,128)) xx=np.asarray(xx) xx=np.transpose(xx,(2,0,1)) xx=np.reshape(xx,(1,3,128,128)) if model.predict_classes(xx,verbose=0)==[1]: cv2.rectangle(original_img, (x,y), (x+w,y+h),(0,255,0), 2) #append detections if frame_num % TRACK_FRAME == 0: frame0_detections.append((x,y,w,h)) elif frame_num%TRACK_FRAME == 1: frame1_detections.append((x,y,w,h)) elif frame_num%TRACK_FRAME == 2: frame2_detections.append((x,y,w,h)) else: cv2.rectangle(original_img, (x,y), (x+w,y+h),(255,0,0), 1) except Exception as e: cv2.rectangle(original_img, (x,y), (x+w,y+h),(0,255,0), 2) #edges are allowed print e pass #--------------------Violation in Detect Phase------------------------------ frame_detections = [] if(frame_num % TRACK_FRAME == 0): frame_detections = frame0_detections if(frame_num % TRACK_FRAME == 1): frame_detections = frame1_detections if(frame_num % TRACK_FRAME == 2): frame_detections = frame2_detections #--------------------Violation in Detect Phase------------------------------ #compute and start tracking if frame_num % TRACK_FRAME == 2: all_detections = frame0_detections + frame1_detections + frame2_detections final_detections = prune_detection(all_detections) for (x,y,w,h) in final_detections: tracker = dlib.correlation_tracker() tracker.start_track(original_img, dlib.rectangle(x,y,(x+w),(y+h))) tracker_list.append(tracker) #------------------detection end---------------------------------------------------- #------------------tracking begins---------------------------------------------------- else: frame_detections = [] for tracker in tracker_list: tracker.update(original_img) rect = tracker.get_position() pt1 = (int(rect.left()), int(rect.top())) pt2 = (int(rect.right()), int(rect.bottom())) cv2.rectangle(original_img, pt1, pt2, (255, 255, 255), 2) frame_detections.append((pt1[0], pt1[1], pt2[0]-pt1[0], pt2[1]-pt1[1])) #------------------ tracking end---------------------------------------------------- if(is_violation(frame_detections) == True): cv2.rectangle(original_img, (BOUNDING_BOX[0],BOUNDING_BOX[1]), (BOUNDING_BOX[2],BOUNDING_BOX[3]),(0, 0, 255), 2) else: cv2.rectangle(original_img, (BOUNDING_BOX[0],BOUNDING_BOX[1]), (BOUNDING_BOX[2],BOUNDING_BOX[3]),(60, 255, 255), 2) cv2.imshow("Annotated",original_img) out.write(original_img) ch = 0xFF & cv2.waitKey(1) if ch == 27: break cv2.destroyAllWindows() 
#------------------------------------------------------------------------------------------------
2.21875
2
kaivy/geometry/line2d.py
team-kaivy/kaivy
0
12414
########################################################################################################################
#                                                                                                                      #
#                                             This file is part of kAIvy                                              #
#                                                                                                                      #
#                              Copyright (c) 2019-2021 by the kAIvy team and contributors                             #
#                                                                                                                      #
########################################################################################################################

import numpy as np
from kaivy.geometry.geometry2d import Geometry2D
from kaivy.geometry.transformation2d import Transformation2D
from kivy.graphics import Line, SmoothLine, Color


class Line2D(Geometry2D):
    """
    Defines a simple line defined by two points
    """

    def __init__(self, points, width=1.0, color=(1.0, 1.0, 1.0, 1.0)):
        """
        Initializer

        :param points: The line's points
        """
        super().__init__()
        self.geometry_class_name = 'Line2D'
        self.set_nodes(np.array(points))
        self.smooth = True
        self.color = color
        self.width = width

    def render_to_kivy(self, target, transformation: Transformation2D, parameters={}, geometry_out=None):
        color = parameters.get('color', self.color)
        target.add(Color(*color))
        nodes = transformation.transform(self.nodes)
        if geometry_out is not None:
            if self.GO_TAG_LINE_LIST not in geometry_out:  # add line array if still missing
                geometry_out[self.GO_TAG_LINE_LIST] = []
            geometry_out[self.GO_TAG_LINE_LIST].append({self.GO_TAG_OWNER: self, self.GO_TAG_LINE_LIST_LINES: nodes})
        nodes = nodes.flatten().tolist()
        if self.smooth:
            target.add(SmoothLine(points=nodes, width=self.width))
        else:
            target.add(Line(points=nodes, width=self.width))

    def distance_to_point(self, point, ray=False):
        """
        Returns the distance between this line and given point

        :param point: A 2D coordinate
        :param ray: Defines if the line defines an unbound ray
        """
        return self.line_distance_to_point(self.nodes, point, ray=ray)

    @staticmethod
    def line_distance_to_point(point_list, point, ray=False):
        """
        Returns the distance from line p1 p2 and a given point point

        :param point_list: The line's points as numpy array
        :param point: A 2D coordinate
        :param ray: Defines if the line defines an unbound ray
        :return: The distance to the point and the nearest point. None, None if line is invalid
        """
        # two points define the line
        n = (point_list[1] - point_list[0])
        if np.sum(n) == 0:
            return None, None
        line_length = np.linalg.norm(n)
        n = n / line_length
        ap = point - point_list[0]
        t = ap.dot(n)
        if not ray:
            t = min(max(t, 0), line_length)
        x = point_list[0] + t * n
        # d = (np.cross(ap, n) ** 2).sum()**0.5
        return ((point - x) ** 2).sum() ** 0.5, x

    def to_dict(self, options):
        # Overrides Geometry2D to_dict
        result = super().to_dict(options)
        if options.get(self.OPTION_VISUAL_DETAILS, True):
            result['width'] = self.width
            result['smooth'] = self.smooth
        return result
2.90625
3
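A minimal standalone sketch of the point-to-segment distance math used by Line2D.line_distance_to_point above (project onto the unit direction, clamp to the segment unless treating it as a ray, then measure to the nearest point), assuming only NumPy; the function name segment_distance and the sample points are illustrative and not part of the kaivy API.

# Illustrative sketch, not part of the dataset record above: the same
# point-to-segment distance math as Line2D.line_distance_to_point,
# re-derived with plain NumPy so it runs without Kivy installed.
import numpy as np

def segment_distance(p1, p2, point, ray=False):
    """Distance from `point` to the segment (or ray) p1->p2, plus the nearest point."""
    p1, p2, point = map(np.asarray, (p1, p2, point))
    direction = p2 - p1
    length = np.linalg.norm(direction)
    if length == 0.0:
        return None, None               # degenerate segment, mirrors the original's None, None
    direction = direction / length      # unit direction
    t = (point - p1).dot(direction)     # projection of the point onto the direction
    if not ray:
        t = min(max(t, 0.0), length)    # clamp to the segment
    nearest = p1 + t * direction
    return float(np.linalg.norm(point - nearest)), nearest

if __name__ == "__main__":
    dist, nearest = segment_distance((0, 0), (10, 0), (3, 4))
    print(dist, nearest)                # expect 4.0 and [3. 0.]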
data_loader/MSVD_dataset.py
dendisuhubdy/collaborative-experts
0
12415
<gh_stars>0 import copy from pathlib import Path from typing import Dict, Union, List from collections import defaultdict import numpy as np from typeguard import typechecked from zsvision.zs_utils import memcache, concat_features from utils.util import memory_summary from base.base_dataset import BaseDataset class MSVD(BaseDataset): @staticmethod @typechecked def dataset_paths() -> Dict[str, Union[str, List[str], Path, Dict]]: subset_paths = {} test_splits = { "dev": "val_list.txt", "official": "test_list.txt", "public_server_val": "public_server_val.txt", "public_server_test": "public_server_test.txt", } for split_name, fname in test_splits.items(): subset_paths[split_name] = {"train": "train_list.txt", "val": fname} feature_names = [ "imagenet.senet154.0", "scene.densenet161.0", "i3d.i3d.0", "s3dg.s3dg.0", "imagenet.resnext101_32x48d.0", "trn.moments-trn.0", "r2p1d.r2p1d-ig65m.0", "r2p1d.r2p1d-ig65m-kinetics.0", "moments_3d.moments-resnet3d50.0", "moments-static.moments-resnet50.0", "detection", "detection-sem" ] custom_paths = { "face": ["aggregated_face_feats/face-avg.pickle"], "ocr": ["aggregated_ocr_feats/ocr-w2v.pickle"], } text_feat_paths = {} challenge_text_feat_paths = {} for text_feat in ("openai", "w2v"): text_feat_names = {key: f"{text_feat}-caption-{key}" for key in {"train", "val", "test"}} text_feat_paths[text_feat] = {key: f"aggregated_text_feats/{val}.pkl" for key, val in text_feat_names.items()} challenge_text_feat_paths[text_feat] = \ f"aggregated_text_feats/{text_feat}.pkl" feature_info = { "subset_list_paths": subset_paths, "feature_names": feature_names, "custom_paths": custom_paths, "text_feat_paths": text_feat_paths, "challenge_text_feat_paths": challenge_text_feat_paths, "raw_captions_path": "raw-captions.pkl", "dict_youtube_mapping_path": "dict_youtube_mapping.pkl" } return feature_info def load_features(self): root_feat = Path(self.root_feat) feat_names = {key: self.visual_feat_paths(key) for key in self.paths["feature_names"]} feat_names.update(self.paths["custom_paths"]) features = {} for expert, rel_names in feat_names.items(): if expert not in self.ordered_experts: continue feat_paths = tuple([root_feat / rel_name for rel_name in rel_names]) if len(feat_paths) == 1: features[expert] = memcache(feat_paths[0]) else: # support multiple forms of feature (e.g. max and avg pooling). 
For # now, we only support direct concatenation msg = f"{expert}: Only direct concat of muliple feats is possible" print(f"Concatenating aggregates for {expert}....") assert self.feat_aggregation[expert]["aggregate"] == "concat", msg axis = self.feat_aggregation[expert]["aggregate-axis"] x = concat_features.cache_info() # pylint: disable=no-value-for-parameter print(f"concat cache info: {x}") features_ = concat_features(feat_paths, axis=axis) memory_summary() if expert == "speech": features_defaults = defaultdict(lambda: np.zeros((1, 300))) features_defaults.update(features_) features_ = features_defaults # Make separate feature copies for each split to allow in-place filtering features[expert] = copy.deepcopy(features_) self.features = features if self.challenge_mode: self.load_challenge_text_features() else: text_feat_paths = self.paths["text_feat_paths"][self.text_feat] text_features = memcache(root_feat / text_feat_paths["train"]) split_names = {"dev": "val", "official": "test"} text_features.update(memcache( root_feat / text_feat_paths[split_names[self.split_name]])) key_map = memcache(root_feat / self.paths["dict_youtube_mapping_path"]) inverse_map = {val: key for key, val in key_map.items()} self.text_features = {inverse_map[key]: val for key, val in text_features.items()} self.raw_captions = memcache(root_feat / self.paths["raw_captions_path"]) if "detection" in self.ordered_experts: # Example processing processed = {} for key, subdict in self.features["detection"].items(): box, conf = subdict["detection_boxes"], subdict["detection_scores"] raw = subdict["raw_feats_avg"] processed[key] = np.concatenate((box, conf.reshape(-1, 1), raw), axis=1) self.features["detection"] = processed if "openpose" in self.ordered_experts: # Example processing processed = {} for key, subdict in self.features["openpose"].items(): raw = np.concatenate(subdict["matrix"], axis=1) processed[key] = raw.transpose(1, 0, 2).reshape(-1, 3 * 18) self.features["openpose"] = processed def sanity_checks(self): assert self.num_test_captions == 81, "Expected to have 81 test caps for MSVD"
1.929688
2
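Because dataset_paths() above is a staticmethod, the configured split lists, visual experts, and text-feature paths can be inspected without constructing the dataset. A minimal sketch, assuming the collaborative-experts code base and its dependencies (typeguard, zsvision, the base and utils packages) are importable:

# Hypothetical inspection sketch; only reads the static path configuration.
from data_loader.MSVD_dataset import MSVD

info = MSVD.dataset_paths()
print(sorted(info["subset_list_paths"]))   # dev / official / public_server_* splits
print(len(info["feature_names"]), "visual experts configured")
print(info["text_feat_paths"]["w2v"])      # per-split aggregated text feature files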
wagtail/wagtailsearch/forms.py
balkantechnologies/BalkanCMS_core
1
12416
<filename>wagtail/wagtailsearch/forms.py from django import forms from django.forms.models import inlineformset_factory from django.utils.translation import ugettext_lazy as _ from wagtail.wagtailadmin.widgets import AdminPageChooser from wagtail.wagtailsearch import models class QueryForm(forms.Form): query_string = forms.CharField(label=_("Search term(s)/phrase"), help_text=_("Enter the full search string to match. An " "exact match is required for your Editors Picks to be " "displayed, wildcards are NOT allowed."), required=True) class EditorsPickForm(forms.ModelForm): sort_order = forms.IntegerField(required=False) def __init__(self, *args, **kwargs): super(EditorsPickForm, self).__init__(*args, **kwargs) self.fields['page'].widget = AdminPageChooser() class Meta: model = models.EditorsPick fields = ('query', 'page', 'description') widgets = { 'description': forms.Textarea(attrs=dict(rows=3)), } EditorsPickFormSetBase = inlineformset_factory(models.Query, models.EditorsPick, form=EditorsPickForm, can_order=True, can_delete=True, extra=0) class EditorsPickFormSet(EditorsPickFormSetBase): minimum_forms = 1 minimum_forms_message = _("Please specify at least one recommendation for this search term.") def add_fields(self, form, *args, **kwargs): super(EditorsPickFormSet, self).add_fields(form, *args, **kwargs) # Hide delete and order fields form.fields['DELETE'].widget = forms.HiddenInput() form.fields['ORDER'].widget = forms.HiddenInput() # Remove query field del form.fields['query'] def clean(self): # Editors pick must have at least one recommended page to be valid # Check there is at least one non-deleted form. non_deleted_forms = self.total_form_count() non_empty_forms = 0 for i in range(0, self.total_form_count()): form = self.forms[i] if self.can_delete and self._should_delete_form(form): non_deleted_forms -= 1 if not (form.instance.id is None and not form.has_changed()): non_empty_forms += 1 if ( non_deleted_forms < self.minimum_forms or non_empty_forms < self.minimum_forms ): raise forms.ValidationError(self.minimum_forms_message)
2.140625
2
app/utils/docs_utils.py
BoostryJP/ibet-Prime
2
12417
<filename>app/utils/docs_utils.py """ Copyright BOOSTRY Co., Ltd. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. SPDX-License-Identifier: Apache-2.0 """ from typing import ( List, Dict, Any ) from pydantic import BaseModel from fastapi.openapi.utils import get_openapi from fastapi.exceptions import RequestValidationError from app.exceptions import ( InvalidParameterError, SendTransactionError, AuthorizationError, ServiceUnavailableError ) class MetaModel(BaseModel): code: int title: str class Error400MetaModel(MetaModel): class Config: @staticmethod def schema_extra(schema: Dict[str, Any], _) -> None: properties = schema["properties"] properties["code"]["example"] = 1 properties["title"]["example"] = "InvalidParameterError" class Error400Model(BaseModel): meta: Error400MetaModel detail: str class Error401MetaModel(MetaModel): class Config: @staticmethod def schema_extra(schema: Dict[str, Any], _) -> None: properties = schema["properties"] properties["code"]["example"] = 1 properties["title"]["example"] = "AuthorizationError" class Error401Model(BaseModel): meta: Error401MetaModel detail: str class Error404MetaModel(MetaModel): class Config: @staticmethod def schema_extra(schema: Dict[str, Any], _) -> None: properties = schema["properties"] properties["code"]["example"] = 1 properties["title"]["example"] = "NotFound" class Error404Model(BaseModel): meta: Error404MetaModel detail: str class Error405MetaModel(MetaModel): class Config: @staticmethod def schema_extra(schema: Dict[str, Any], _) -> None: properties = schema["properties"] properties["code"]["example"] = 1 properties["title"]["example"] = "MethodNotAllowed" class Error405Model(BaseModel): meta: Error405MetaModel detail: str class Error422MetaModel(MetaModel): class Config: @staticmethod def schema_extra(schema: Dict[str, Any], _) -> None: properties = schema["properties"] properties["code"]["example"] = 1 properties["title"]["example"] = "RequestValidationError" class Error422DetailModel(BaseModel): loc: List[str] msg: str type: str class Config: @staticmethod def schema_extra(schema: Dict[str, Any], _) -> None: properties = schema["properties"] properties["loc"]["example"] = ["header", "issuer-address"] properties["msg"]["example"] = "field required" properties["type"]["example"] = "value_error.missing" class Error422Model(BaseModel): meta: Error422MetaModel detail: List[Error422DetailModel] class Error503MetaModel(MetaModel): class Config: @staticmethod def schema_extra(schema: Dict[str, Any], _) -> None: properties = schema["properties"] properties["code"]["example"] = 1 properties["title"]["example"] = "ServiceUnavailableError" class Error503Model(BaseModel): meta: Error503MetaModel detail: str DEFAULT_RESPONSE = { 400: { "description": "Invalid Parameter Error / Send Transaction Error", "model": Error400Model }, 401: { "description": "Authorization Error", "model": Error401Model }, 404: { "description": "Not Found Error", "model": Error404Model }, 405: { "description": "Method Not Allowed", "model": Error405Model }, 422: { "description": "Validation Error", 
"model": Error422Model }, 503: { "description": "Service Unavailable Error", "model": Error503Model } } def get_routers_responses(*args): responses = {} for arg in args: if isinstance(arg, int): responses[arg] = DEFAULT_RESPONSE.get(arg, {}) elif arg == InvalidParameterError: responses[400] = DEFAULT_RESPONSE[400] elif arg == SendTransactionError: responses[400] = DEFAULT_RESPONSE[400] elif arg == AuthorizationError: responses[401] = DEFAULT_RESPONSE[401] elif arg == RequestValidationError: responses[422] = DEFAULT_RESPONSE[422] elif arg == ServiceUnavailableError: responses[503] = DEFAULT_RESPONSE[503] return responses def custom_openapi(app): def openapi(): openapi_schema = app.openapi_schema if openapi_schema is None: openapi_schema = get_openapi( title=app.title, version=app.version, openapi_version=app.openapi_version, description=app.description, routes=app.routes, tags=app.openapi_tags, servers=app.servers, ) def _get(src: dict, *keys): tmp_src = src for key in keys: tmp_src = tmp_src.get(key) if tmp_src is None: return None return tmp_src paths = _get(openapi_schema, "paths") if paths is not None: for path_info in paths.values(): for router in path_info.values(): # Remove Default Validation Error Response Structure # NOTE: # HTTPValidationError is automatically added to APIs docs that have path, header, query, # and body parameters. # But HTTPValidationError does not have 'meta', # and some APIs do not generate a Validation Error(API with no-required string parameter only, etc). resp_422 = _get(router, "responses", "422") if resp_422 is not None: ref = _get(resp_422, "content", "application/json", "schema", "$ref") if ref == "#/components/schemas/HTTPValidationError": router["responses"].pop("422") # Remove empty response's contents responses = _get(router, "responses") for resp in responses.values(): schema = _get(resp, "content", "application/json", "schema") if schema == {}: resp.pop("content") return openapi_schema return openapi
1.953125
2
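get_routers_responses above returns a plain dict keyed by status code, which FastAPI route decorators accept through their responses keyword. A hedged usage sketch follows; the router, path, and handler are invented for illustration and assume the app package layout shown above is importable.

# Hypothetical usage sketch: attaching the error-response models above to a route.
from fastapi import APIRouter
from fastapi.exceptions import RequestValidationError

from app.exceptions import InvalidParameterError          # as imported in the module above
from app.utils.docs_utils import get_routers_responses    # the helper shown above

router = APIRouter()

@router.get(
    "/example/tokens",
    responses=get_routers_responses(404, InvalidParameterError, RequestValidationError),
)
async def list_tokens():
    # The 404 / 400 / 422 response schemas now appear in the generated OpenAPI docs.
    return {"tokens": []}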
scripts/models/arcii.py
mogumogu2333/MatchZoo
0
12418
<gh_stars>0 import os import sys sys.path.insert(0, "../../") import matchzoo as mz import typing import pandas as pd import matchzoo from matchzoo.preprocessors.units.tokenize import Tokenize, WordPieceTokenize from matchzoo.engine.base_preprocessor import load_preprocessor import pickle import utils os.environ["CUDA_VISIBLE_DEVICES"] = "6" input_dir = "../../data/" model_dir = "../../models/arcii" num_epochs = 10 utils.ensure_dir(model_dir) with open(os.path.join(input_dir, "train.pkl"), 'rb') as f: train_pack_processed = pickle.load(f) print(train_pack_processed.frame().head()) with open(os.path.join(input_dir, "test.pkl"), 'rb') as f: test_pack_processed = pickle.load(f) print(test_pack_processed.frame().head()) preprocessor = load_preprocessor(dirpath=os.path.join(input_dir)) print(preprocessor._context) glove_embedding = mz.datasets.embeddings.load_glove_embedding(dimension=100) ranking_task = mz.tasks.Classification() ranking_task.metrics = ['accuracy'] print("`ranking_task` initialized with metrics", ranking_task.metrics) model = mz.models.ArcII() model.params.update(preprocessor.context) model.params['task'] = ranking_task model.params['embedding_output_dim'] = 100 model.params['embedding_trainable'] = True model.params['num_blocks'] = 2 model.params['kernel_1d_count'] = 32 model.params['kernel_1d_size'] = 3 model.params['kernel_2d_count'] = [64, 64] model.params['kernel_2d_size'] = [3, 3] model.params['pool_2d_size'] = [[3, 3], [3, 3]] model.params['optimizer'] = 'adam' model.build() model.compile() model.backend.summary() embedding_matrix = glove_embedding.build_matrix(preprocessor.context['vocab_unit'].state['term_index']) model.load_embedding_matrix(embedding_matrix) test_x, test_y = test_pack_processed.unpack() evaluate = mz.callbacks.EvaluateAllMetrics(model, x=test_x, y=test_y, batch_size=128) dump_prediction = mz.callbacks.DumpPrediction(model, x=test_x, y=test_y, batch_size=128, model_save_path=model_dir) train_generator = mz.DataGenerator( train_pack_processed, num_dup=2, num_neg=1, batch_size=128, ) print('num batches:', len(train_generator)) history = model.fit_generator(train_generator, epochs=num_epochs, callbacks=[evaluate, dump_prediction], workers=4, use_multiprocessing=True)
2.09375
2
mcpyrate/markers.py
Technologicat/mcpyrate
34
12419
# -*- coding: utf-8; -*- """AST markers for internal communication. *Internal* here means they are to be never passed to Python's `compile`; macros may use them to work together. """ __all__ = ["ASTMarker", "get_markers", "delete_markers", "check_no_markers_remaining"] import ast from . import core, utils, walkers class ASTMarker(ast.AST): """Base class for AST markers. Markers are AST-node-like objects meant for communication between co-operating, related macros. They are also used by the macro expander to talk with itself during expansion. We inherit from `ast.AST`, so that during macro expansion, a marker behaves like a single AST node. It is a postcondition of a completed macro expansion that no markers remain in the AST. To help fail-fast, if you define your own marker types, use `get_markers` to check (at an appropriate point) that the expanded AST has no instances of your own markers remaining. (You'll want a base class for your own markers.) A typical usage example is in the quasiquote system, where the unquote operators (some of which expand to markers) may only appear inside a quoted section. So just before the quote operator exits, it checks that all quasiquote markers within that section have been compiled away. """ # TODO: Silly default `None`, because `copy` and `deepcopy` call `__init__` without arguments, # TODO: though the docs say they behave like `pickle` (and wouldn't thus need to call __init__ at all!). def __init__(self, body=None): """body: the actual AST that is annotated by this marker""" self.body = body self._fields = ["body"] # support ast.iter_fields def get_markers(tree, cls=ASTMarker): """Return a `list` of any `cls` instances found in `tree`. For output validation.""" class ASTMarkerCollector(walkers.ASTVisitor): def examine(self, tree): if isinstance(tree, cls): self.collect(tree) self.generic_visit(tree) w = ASTMarkerCollector() w.visit(tree) return w.collected def delete_markers(tree, cls=ASTMarker): """Delete any `cls` ASTMarker instances found in `tree`. The deletion takes place by replacing each marker node with the actual AST node stored in its `body` attribute. """ class ASTMarkerDeleter(walkers.ASTTransformer): def transform(self, tree): if isinstance(tree, cls): return self.visit(tree.body) return self.generic_visit(tree) return ASTMarkerDeleter().visit(tree) def check_no_markers_remaining(tree, *, filename, cls=None): """Check that `tree` has no AST markers remaining. If a class `cls` is provided, only check for markers that `isinstance(cls)`. If there are any, raise `MacroExpansionError`. No return value. `filename` is the full path to the `.py` file, for error reporting. Convenience function. """ cls = cls or ASTMarker remaining_markers = get_markers(tree, cls) if remaining_markers: codes = [utils.format_context(node, n=5) for node in remaining_markers] locations = [utils.format_location(filename, node, code) for node, code in zip(remaining_markers, codes)] report = "\n\n".join(locations) raise core.MacroExpansionError(f"{filename}: AST markers remaining after expansion:\n{report}")
2.96875
3
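A minimal round-trip sketch for the marker helpers above, assuming mcpyrate is installed: a custom marker subclass wraps a subtree, get_markers finds it, and delete_markers splices the original node back in. The class name MyMarker and the parsed source are illustrative only.

# Illustrative sketch of get_markers / delete_markers on a tiny AST.
import ast
from mcpyrate.markers import ASTMarker, get_markers, delete_markers

class MyMarker(ASTMarker):
    """Marker used only by this sketch."""

tree = ast.parse("x = 1 + 2")
# Wrap the right-hand side of the assignment in a marker.
tree.body[0].value = MyMarker(tree.body[0].value)

print(len(get_markers(tree, MyMarker)))   # expect 1
tree = delete_markers(tree, MyMarker)
print(len(get_markers(tree, MyMarker)))   # expect 0; the BinOp is back in place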
lambda.py
deepanshu-yadav/NSFW-Classifier
13
12420
<filename>lambda.py
import boto3
import json
import numpy as np
import base64, os, boto3, ast, json

endpoint = 'myprojectcapstone'

def format_response(message, status_code):
    return {
        'statusCode': str(status_code),
        'body': json.dumps(message),
        'headers': {
            'Content-Type': 'application/json',
            'Access-Control-Allow-Origin': '*'
        }
    }

def lambda_handler(event, context):
    try:
        body = json.loads(event['body'])
        image = base64.b64decode(body['data'].replace('data:image/png;base64,', ''))
        try:
            runtime = boto3.Session().client(service_name='sagemaker-runtime', region_name='us-east-2')
            response = runtime.invoke_endpoint(EndpointName=endpoint, ContentType='application/x-image', Body=image)
            print(response)
            try:
                probs = response['Body'].read()
                probs = json.loads(probs)
                #probs = ast.literal_eval(probs)
                #pred = probs.index(max(probs))
                pred = np.argmax(np.array(probs))
                if pred == 0:
                    resp = 'Animated Nsfw'
                elif pred == 1:
                    resp = 'Contains Nudity'
                elif pred == 2:
                    resp = 'Contains Porn'
                elif pred == 4:
                    resp = 'Contains semi Nudity'
                else:
                    resp = 'Safe For viewing'
                return format_response(resp, 200)
            except:
                return format_response('Ouch! Something went wrong with loading json data from endpoint' + response['Body'].read(), 200)
        except:
            return format_response('Ouch! Something went wrong with endpoint', 200)
    except:
        return format_response('Ouch! Something went wrong with decoding', 200)
2.28125
2
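The handler above expects event['body'] to be JSON with a data field holding a base64 PNG prefixed by a data URL. A hypothetical client-side sketch; the API Gateway URL and file name are placeholders, not part of the record.

# Hypothetical client for the Lambda handler above.
import base64
import json
import requests

API_URL = "https://example.execute-api.us-east-2.amazonaws.com/prod/classify"  # placeholder

with open("sample.png", "rb") as f:
    encoded = base64.b64encode(f.read()).decode("ascii")

payload = {"data": "data:image/png;base64," + encoded}
resp = requests.post(API_URL, data=json.dumps(payload))
print(resp.status_code, resp.json())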
src/bindings/python/tests/test_ngraph/test_eye.py
si-eun-kim/openvino
2
12421
# Copyright (C) 2018-2022 Intel Corporation # SPDX-License-Identifier: Apache-2.0 import openvino.runtime.opset9 as ov import numpy as np import pytest from tests.runtime import get_runtime from openvino.runtime.utils.types import get_element_type_str from openvino.runtime.utils.types import get_element_type @pytest.mark.parametrize( "num_rows, num_columns, diagonal_index, out_type", [ pytest.param(2, 5, 0, np.float32), pytest.param(5, 3, 2, np.int64), pytest.param(3, 3, -1, np.float16), pytest.param(5, 5, -10, np.float32), ], ) def test_eye_rectangle(num_rows, num_columns, diagonal_index, out_type): num_rows_array = np.array([num_rows], np.int32) num_columns_array = np.array([num_columns], np.int32) diagonal_index_array = np.array([diagonal_index], np.int32) num_rows_tensor = ov.constant(num_rows_array) num_columns_tensor = ov.constant(num_columns_array) diagonal_index_tensor = ov.constant(diagonal_index_array) # Create with param names eye_node = ov.eye(num_rows=num_rows_tensor, num_columns=num_columns_tensor, diagonal_index=diagonal_index_tensor, output_type=get_element_type_str(out_type)) # Create with default orded eye_node = ov.eye(num_rows_tensor, num_columns_tensor, diagonal_index_tensor, get_element_type_str(out_type)) expected_results = np.eye(num_rows, M=num_columns, k=diagonal_index, dtype=np.float32) assert eye_node.get_type_name() == "Eye" assert eye_node.get_output_size() == 1 assert eye_node.get_output_element_type(0) == get_element_type(out_type) assert tuple(eye_node.get_output_shape(0)) == expected_results.shape # TODO: Enable with Eye reference implementation # runtime = get_runtime() # computation = runtime.computation(eye_node) # eye_results = computation() # assert np.allclose(eye_results, expected_results) @pytest.mark.parametrize( "num_rows, num_columns, diagonal_index, batch_shape, out_type", [ pytest.param(2, 5, 0, [1], np.float32), pytest.param(5, 3, 2, [2, 2], np.int64), pytest.param(3, 3, -1, [1, 3, 2], np.float16), pytest.param(5, 5, -10, [1, 1], np.float32), ], ) def test_eye_batch_shape(num_rows, num_columns, diagonal_index, batch_shape, out_type): num_rows_array = np.array([num_rows], np.int32) num_columns_array = np.array([num_columns], np.int32) diagonal_index_array = np.array([diagonal_index], np.int32) batch_shape_array = np.array(batch_shape, np.int32) num_rows_tensor = ov.constant(num_rows_array) num_columns_tensor = ov.constant(num_columns_array) diagonal_index_tensor = ov.constant(diagonal_index_array) batch_shape_tensor = ov.constant(batch_shape_array) # Create with param names eye_node = ov.eye(num_rows=num_rows_tensor, num_columns=num_columns_tensor, diagonal_index=diagonal_index_tensor, batch_shape=batch_shape_tensor, output_type=get_element_type_str(out_type)) # Create with default orded eye_node = ov.eye(num_rows_tensor, num_columns_tensor, diagonal_index_tensor, get_element_type_str(out_type), batch_shape_tensor) output_shape = [*batch_shape, 1, 1] one_matrix = np.eye(num_rows, M=num_columns, k=diagonal_index, dtype=np.float32) expected_results = np.tile(one_matrix, output_shape) assert eye_node.get_type_name() == "Eye" assert eye_node.get_output_size() == 1 assert eye_node.get_output_element_type(0) == get_element_type(out_type) assert tuple(eye_node.get_output_shape(0)) == expected_results.shape # TODO: Enable with Eye reference implementation # runtime = get_runtime() # computation = runtime.computation(eye_node) # eye_results = computation() # assert np.allclose(eye_results, expected_results)
2.09375
2
tests/error/test_format_error.py
GDGSNF/graphql-core
590
12422
<gh_stars>100-1000 from typing import List, Union from pytest import raises from graphql.error import GraphQLError, format_error from graphql.language import Node, Source from graphql.pyutils import Undefined def describe_format_error(): def formats_graphql_error(): source = Source( """ query { something }""" ) path: List[Union[int, str]] = ["one", 2] extensions = {"ext": None} error = GraphQLError( "test message", Node(), source, [14, 40], path, ValueError("original"), extensions=extensions, ) formatted = format_error(error) assert formatted == error.formatted assert formatted == { "message": "test message", "locations": [{"line": 2, "column": 14}, {"line": 3, "column": 20}], "path": path, "extensions": extensions, } def uses_default_message(): # noinspection PyTypeChecker formatted = format_error(GraphQLError(None)) # type: ignore assert formatted == { "message": "An unknown error occurred.", "locations": None, "path": None, } def includes_path(): path: List[Union[int, str]] = ["path", 3, "to", "field"] error = GraphQLError("msg", path=path) formatted = format_error(error) assert formatted == error.formatted assert formatted == {"message": "msg", "locations": None, "path": path} def includes_extension_fields(): error = GraphQLError("msg", extensions={"foo": "bar"}) formatted = format_error(error) assert formatted == error.formatted assert formatted == { "message": "msg", "locations": None, "path": None, "extensions": {"foo": "bar"}, } def rejects_none_and_undefined_errors(): with raises(TypeError) as exc_info: # noinspection PyTypeChecker format_error(None) # type: ignore assert str(exc_info.value) == "Expected a GraphQLError." with raises(TypeError) as exc_info: # noinspection PyTypeChecker format_error(Undefined) # type: ignore assert str(exc_info.value) == "Expected a GraphQLError."
2.234375
2
pymonad/test/test_Maybe.py
bjd2385/pymonad
0
12423
# -------------------------------------------------------- # (c) Copyright 2014 by <NAME>. # Licensed under BSD 3-clause licence. # -------------------------------------------------------- import unittest from pymonad.Maybe import Maybe, Just, First, Last, _Nothing, Nothing from pymonad.Reader import curry from pymonad.test.MonadTester import * from pymonad.test.MonoidTester import * class TestJustFunctor(unittest.TestCase, MonadTester): def __init__(self, x): super(TestJustFunctor, self).__init__(x) self.setClassUnderTest(Just) def testFunctorLaws(self): self.given(8) self.ensure_first_functor_law_holds() self.ensure_second_functor_law_holds() class TestNothingFunctor(unittest.TestCase, MonadTester): def __init__(self, x): super(TestNothingFunctor, self).__init__(x) self.setClassUnderTest(_Nothing) def testFunctorLaws(self): self.given(None) self.ensure_first_functor_law_holds() self.ensure_second_functor_law_holds() class TestJustApplicative(unittest.TestCase, MonadTester): def __init__(self, x): super(TestJustApplicative, self).__init__(x) self.setClassUnderTest(Just) def testApplicativeLaws(self): self.given(8) self.ensure_first_applicative_law_holds() self.ensure_second_applicative_law_holds() self.ensure_third_applicative_law_holds() self.ensure_fourth_applicative_law_holds() self.ensure_fifth_applicative_law_holds() class TestNothingApplicative(unittest.TestCase, MonadTester): def __init__(self, x): super(TestNothingApplicative, self).__init__(x) self.setClassUnderTest(_Nothing) def testApplicativeLaws(self): self.given(None) self.ensure_first_applicative_law_holds() self.ensure_second_applicative_law_holds() self.ensure_third_applicative_law_holds() self.ensure_fourth_applicative_law_holds() self.ensure_fifth_applicative_law_holds() class TestJustMonad(unittest.TestCase, MonadTester): def __init__(self, x): super(TestJustMonad, self).__init__(x) self.setClassUnderTest(Just) def monad_function_f(self, x): return Just(x + 10) def monad_function_g(self, x): return Just(x * 5) def testMonadLaws(self): self.given(8) self.ensure_first_monad_law_holds() self.ensure_second_monad_law_holds() self.ensure_third_monad_law_holds() class TestNothingMonad(unittest.TestCase, MonadTester): def __init__(self, x): super(TestNothingMonad, self).__init__(x) self.setClassUnderTest(_Nothing) def monad_function_f(self, x): return Just(x + 10) def monad_function_g(self, x): return Just(x * 5) def testMonadLaws(self): self.given(None) self.ensure_first_monad_law_holds() self.ensure_second_monad_law_holds() self.ensure_third_monad_law_holds() class TestMaybeEquality(unittest.TestCase, MonadTester): def testEqualityOfIdenticalTypes(self): self.givenMonads(Just(8), Just(8)) self.ensureMonadsAreEqual() def testInequalityOfIdenticalTypes(self): self.givenMonads(Just(8), Just(9)) self.ensureMonadsAreNotEqual() def testInequalityOfJustAndNothing(self): self.givenMonads(Just(8), Nothing) self.ensureMonadsAreNotEqual() def testMonadComparisonExceptionWithJust(self): self.givenMonads(Just(8), Reader(8)) self.ensureComparisonRaisesException() def testMonadComparisonExceptionWithNothing(self): self.givenMonads(Nothing, Reader(8)) self.ensureComparisonRaisesException() class TestMaybeMonoid(unittest.TestCase, MonoidTester): def test_mzero(self): self.givenMonoid(Maybe) self.get_mzero() self.ensure_mzero_is(Nothing) def test_right_identity(self): self.givenMonoid(Just(9)) self.ensure_monoid_plus_zero_equals(Just(9)) def test_left_identity(self): self.givenMonoid(Just(9)) self.ensure_zero_plus_monoid_equals(Just(9)) def 
test_associativity(self): self.givenMonoids(Just(1), Just(2), Just(3)) self.ensure_associativity() def test_mplus_with_two_just_values(self): self.givenMonoids(Just(1), Just(2)) self.ensure_mconcat_equals(Just(3)) def test_mplus_with_one_just_and_one_nothing(self): self.givenMonoids(Just(1), Nothing) self.ensure_mconcat_equals(Just(1)) class TestFirstMonoid(unittest.TestCase, MonoidTester): def test_mzero(self): self.givenMonoid(First) self.get_mzero() self.ensure_mzero_is(First(Nothing)) def test_right_identity(self): self.givenMonoid(First(Just(9))) self.ensure_monoid_plus_zero_equals(First(Just(9))) def test_left_identity(self): self.givenMonoid(First(Just(9))) self.ensure_zero_plus_monoid_equals(First(Just(9))) def test_associativity(self): self.givenMonoids(First(Just(1)), First(Just(2)), First(Just(3))) self.ensure_associativity() def test_mplus_with_two_just_values(self): self.givenMonoids(First(Just(1)), First(Just(2))) self.ensure_mconcat_equals(First(Just(1))) def test_mplus_with_just_and_nothing(self): self.givenMonoids(First(Just(1)), Nothing) self.ensure_mconcat_equals(First(Just(1))) def test_mplus_with_nothing_and_just(self): self.givenMonoids(Nothing, First(Just(1))) self.ensure_mconcat_equals(First(Just(1))) class TestLastMonoid(unittest.TestCase, MonoidTester): def test_mzero(self): self.givenMonoid(Last) self.get_mzero() self.ensure_mzero_is(Last(Nothing)) def test_right_identity(self): self.givenMonoid(Last(Just(9))) self.ensure_monoid_plus_zero_equals(Last(Just(9))) def test_left_identity(self): self.givenMonoid(Last(Just(9))) self.ensure_zero_plus_monoid_equals(Last(Just(9))) def test_associativity(self): self.givenMonoids(Last(Just(1)), Last(Just(2)), Last(Just(3))) self.ensure_associativity() def test_mplus_with_two_just_values(self): self.givenMonoids(Last(Just(1)), Last(Just(2))) self.ensure_mconcat_equals(Last(Just(2))) def test_mplus_with_just_and_nothing(self): self.givenMonoids(Last(Just(1)), Nothing) self.ensure_mconcat_equals(Last(Just(1))) def test_mplus_with_nothing_and_just(self): self.givenMonoids(Nothing, Last(Just(1))) self.ensure_mconcat_equals(Last(Just(1))) if __name__ == "__main__": unittest.main()
2.296875
2
sborl/__init__.py
canonical/sborl
0
12424
# Copyright 2022 Canonical Ltd.
# See LICENSE file for licensing details.

__version__ = "0.0.8"

# flake8: noqa: F401,F402
from . import errors, events, relation, testing
from .relation import EndpointWrapper

1.023438
1
sizer.py
riffcc/librarian
0
12425
<filename>sizer.py<gh_stars>0
#!/usr/bin/python3
# Fetch torrent sizes
# TODO: Report number of files before we go etc
import os
from torrentool.api import Torrent
from fnmatch import fnmatch

root = '/opt/radio/collections'
pattern = "*.torrent"
alltorrentsize = 0

print("Thanks for using The Librarian.")

for path, subdirs, files in os.walk(root):
    for name in files:
        if fnmatch(name, pattern):
            torrentstats = Torrent.from_file(os.path.join(path, name))
            alltorrentsize += torrentstats.total_size
            print('Torrent size ' + str(torrentstats.total_size) + ' for a total so far of ' + str(alltorrentsize))
            print('DEBUG' + os.path.join(path, name))

# Reading filesize
my_torrent = Torrent.from_file('/opt/radio/collections/arienscompanymanuals/archive.org/download/collection_01_ariens_manuals/collection_01_ariens_manuals_archive.torrent')
size = my_torrent.total_size  # Total files size in bytes.
print(size)
2.890625
3
i_vis/core/login.py
piechottam/i-vis-core
0
12426
<filename>i_vis/core/login.py
"""
Flask LoginManager plugin.

Import and execute ``login.init_app(app)`` in a factory function to use.
"""
from typing import Any, Callable, TYPE_CHECKING
from functools import wraps

from flask import redirect, request, url_for, current_app
from flask_login import current_user
from flask_login.login_manager import LoginManager

from .errors import IllegalAccessError

if TYPE_CHECKING:
    from werkzeug.wrappers import Response

login = LoginManager()


def admin_required(func: Callable) -> Callable:
    """Make view only accessible to admins.

    Args:
        func: Callable to wrap.

    Returns:
        Wrapped callable - only callable when user is an admin.
    """

    @wraps(func)
    def decorated_view(*args: Any, **kwargs: Any) -> Any:
        if not current_app.config.get("LOGIN_DISABLED", True) and (
            current_user is None
            or not current_user.is_authenticated
            or not current_user.is_admin
        ):
            # TODO
            # move flash_permission_denied()
            # move return redirect(url_for("main.index"))
            raise IllegalAccessError
        return func(*args, **kwargs)

    return decorated_view


@login.unauthorized_handler
def unauthorized_callback() -> "Response":
    return redirect(url_for("main.signin", next=request.path))
2.203125
2
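A sketch of wiring the admin_required decorator and the login manager above into a Flask app factory, as the module docstring suggests. The blueprint, route, and secret key are made up for illustration; the import path follows the record's i_vis/core/login.py location.

# Illustrative sketch: protecting a Flask view with admin_required.
from flask import Flask, Blueprint
from i_vis.core.login import login, admin_required

admin_bp = Blueprint("admin", __name__)

@admin_bp.route("/admin/dashboard")
@admin_required
def dashboard():
    return "admins only"

def create_app() -> Flask:
    app = Flask(__name__)
    app.config["SECRET_KEY"] = "change-me"   # placeholder value
    login.init_app(app)
    app.register_blueprint(admin_bp)
    return app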
code/App.py
KasinSparks/Arduino_RGB_Lights
0
12427
from tkinter import * from ModeEnum import Mode import SerialHelper import Views.StaticView import Views.CustomWidgets.Silder from ColorEnum import Color from functools import partial from Views.CommandPanel import CommandPanel from Views.ListItem import ListItem from ProcessControl import ProcessManager, ProcessCommandEnum import os, signal menuBackgroundColor = "#262e30" menuForegroundColor = "#e5e4c5" menuActiveForegroundColor = menuForegroundColor menuActiveBackgroundColor = "#464743" mainBackgroundColor = "#1b2122" class App(Frame): def __init__(self,master=None): Frame.__init__(self,master) self.mode = Mode.Static self.ser = SerialHelper.SerialHelper() self.test = Views.StaticView.StaticView(self) self.sliderRed = Views.CustomWidgets.Silder.Silder(self, "Red", color=Color.RED) self.sliderGreen = Views.CustomWidgets.Silder.Silder(self, "Green", color=Color.GREEN) self.sliderBlue = Views.CustomWidgets.Silder.Silder(self, "Blue", color=Color.BLUE) self.grid() self.createWidgets() # Restart the RGB controller #f = open("../config/processctl", "w") #f.write("controller.py,start") #f.close() ##ProcessManager.sendCommand("controller.py", ProcessCommandEnum.ProcessCommandEnum.START) def createWidgets(self): self.cPanel = CommandPanel() self.quitButton= Button(self, text="Quit", command=self.quit) self.quitButton.grid() self.my_label = Label(self, text="My Label!") self.my_label.grid() self.connectedLabel = Label(self, text="Not Connected", foreground='red') self.connectedLabel.grid() self.test.grid() self.tempText = Label(self, text="NONE") self.tempText.grid() self.addButton = Button(self, text="Add", command=self.addValues) self.addButton.grid() # TODO: change the value to reflect the item selected index #self.addButton = Button(self, text="Add After Selected", command=partial(self.addValues, self.cPanel.getListItemIndex(self.cPanel._selectedItem))) # Hacky way of doing this... 
listItem could be done better self.addButton = Button(self, text="Add After Selected", command=partial(self.addValues, listItem='Not None')) self.addButton.grid() # TODO: Add at a random position self.addButton = Button(self, text="Add At A Random Position", command=partial(self.addValues, random=True)) self.addButton.grid() # test self.sliderRed.grid(column=0, row=0) self.sliderGreen.grid(column=1, row=0) self.sliderBlue.grid(column=2, row=0) self.delayAreaFrame = Frame(self) self.delayAreaFrame.grid(column=3, row=0) self.fadeValLabel = Label(self.delayAreaFrame, text="Fade Value:") self.fadeValLabel.grid(column=0, row=0) self.fadeVal = Entry(self.delayAreaFrame) self.fadeVal.grid(column=0, row=1) self.delayValLabel = Label(self.delayAreaFrame, text="Delay Value:") self.delayValLabel.grid(column=0, row=3) self.delayVal = Entry(self.delayAreaFrame) self.delayVal.grid(column=0, row=4) self.addDelayButton = Button(self.delayAreaFrame, text="Add Delay Value", command=self.addDelayValue) self.addDelayButton.grid(column=1, row=3, rowspan=2) self.cPanel.grid(column=4,row = 0) #self.cPanel.insert(END, ListItem(self.cPanel, "Insert Test 1")) self.my_menu = Menu(self, tearoff=0, activebackground=menuActiveBackgroundColor, background=menuBackgroundColor, activeforeground=menuActiveForegroundColor, foreground=menuForegroundColor ) #self.fileMenu = Menu(self.my_menu) #self.fileMenu.add_command(label="Exit", command=self.quit) self.my_menu.add_cascade(label="File", menu=self.fileMenu(self.my_menu)) self.my_menu.add_cascade(label="Ports", menu=self.portsMenu(self.my_menu)) self.my_menu.add_cascade(label="Mode", menu=self.modeMenu(self.my_menu)) def fileMenu(self, mainMenu): fileMenu = Menu(mainMenu, tearoff=0, activebackground=menuActiveBackgroundColor, background=menuBackgroundColor, activeforeground=menuActiveForegroundColor, foreground=menuForegroundColor ) fileMenu.add_command(label="Exit", command=self.quit) return fileMenu def portsMenu(self, mainMenu): portsMenu = Menu(mainMenu, tearoff=0, activebackground=menuActiveBackgroundColor, background=menuBackgroundColor, activeforeground=menuActiveForegroundColor, foreground=menuForegroundColor ) for sp in SerialHelper.getSerialPorts(): # Have this be a call to the function and supply the serial port as the arg functionCall = partial(self.selectPort, sp[0], self.connectedLabel) portsMenu.add_command(label=sp, command=functionCall) return portsMenu def selectPort(self, port, uiElement): color = 'red' text = 'Failed' if self.ser.connect(port): text = 'Connected on ' + port color = 'green' f = open("config/port", "w") f.write(port) f.close() # Restart the RGB controller ##f = open("../config/processctl", "w") ##f.write("controller.py,restart") ##f.close() ProcessManager.sendCommand("controller.py", ProcessCommandEnum.ProcessCommandEnum.RESTART) uiElement['foreground'] = color uiElement['text'] = text def modeMenu(self, mainMenu): menu = Menu(mainMenu, tearoff=0, activebackground=menuActiveBackgroundColor, background=menuBackgroundColor, activeforeground=menuActiveForegroundColor, foreground=menuForegroundColor ) for m in Mode: funcCall = partial(self.changeMode, m) menu.add_command(label=m, command=funcCall) return menu def changeMode(self, mode): print("Mode changed from: "+ (str) (self.mode) + " to: " + (str) (mode)) self.mode = mode loopingCondition = os.path.join(os.getcwd(), 'config', 'loopingCondition') f = open(loopingCondition, 'w') message = "LOOPING: " if self.mode == Mode.Dynamic: message += "TRUE;" elif self.mode == Mode.Static: message += 
"FALSE;" f.write(message) f.close() def parseFadeValue(self): fadeValStr = self.fadeVal.get() try: value = int(fadeValStr) if value < 1 or value > 255: print("Delay value out of byte range") return 1 except ValueError as err: print(err) return 1 return value def addValues(self, listItem=None, index=-1, random=False): if index is None: print("Index was None... Values not added.") return elif listItem is not None: if self.cPanel._selectedItem is None: print("No selected object... Value was not added.") return index = self.cPanel.getListItemIndex(self.cPanel._selectedItem) + 1 elif random: index = self.cPanel.getRandomIndex() tempString = self.paddNum(self.sliderRed.getValue()) + ',' + self.paddNum(self.sliderGreen.getValue()) + ',' + self.paddNum(self.sliderBlue.getValue()) + ',' + self.paddNum(self.parseFadeValue()) + ';' self.tempText['text'] = tempString #self.writeToFile(file="../config/command", text=tempString + '\n') self.cPanel.addItem(tempString, index) def addDelayValue(self): # Check range of value delayValStr = self.delayVal.get() try: value = int(delayValStr) if value < 1 or value > 255: print("Delay value out of byte range") return -1 except ValueError as err: print(err) return -1 delayValStr = "DELAY: " + delayValStr self.cPanel.addItem(delayValStr) def paddNum(self, num=0): if num > 255: print("Fade number > 255. Defaulting to 000") return "000" paddedZeros = "" # Generate the correct number of padding zeros if num < 100: paddedZeros += '0' if num < 10: paddedZeros += '0' # Pad the number paddedZeros += str(num) return paddedZeros def writeToFile(self, file=None, fileArgs='a', text=None): if file is None: print("No file to write to...") return f = open(file, fileArgs) f.write(text) #from SerialHelper import getSerialPorts #for sp in getSerialPorts(): # print(sp) # Start the app up! app = App() app.master.title("RGB Lights 3000") app.master.config(menu=app.my_menu, background=mainBackgroundColor) #subprocess.call(["./controller.py", "/dev/ttyUSB0"]) # Start up the app and the process manager pid = os.fork() if pid: # parent app.mainloop() os.kill(pid, signal.SIGTERM) else: # child exec(open("./code/ProcessControl/ProcessManager.py").read()) #os.execlp("python3", "python3", "./ProcessControl/ProcessManager.py") #os.system("controller.py") #app.mainloop() #print("here")
2.453125
2
tests/io/product/test_sidd_writing.py
ngageoint/SarPy
0
12428
import os import json import tempfile import shutil import unittest from sarpy.io.complex.sicd import SICDReader from sarpy.io.product.sidd import SIDDReader from sarpy.io.product.sidd_schema import get_schema_path from sarpy.processing.sidd.sidd_product_creation import create_detected_image_sidd, create_dynamic_image_sidd, create_csi_sidd from sarpy.processing.ortho_rectify import NearestNeighborMethod from tests import parse_file_entry try: from lxml import etree except ImportError: etree = None product_file_types = {} this_loc = os.path.abspath(__file__) file_reference = os.path.join(os.path.split(this_loc)[0], 'product_file_types.json') # specifies file locations if os.path.isfile(file_reference): with open(file_reference, 'r') as fi: the_files = json.load(fi) for the_type in the_files: valid_entries = [] for entry in the_files[the_type]: the_file = parse_file_entry(entry) if the_file is not None: valid_entries.append(the_file) product_file_types[the_type] = valid_entries sicd_files = product_file_types.get('SICD', []) def check_versus_schema(input_nitf, the_schema): reader = SIDDReader(input_nitf) sidd_bytes = reader.nitf_details.get_des_bytes(0) xml_doc = etree.fromstring(sidd_bytes) xml_schema = etree.XMLSchema(file=the_schema) return xml_schema.validate(xml_doc) class TestSIDDWriting(unittest.TestCase): @unittest.skipIf(len(sicd_files) == 0, 'No sicd files found') def test_sidd_creation(self): for fil in sicd_files: reader = SICDReader(fil) ortho_helper = NearestNeighborMethod(reader) # create a temp directory temp_directory = tempfile.mkdtemp() sidd_files = [] # create a basic sidd detected image with self.subTest(msg='Create version 1 detected image for file {}'.format(fil)): create_detected_image_sidd( ortho_helper, temp_directory, output_file='di_1.nitf', version=1) sidd_files.append('di_1.nitf') with self.subTest(msg='Create version 2 detected image for file {}'.format(fil)): create_detected_image_sidd( ortho_helper, temp_directory, output_file='di_2.nitf', version=2) sidd_files.append('di_2.nitf') # create a csi image with self.subTest(msg='Create version 1 csi for file {}'.format(fil)): create_csi_sidd( ortho_helper, temp_directory, output_file='csi_1.nitf', version=1) sidd_files.append('csi_1.nitf') with self.subTest(msg='Create version 2 csi for file {}'.format(fil)): create_csi_sidd( ortho_helper, temp_directory, output_file='csi_2.nitf', version=2) sidd_files.append('csi_2.nitf') # create a dynamic image with self.subTest(msg='Create version 1 subaperture stack for file {}'.format(fil)): create_dynamic_image_sidd( ortho_helper, temp_directory, output_file='sast_1.nitf', version=1, frame_count=3) sidd_files.append('sast_1.nitf') with self.subTest(msg='Create version 2 subaperture stack for file {}'.format(fil)): create_dynamic_image_sidd( ortho_helper, temp_directory, output_file='sast_2.nitf', version=2, frame_count=3) sidd_files.append('sast_2.nitf') # check that each sidd structure serialized according to the schema if etree is not None: for vers in [1, 2]: schema = get_schema_path('urn:SIDD:{}.0.0'.format(vers)) the_fil = 'di_{}.nitf'.format(vers) if the_fil in sidd_files: self.assertTrue( check_versus_schema(os.path.join(temp_directory, the_fil), schema), 'Detected image version {} structure not valid versus schema {}'.format(vers, schema)) the_fil = 'csi_{}.nitf'.format(vers) if the_fil in sidd_files: self.assertTrue( check_versus_schema(os.path.join(temp_directory, the_fil), schema), 'csi version {} structure not valid versus schema {}'.format(vers, schema)) 
the_fil = 'sast_{}.nitf'.format(vers) if the_fil in sidd_files: self.assertTrue( check_versus_schema(os.path.join(temp_directory, the_fil), schema), 'Dynamic image version {} structure not valid versus schema {}'.format(vers, schema)) # clean up the temporary directory shutil.rmtree(temp_directory)
2.375
2
src/westpa/tools/wipi.py
burntyellow/adelman_ci
0
12429
<reponame>burntyellow/adelman_ci import numpy as np import scipy.sparse as sp from westpa.tools import Plotter # A useful dataclass used as a wrapper for w_ipa to facilitate # ease-of-use in ipython/jupyter notebooks/sessions. # It basically just wraps up numpy arrays and dicts. class WIPIDataset(object): def __init__(self, raw, key): self.__dict__ = {} self.raw = raw self.name = key def __repr__(self): if isinstance(self.__dict__['raw'], dict): return repr(self.__dir__()) else: return repr(self.raw) def __getitem__(self, value): if not isinstance(value, str): return self.__dict__['raw'][value] if value in list(self.__dict__['raw'].keys()): return self.__dict__['raw'][value] elif value in list(self.__dict__.keys()): return self.__dict__[value] def __setitem__(self, key, value): self.__dict__[key] = value def __getattr__(self, value): # Check if it's an attribute of the underlying datatype. # If not, just use the getitem function. if value in dir(self.__dict__['raw']): return getattr(self.__dict__['raw'], value) else: return self.__getitem__(value) def __setattr__(self, key, value): self.__dict__[key] = value def __dir__(self): dict_keys = list(self.__dict__.keys()) remove = ['raw', 'name', '__dict__', 'plotter'] for i in remove: try: dict_keys.remove(str(i)) except: pass # We don't enforce that this is a dictionary. if isinstance(self.__dict__['raw'], dict): return sorted(set(list(self.raw.keys()) + dict_keys)) else: return sorted(set(dict_keys)) def keys(self): print(self.__dir__()) # We want to override the basic math functions, now, so... this is only valid for numpy sets. def __add__(self, other): return self.__dict__['raw'] + other def __radd__(self, other): return other + self.__dict__['raw'] def __sub__(self, other): return self.__dict__['raw'] - other def __rsub__(self, other): return other - self.__dict__['raw'] def __mul__(self, other): return self.__dict__['raw'] * other def __rmul__(self, other): return other * self.__dict__['raw'] def __div__(self, other): return self.__dict__['raw'] / other def __floordiv__(self, other): return self.__dict__['raw'] // other def __rdiv__(self, other): return other / self.__dict__['raw'] def __mod__(self, other): return self.__dict__['raw'] % other def __pow__(self, other): return self.__dict__['raw'] ** other def __lshift__(self, other): return self.__dict__['raw'] << other def __rshift__(self, other): return self.__dict__['raw'] >> other def __and__(self, other): return self.__dict__['raw'] & other def __eq__(self, other): return self.__dict__['raw'] == other def __ne__(self, other): return self.__dict__['raw'] != other def __lt__(self, other): return self.__dict__['raw'] < other def __gt__(self, other): return self.__dict__['raw'] > other def __le__(self, other): return self.__dict__['raw'] <= other def __ge__(self, other): return self.__dict__['raw'] >= other def __xor__(self, other): return self.__dict__['raw'] ^ other def __or__(self, other): return self.__dict__['raw'] | other #def __iadd__(self, other): # return self.__dict__['raw'] += other #def __isub__(self, other): # return self.__dict__['raw'] -= other #def __imul__(self, other): # return self.__dict__['raw'] *= other #def __idiv__(self, other): # return self.__dict__['raw'] /= other #def __ifloordiv__(self, other): # return self.__dict__['raw'] //= other #def __imod__(self, other): # return self.__dict__['raw'] %= other #def __ipow__(self, other): # return self.__dict__['raw'] **= other #def __ilshift__(self, other): # return self.__dict__['raw'] <<= other #def __irshift__(self, 
other): # return self.__dict__['raw'] >>= other #def __iand__(self, other): # return self.__dict__['raw'] &= other #def __ixor__(self, other): # return self.__dict__['raw'] ^= other #def __ior__(self, other): # return self.__dict__['raw'] |= other # Similar to the above, but slightly expanded to contain information from analysis files. class KineticsIteration(object): def __init__(self, kin_h5file, index, assign, iteration=-1): self.__dict__ = {} self.h5file = kin_h5file # Keys: _2D_h5keys = [ 'conditional_flux_evolution', 'rate_evolution' ] _1D_h5keys = [ 'state_pop_evolution', 'color_prob_evolution', 'target_flux_evolution' ] for key in _2D_h5keys: try: self.__dict__[key] = self.__2D_with_error__(key, index, assign) except: self.__dict__[key] = None for key in _1D_h5keys: try: self.__dict__[key] = self.__1D_with_error__(key, index, assign) except: self.__dict__[key] = None try: self.__dict__['total_fluxes'] = WIPIDataset(raw=np.array(self.h5file['total_fluxes']), key='total_fluxes') # We'll have to update this to make things better... #self.__dict__['total_fluxes'].plotter = Plotter(self.h5file['total_fluxes'][...], 'Total Fluxes', iteration=iteration, interface='text') #self.__dict__['total_fluxes'].plot = self.__dict__['total_fluxes'].plotter.plot except: pass def __repr__(self): return repr(self.__dir__()) def __getitem__(self, value): if value in list(self.__dict__.keys()): return self.__dict__[value] def __setitem__(self, key, value): self.__dict__[key] = value def __getattr__(self, value): if value in list(self.__dict__.keys()): return self.__dict__[value] def __setattr__(self, key, value): self.__dict__[key] = value def __dir__(self): dict_keys = list(self.__dict__.keys()) # We don't want to show the plotter class; just the plot function remove = [ 'h5file', '__dict__'] for i in remove: try: dict_keys.remove(str(i)) except: pass return sorted(set(dict_keys)) def keys(self): print(self.__dir__()) # We seriously need to rename this. # It's similar to the global WIPDataset, but has some nice pretty print functions. class __custom_dataset__(object): # This is just allow it to be indexed via properties. # Not a huge thing, but whatever. def __init__(self, raw, assign, key): self.__dict__ = {} self.raw = raw self.name = key self.assign = assign self.nstates = assign.attrs['nstates'] self.dim = len(raw.shape) def __repr__(self): return repr(self.__dir__()) def __getitem__(self, value): if value in self.__dict__['raw'].dtype.names: return self.__dict__['raw'][value] elif value in list(self.__dict__.keys()): return self.__dict__[value] def __setitem__(self, key, value): self.__dict__[key] = value def __getattr__(self, value): if value in self.__dict__['raw'].dtype.names: return self.__dict__['raw'][value] elif value in list(self.__dict__.keys()): return self.__dict__[value] def __setattr__(self, key, value): self.__dict__[key] = value def __dir__(self): dict_keys = list(self.__dict__.keys()) # We don't want to show the plotter class; just the plot function remove = ['assign', 'dim', 'nstates', 'plotter', '__dict__'] for i in remove: try: dict_keys.remove(str(i)) except: pass return sorted(set(list(self.raw.dtype.names) + dict_keys)) def keys(self): print(self.__dir__()) def _repr_pretty_(self, p, cycle): if self.dim == 1: return self._1D_repr_pretty_(p, cycle) if self.dim == 2: return self._2D_repr_pretty_(p, cycle) def _1D_repr_pretty_(self, p, cycle): # We're just using this as a way to print things in a pretty way. They can still be indexed appropriately. 
# Stolen shamelessly from westtools/kinetics_tool.py maxlabellen = max(list(map(len,self.assign['state_labels']))) p.text('') p.text('{name} data:\n'.format(name=self.name)) for istate in range(self.nstates): p.text('{:{maxlabellen}s}: mean={:21.15e} CI=({:21.15e}, {:21.15e}) * tau^-1\n' .format(self.assign['state_labels'][istate], self.raw['expected'][istate], self.raw['ci_lbound'][istate], self.raw['ci_ubound'][istate], maxlabellen=maxlabellen)) p.text('To access data, index via the following names:\n') p.text(str(self.__dir__())) return " " def _2D_repr_pretty_(self, p, cycle): # We're just using this as a way to print things in a pretty way. They can still be indexed appropriately. # Stolen shamelessly from westtools/kinetics_tool.py maxlabellen = max(list(map(len,self.assign['state_labels']))) p.text('') p.text('{name} data:\n'.format(name=self.name)) for istate in range(self.nstates): for jstate in range(self.nstates): if istate == jstate: continue p.text('{:{maxlabellen}s} -> {:{maxlabellen}s}: mean={:21.15e} CI=({:21.15e}, {:21.15e}) * tau^-1\n' .format(self.assign['state_labels'][istate], self.assign['state_labels'][jstate], self.raw['expected'][istate, jstate], self.raw['ci_lbound'][istate, jstate], self.raw['ci_ubound'][istate, jstate], maxlabellen=maxlabellen)) p.text('To access data, index via the following names:\n') p.text(str(self.__dir__())) return " " def __2D_with_error__(self, h5key, index, assign): # Check the start and stop, calculate the block size, and index appropriately. # While we could try and automatically generate this above, it's a little more consistent to try it here. # This should show the first block for which the current iteration has contributed data. self.step_iter = (self.h5file[h5key]['iter_stop'][0] - self.h5file[h5key]['iter_start'][0])[1,0] value = ((index-self.h5file.attrs['iter_start']) // self.step_iter) if value < 0: value = 0 raw = self.h5file[h5key][value, :, :] error = (raw['ci_ubound'] - raw['ci_lbound']) / (2*raw['expected']) expected = raw['expected'] raw = self.__custom_dataset__(raw, assign, h5key) raw.error = error raw.plotter = Plotter(self.h5file, h5key, iteration=value, interface='text') raw.plot = raw.plotter.plot return raw def __1D_with_error__(self, h5key, index, assign): self.step_iter = (self.h5file[h5key]['iter_stop'][0] - self.h5file[h5key]['iter_start'][0])[1] value = ((index-self.h5file.attrs['iter_start']) // self.step_iter) if value < 0: value = 0 raw = self.h5file[h5key][value, :] error = (raw['ci_ubound'] - raw['ci_lbound']) / (2*raw['expected']) expected = raw['expected'] raw = self.__custom_dataset__(raw, assign, h5key) raw.error = error raw.plotter = Plotter(self.h5file, h5key, iteration=value, interface='text') raw.plot = raw.plotter.plot return raw class __get_data_for_iteration__(object): ''' All interesting data from an iteration (current/past). Whenever you change the scheme or iteration, this dictionary is automatically updated. For the current iteration, it's keyed to the current seg_id. For the past iteration, it's keyed to the seg_id in the CURRENT iteration such that: w.current[X] & w.past[X] returns information about seg_id X in the current iteration and information on seg_ID X's PARENT in the preceding iteration. Can be indexed via a seg_id, or like a dictionary with the following keys: kinavg, weights, pcoord, auxdata (optional), parents, summary, seg_id, walkers, states, bins kinavg, states, and bins refer to the output from w_kinavg and w_assign for this iteration and analysis scheme. 
They are NOT dynamics bins, but the bins defined in west.cfg. Has the following properties: .minweight, .maxweight which return all properties of the segment that matches those criteria in the selected iteration. If you change the analysis scheme, so, too, will the important values. ''' def __init__(self, parent, value, seg_ids = None): ''' Initializes and sets the correct data. ''' # We've classed this so that we can override some of the normal functions and allow indexing via seg_id self.__dict__ = {} # Is this function thread safe? iter_group = parent.data_reader.get_iter_group(value) #iter_group = parent.west['iterations/iter_{num:08d}'.format(num=value)] self.parent = parent current = {} current['iteration'] = value if seg_ids is None: seg_ids = range(0, iter_group['seg_index']['weight'].shape[0]) # Just make these easier to access. current['weights'] = iter_group['seg_index']['weight'][seg_ids] current['pcoord'] = iter_group['pcoord'][...][seg_ids, :, :] try: current['auxdata'] = {} for key in list(iter_group['auxdata'].keys()): current['auxdata'][key] = iter_group['auxdata'][key][...][seg_ids, :] except: pass current['parents'] = iter_group['seg_index']['parent_id'][seg_ids] current['summary'] = parent.data_reader.data_manager.get_iter_summary(int(value)) current['seg_id'] = np.array(list(range(0, iter_group['seg_index'].shape[0])))[seg_ids] current['walkers'] = current['summary']['n_particles'] current['states'] = parent.assign['trajlabels'][value-1, :current['walkers'], :][seg_ids] current['bins'] = parent.assign['assignments'][value-1, :current['walkers'], :][seg_ids] # Calculates the bin population for this iteration. nbins = parent.assign['state_map'].shape[0] # We have to take the 'unknown' state into account nstates = parent.assign['state_labels'].shape[0] + 1 # Temporarily disabled while I sort out the fact that we shouldn't be using data from w_assign for state populations. #current['plot'] = Plotter(parent.direct, parent.reweight, parent.iteration, parent.assign['bin_labels'], parent.assign['state_labels'], current['populations'].states, current['populations'].bins, parent.interface) # Now we'll load up the results of the kinetics analysis. current['direct'] = KineticsIteration(parent.direct, value, parent.assign, value) evolution_datasets = [ 'rate_evolution', 'conditional_flux_evolution', 'state_pop_evolution', 'color_prob_evolution' , 'total_fluxes', 'target_flux_evolution'] # We want to load these up as... oh, who knows, I suppose? try: current['reweight'] = KineticsIteration(parent.reweight, value, parent.assign, value) # We'll make this not a sparse matrix... matrix = parent.reweight['iterations/iter_{:08d}'.format(value)] # Assume color. current['instant_matrix'] = sp.coo_matrix((matrix['flux'][...], (matrix['rows'][...], matrix['cols'][...])), shape=((nbins-1)*2, (nbins-1)*2)).todense() reweighting = True except: # This analysis hasn't been enabled, so we'll simply return the default error message. current['reweight'] = parent.reweight['rate_evolution'] current['instant_matrix'] = parent.reweight['bin_populations'] current['matrix'] = parent.reweight['bin_populations'] reweighting = False # Check if the analysis has been enabled. If yes, make them specify dataset dictionaries. If not, return the thing. 
if reweighting: for key in evolution_datasets: current[key] = WIPIDataset(raw={ 'direct': current['direct'][key], 'reweight': current['reweight'][key] }, key='a') else: for key in evolution_datasets: current[key] = WIPIDataset(raw={ 'direct': current['direct'][key] }, key='direct') self.raw = current def __repr__(self): ''' Returns the dictionary containing the iteration's values. ''' return repr(self.__dir__()) def keys(self): ''' Returns the keys function of the internal dictionary. ''' return list(self.__dict__['raw'].keys()) def __setitem__(self, key, value): self.__dict__[key] = value def __getattr__(self, value): if value in list(self.__dict__['raw'].keys()): return self.__dict__['raw'][value] elif value in list(self.__dict__.keys()): return self.__dict__[value] def __setattr__(self, key, value): self.__dict__[key] = value def __dir__(self): dict_keys = list(self.__dict__.keys()) dict_keys += ['maxweight', 'minweight', 'walkers', 'aggregate_walkers', 'successful_trajectories'] remove = ['__dict__'] for i in remove: try: dict_keys.remove(str(i)) except: pass return sorted(set(list(self.__dict__['raw'].keys()) + dict_keys)) @property def maxweight(self): ''' Returns information about the segment which has the largest weight for this iteration. ''' # Is there a faster or cleaner way to do this? Ah, maybe. walker = np.where(self.raw['weights'] == np.max(self.raw['weights']))[0][0] return self.__getitem__(walker) @property def minweight(self): ''' Returns information about the segment which has the smallest weight for this iteration. ''' walker = np.where(self.raw['weights'] == np.min(self.raw['weights']))[0][0] return self.__getitem__(walker) @property def successful_trajectories(self): ''' Returns which trajectories are successful. ''' #walker = np.where(self.raw['weights'] == np.min(self.raw['weights']))[0][0] # Find where we have a transition.... state_changes = np.where(self.raw['states'][:,:-1] != self.raw['states'][:,1:]) walkers = state_changes[0] # The index of the state change. new_states = state_changes[1] + 1 old_states = state_changes[1] walker = {} for z, (i, j) in enumerate(zip(old_states, new_states)): #if self.raw['states'][walkers[z], i] == istate and self.raw['states'][walkers[z], j] == jstate: istate = self.raw['states'][walkers[z], i] jstate = self.raw['states'][walkers[z], j] #print(z,i,j, istate, jstate) try: walker[istate,jstate].append(walkers[z]) except: walker[istate,jstate] = [walkers[z]] walker = WIPIDataset(raw=walker, key=None) return walker @property def walkers(self): ''' The number of walkers active in the current iteration. ''' # Returns number of walkers for iteration X. Assumes current iteration, but can go with different one. # Make this just... yeah, put this elsewhere. return self.parent.west['summary']['n_particles'][self.iteration-1] @property def aggregate_walkers(self): return self.parent.west['summary']['n_particles'][:self.iteration].sum() def __getitem__(self, value): ''' Responsible for handling whether this is treated like a dictionary of data sets, or an array of walker data. ''' # Check to see if we're indexing via any of the active string types. We should probably break it down via string or int, instead of 'what exists and what doesn't', but it works for now. 
active_items = ['kinavg', 'statepops', 'weights', 'pcoord', 'auxdata', 'parents', 'summary', 'seg_id', 'walkers', 'states', 'bins', 'populations', 'plot', 'instant_matrix', 'kinrw', 'matrix', 'rwstatepops'] #if value in active_items: if isinstance(value, str): # This should handle everything. Otherwise... try: return self.raw[value] except: print('{} is not a valid data structure.'.format(value)) elif isinstance(value, int) or isinstance(value, np.int64): # Otherwise, we assume they're trying to index for a seg_id. if value < self.walkers: current = {} current['plotter'] = {} for i in ['pcoord']: current[i] = WIPIDataset(raw=self.raw[i][value,:,:], key=i) current[i].plotter = Plotter(self.raw[i][value,:,:], i, iteration=self.iteration, interface='text') current[i].plot = current[i].plotter.plot current['states'] = self.raw['states'][value, :] current['bins'] = self.raw['bins'][value, :] current['parents'] = self.raw['parents'][value] current['seg_id'] = self.raw['seg_id'][value] current['weights'] = self.raw['weights'][value] try: current['auxdata'] = {} for key in list(self.raw['auxdata'].keys()): current['auxdata'][key] = self.raw['auxdata'][key][value] except: pass current = WIPIDataset(current, 'Segment {} in Iter {}'.format(value, self.iteration)) return current else: print('INVALID SEG_ID {}. SEG_ID should be less than {}.'.format(value, self.walkers)) # This handles the 'schemes', and all assorted data. class WIPIScheme(object): def __init__(self, scheme, name, parent, settings): self.__dict__ = {} self.raw = scheme #self.name = parent._schemename self.__analysis_schemes__ = scheme self.iteration = parent.iteration self.__dict__['name'] = None self.__settings = settings # Are these necessary? We'll try to edit these out. self.parent = parent self.data_reader = parent.data_reader def __setattr__(self, key, value): self.__dict__[key] = value def __repr__(self): return self.__str__() def __str__(self): # Right now, this returns w.scheme, NOT necessarily what we're pulling from... # So you can rely on this, but it's confusing. if self.name!= None: # Set it to None, then return the original value. rtn_string = self.name self.name = None return rtn_string else: return str(self.scheme) def __getitem__(self, value): if not isinstance(value, str): for ischeme, schemename in enumerate(self.__dict__['raw'].keys()): if ischeme == value: value = schemename # Check for some weird Ipython stuff. if '_ipython' in value: return self self.name = None if value in list(self.__dict__['raw'].keys()): # If we have it in there... self.name = value return self elif value in list(self.__dict__.keys()): self.name = value return self elif value in self.__dir__(): self.name = value return self def __getattr__(self, value): return self.__getitem__(value) def __dir__(self): dict_keys = ['assign', 'direct', 'state_labels', 'bin_labels', 'west', 'reweight', 'current', 'past', 'iteration'] if self.name != None: return sorted(set(dict_keys)) else: return sorted(set(self.__analysis_schemes__.keys())) @property def scheme(self): self.name = None return self.parent._schemename @property def list_schemes(self): ''' Lists what schemes are configured in west.cfg file. 
Schemes should be structured as follows, in west.cfg: west: system: analysis: directory: analysis analysis_schemes: scheme.1: enabled: True states: - label: unbound coords: [[7.0]] - label: bound coords: [[2.7]] bins: - type: RectilinearBinMapper boundaries: [[0.0, 2.80, 7, 10000]] ''' print("The following schemes are available:") print("") for ischeme, scheme in enumerate(self.__settings['analysis_schemes']): print('{}. Scheme: {}'.format(ischeme, scheme)) print("") print("Set via name, or via the index listed.") print("") print("Current scheme: {}".format(self.scheme)) @property def iteration(self): return self.parent.iteration @property def assign(self): return self.__analysis_schemes__[str(self.name)]['assign'] @property def direct(self): ''' The output from w_direct.py from the current scheme. ''' return self.__analysis_schemes__[str(self.name)]['direct'] @property def state_labels(self): print("State labels and definitions!") for istate, state in enumerate(self.assign['state_labels']): print('{}: {}'.format(istate, state)) print('{}: {}'.format(istate+1, 'Unknown')) @property def bin_labels(self): print("Bin definitions! ") for istate, state in enumerate(self.assign['bin_labels']): print('{}: {}'.format(istate, state)) @property def west(self): return self.data_reader.data_manager.we_h5file @property def reweight(self): # Need to fix this... if self.__settings['analysis_schemes'][str(self.name)]['postanalysis'] == True: return self.__analysis_schemes__[str(self.name)]['reweight'] else: value = "This sort of analysis has not been enabled." current = { 'bin_prob_evolution': value, 'color_prob_evolution': value, 'conditional_flux_evolution': value, 'rate_evolution': value, 'state_labels': value, 'state_prob_evolution': value } current.update({ 'bin_populations': value, 'iterations': value }) return current @property def current(self): ''' The current iteration. See help for __get_data_for_iteration__ ''' return __get_data_for_iteration__(value=self.iteration, parent=self) @property def past(self): ''' The previous iteration. See help for __get_data_for_iteration__ ''' if self.iteration > 1: return __get_data_for_iteration__(value=self.iteration - 1, seg_ids=self.current['parents'], parent=self) else: print("The current iteration is 1; there is no past.")
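A brief, hedged usage sketch of the interactive interface described in the docstrings above. The object `w` is only assumed here (the docstrings refer to it as the analysis object, as in WESTPA's w_ipa tool); attribute names are taken from the class definitions above, and none of this is runnable without a WESTPA dataset.

# Illustrative only -- assumes `w` is the interactive analysis object that owns
# the scheme/iteration classes defined above (see the docstrings' use of w.current/w.past).
w.iteration = 25                 # choose an iteration to inspect
walkers = w.current              # __get_data_for_iteration__ for this iteration
print(walkers.keys())            # 'weights', 'pcoord', 'parents', 'seg_id', ...
print(walkers.maxweight)         # segment carrying the largest weight
seg = walkers[0]                 # per-walker view, indexed by seg_id
parent = w.past[0]               # the same walker's parent data from the previous iteration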
2.671875
3
src/M5_random_module.py
posguy99/comp660-fall2020
0
12430
import random

# use of the random module
print(random.random())          # a float value >= 0.0 and < 1.0
print(random.random()*100)      # a float value >= 0.0 and < 100.0

# use of the randint method
print(random.randint(1, 100))   # an int from 1 to 100
print(random.randint(101, 200)) # an int from 101 to 200
print(random.randint(0, 7))     # an int from 0 to 7

die1 = random.randint(1, 6)
die2 = random.randint(1, 6)
print("Your roll: ", die1, die2)

print(random.randrange(1, 100))      # an int from 1 to 99
print(random.randrange(100, 200, 2)) # an even int from 100 to 198
print(random.randrange(11, 250, 2))  # an odd int from 11 to 249
3.796875
4
lnt/graphics/styles.py
flotwig/lnt
7
12431
from PyInquirer import style_from_dict, Token, prompt, Separator

from lnt.graphics.utils import vars_to_string

# Mark styles
prompt_style = style_from_dict({
    Token.Separator: '#6C6C6C',
    Token.QuestionMark: '#FF9D00 bold',
    # Token.Selected: '',  # default
    Token.Selected: '#5F819D',
    Token.Pointer: '#FF9D00 bold',
    Token.Instruction: '',  # default
    Token.Answer: '#5F819D bold',
    Token.Question: '',
})

# Mark prompt configurations
def get_channel_choice_from(channels):
    choices = [
        {'name': vars_to_string(c_id, c['local_balance'], c['remote_balance'], nick=None)}
        for c_id, c in channels.items()
    ]
    validate = lambda answer: 'You must choose at least one channel' if len(answer) == 0 else True

    return {
        "type": "checkbox",
        "qmark": "⚡️",
        "message": "CHOOSE FROM nick, channel id, local_balance, remote_balance, graphic",
        "name": "channel_choices_from",
        "choices": choices,
        "validate": validate,
    }


def get_channel_choice_to(channels):
    choices = [
        {'name': vars_to_string(c_id, c['local_balance'], c['remote_balance'], nick=None)}
        for c_id, c in channels.items()
    ]

    return {
        'type': 'list',
        'message': 'CHOOSE TO nick, channel id, local_balance, remote_balance, graphic',
        "name": "channel_choices_to",
        'choices': choices
    }
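A minimal sketch of how these question builders are typically fed to PyInquirer's prompt together with prompt_style. The shape of the channels dict is an assumption inferred from the keys the helpers read; the channel ids are placeholders.

# Illustrative usage sketch, assuming the module above is importable.
from PyInquirer import prompt
from lnt.graphics.styles import get_channel_choice_to, prompt_style

channels = {
    "700123x456x1": {"local_balance": 250000, "remote_balance": 750000},
    "700124x789x0": {"local_balance": 900000, "remote_balance": 100000},
}

question = get_channel_choice_to(channels)            # a single-select 'list' question
answers = prompt([question], style=prompt_style)      # renders the themed prompt
print(answers["channel_choices_to"])                  # the chosen channel string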
2.9375
3
dvrip.py
jackkum/python-dvr
149
12432
<filename>dvrip.py import os import struct import json from time import sleep import hashlib import threading from socket import socket, AF_INET, SOCK_STREAM, SOCK_DGRAM from datetime import * from re import compile import time import logging class SomethingIsWrongWithCamera(Exception): pass class DVRIPCam(object): DATE_FORMAT = "%Y-%m-%d %H:%M:%S" CODES = { 100: "OK", 101: "Unknown error", 102: "Unsupported version", 103: "Request not permitted", 104: "User already logged in", 105: "User is not logged in", 106: "Username or password is incorrect", 107: "User does not have necessary permissions", 203: "Password is incorrect", 511: "Start of upgrade", 512: "Upgrade was not started", 513: "Upgrade data errors", 514: "Upgrade error", 515: "Upgrade successful", } QCODES = { "AuthorityList": 1470, "Users": 1472, "Groups": 1474, "AddGroup": 1476, "ModifyGroup": 1478, "DelGroup": 1480, "AddUser": 1482, "ModifyUser": 1484, "DelUser": 1486, "ModifyPassword": <PASSWORD>, "AlarmInfo": 1504, "AlarmSet": 1500, "ChannelTitle": 1046, "EncodeCapability": 1360, "General": 1042, "KeepAlive": 1006, "OPMachine": 1450, "OPMailTest": 1636, "OPMonitor": 1413, "OPNetKeyboard": 1550, "OPPTZControl": 1400, "OPSNAP": 1560, "OPSendFile": 0x5F2, "OPSystemUpgrade": 0x5F5, "OPTalk": 1434, "OPTimeQuery": 1452, "OPTimeSetting": 1450, "NetWork.NetCommon": 1042, "OPNetAlarm": 1506, "SystemFunction": 1360, "SystemInfo": 1020, } KEY_CODES = { "M": "Menu", "I": "Info", "E": "Esc", "F": "Func", "S": "Shift", "L": "Left", "U": "Up", "R": "Right", "D": "Down", } OK_CODES = [100, 515] PORTS = { "tcp": 34567, "udp": 34568, } def __init__(self, ip, **kwargs): self.logger = logging.getLogger(__name__) self.ip = ip self.user = kwargs.get("user", "admin") hash_pass = kwargs.get("hash_pass") self.hash_pass = kwargs.get("hash_pass", self.sofia_hash(kwargs.get("password", ""))) self.proto = kwargs.get("proto", "tcp") self.port = kwargs.get("port", self.PORTS.get(self.proto)) self.socket = None self.packet_count = 0 self.session = 0 self.alive_time = 20 self.alive = None self.alarm = None self.alarm_func = None self.busy = threading.Condition() def debug(self, format=None): self.logger.setLevel(logging.DEBUG) ch = logging.StreamHandler() if format: formatter = logging.Formatter(format) ch.setFormatter(formatter) self.logger.addHandler(ch) def connect(self, timeout=10): try: if self.proto == "tcp": self.socket_send = self.tcp_socket_send self.socket_recv = self.tcp_socket_recv self.socket = socket(AF_INET, SOCK_STREAM) self.socket.connect((self.ip, self.port)) elif self.proto == "udp": self.socket_send = self.udp_socket_send self.socket_recv = self.udp_socket_recv self.socket = socket(AF_INET, SOCK_DGRAM) else: raise f"Unsupported protocol {self.proto}" # it's important to extend timeout for upgrade procedure self.timeout = timeout self.socket.settimeout(timeout) except OSError: raise SomethingIsWrongWithCamera('Cannot connect to camera') def close(self): try: self.alive.cancel() self.socket.close() except: pass self.socket = None def udp_socket_send(self, bytes): return self.socket.sendto(bytes, (self.ip, self.port)) def udp_socket_recv(self, bytes): data, _ = self.socket.recvfrom(bytes) return data def tcp_socket_send(self, bytes): try: return self.socket.sendall(bytes) except: return None def tcp_socket_recv(self, bufsize): try: return self.socket.recv(bufsize) except: return None def receive_with_timeout(self, length): received = 0 buf = bytearray() start_time = time.time() while True: data = self.socket_recv(length - received) 
buf.extend(data) received += len(data) if length == received: break elapsed_time = time.time() - start_time if elapsed_time > self.timeout: return None return buf def receive_json(self, length): data = self.receive_with_timeout(length) if data is None: return {} self.packet_count += 1 self.logger.debug("<= %s", data) reply = json.loads(data[:-2]) return reply def send(self, msg, data={}, wait_response=True): if self.socket is None: return {"Ret": 101} # self.busy.wait() self.busy.acquire() if hasattr(data, "__iter__"): data = bytes(json.dumps(data, ensure_ascii=False), "utf-8") pkt = ( struct.pack( "BB2xII2xHI", 255, 0, self.session, self.packet_count, msg, len(data) + 2, ) + data + b"\x0a\x00" ) self.logger.debug("=> %s", pkt) self.socket_send(pkt) if wait_response: reply = {"Ret": 101} data = self.socket_recv(20) if data is None or len(data) < 20: return None ( head, version, self.session, sequence_number, msgid, len_data, ) = struct.unpack("BB2xII2xHI", data) reply = self.receive_json(len_data) self.busy.release() return reply def sofia_hash(self, password=""): md5 = hashlib.md5(bytes(password, "utf-8")).digest() chars = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz" return "".join([chars[sum(x) % 62] for x in zip(md5[::2], md5[1::2])]) def login(self): if self.socket is None: self.connect() data = self.send( 1000, { "EncryptType": "MD5", "LoginType": "DVRIP-Web", "PassWord": <PASSWORD>, "UserName": self.user, }, ) if data is None or data["Ret"] not in self.OK_CODES: return False self.session = int(data["SessionID"], 16) self.alive_time = data["AliveInterval"] self.keep_alive() return data["Ret"] in self.OK_CODES def getAuthorityList(self): data = self.send(self.QCODES["AuthorityList"]) if data["Ret"] in self.OK_CODES: return data["AuthorityList"] else: return [] def getGroups(self): data = self.send(self.QCODES["Groups"]) if data["Ret"] in self.OK_CODES: return data["Groups"] else: return [] def addGroup(self, name, comment="", auth=None): data = self.set_command( "AddGroup", { "Group": { "AuthorityList": auth or self.getAuthorityList(), "Memo": comment, "Name": name, }, }, ) return data["Ret"] in self.OK_CODES def modifyGroup(self, name, newname=None, comment=None, auth=None): g = [x for x in self.getGroups() if x["Name"] == name] if g == []: print(f'Group "{name}" not found!') return False g = g[0] data = self.send( self.QCODES["ModifyGroup"], { "Group": { "AuthorityList": auth or g["AuthorityList"], "Memo": comment or g["Memo"], "Name": newname or g["Name"], }, "GroupName": name, }, ) return data["Ret"] in self.OK_CODES def delGroup(self, name): data = self.send( self.QCODES["DelGroup"], {"Name": name, "SessionID": "0x%08X" % self.session,}, ) return data["Ret"] in self.OK_CODES def getUsers(self): data = self.send(self.QCODES["Users"]) if data["Ret"] in self.OK_CODES: return data["Users"] else: return [] def addUser( self, name, password, comment="", group="user", auth=None, sharable=True ): g = [x for x in self.getGroups() if x["Name"] == group] if g == []: print(f'Group "{group}" not found!') return False g = g[0] data = self.set_command( "AddUser", { "User": { "AuthorityList": auth or g["AuthorityList"], "Group": g["Name"], "Memo": comment, "Name": name, "Password": self.sofia_hash(password), "Reserved": False, "Sharable": sharable, }, }, ) return data["Ret"] in self.OK_CODES def modifyUser( self, name, newname=None, comment=None, group=None, auth=None, sharable=None ): u = [x for x in self.getUsers() if x["Name"] == name] if u == []: print(f'User "{name}" not 
found!') return False u = u[0] if group: g = [x for x in self.getGroups() if x["Name"] == group] if g == []: print(f'Group "{group}" not found!') return False u["AuthorityList"] = g[0]["AuthorityList"] data = self.send( self.QCODES["ModifyUser"], { "User": { "AuthorityList": auth or u["AuthorityList"], "Group": group or u["Group"], "Memo": comment or u["Memo"], "Name": newname or u["Name"], "Password": "", "Reserved": u["Reserved"], "Sharable": sharable or u["Sharable"], }, "UserName": name, }, ) return data["Ret"] in self.OK_CODES def delUser(self, name): data = self.send( self.QCODES["DelUser"], {"Name": name, "SessionID": "0x%08X" % self.session,}, ) return data["Ret"] in self.OK_CODES def changePasswd(self, newpass="", oldpass=None, user=None): data = self.send( self.QCODES["ModifyPassword"], { "EncryptType": "MD5", "NewPassWord": self.sofia_hash(newpass), "PassWord": oldpass or <PASSWORD>, "SessionID": "0x%08X" % self.session, "UserName": user or self.user, }, ) return data["Ret"] in self.OK_CODES def channel_title(self, titles): if isinstance(titles, str): titles = [titles] self.send( self.QCODES["ChannelTitle"], { "ChannelTitle": titles, "Name": "ChannelTitle", "SessionID": "0x%08X" % self.session, }, ) def channel_bitmap(self, width, height, bitmap): header = struct.pack("HH12x", width, height) self.socket_send( struct.pack( "BB2xII2xHI", 255, 0, self.session, self.packet_count, 0x041A, len(bitmap) + 16, ) + header + bitmap ) reply, rcvd = self.recv_json() if reply and reply["Ret"] != 100: return False return True def reboot(self): self.set_command("OPMachine", {"Action": "Reboot"}) self.close() def setAlarm(self, func): self.alarm_func = func def clearAlarm(self): self.alarm_func = None def alarmStart(self): self.alarm = threading.Thread( name="DVRAlarm%08X" % self.session, target=self.alarm_thread, args=[self.busy], ) self.alarm.start() return self.get_command("", self.QCODES["AlarmSet"]) def alarm_thread(self, event): while True: event.acquire() try: ( head, version, session, sequence_number, msgid, len_data, ) = struct.unpack("BB2xII2xHI", self.socket_recv(20)) sleep(0.1) # Just for receive whole packet reply = self.socket_recv(len_data) self.packet_count += 1 reply = json.loads(reply[:-2]) if msgid == self.QCODES["AlarmInfo"] and self.session == session: if self.alarm_func is not None: self.alarm_func(reply[reply["Name"]], sequence_number) except: pass finally: event.release() if self.socket is None: break def set_remote_alarm(self, state): self.set_command( "OPNetAlarm", {"Event": 0, "State": state}, ) def keep_alive(self): ret = self.send( self.QCODES["KeepAlive"], {"Name": "KeepAlive", "SessionID": "0x%08X" % self.session}, ) if ret is None: self.close() return self.alive = threading.Timer(self.alive_time, self.keep_alive) self.alive.daemon = True self.alive.start() def keyDown(self, key): self.set_command( "OPNetKeyboard", {"Status": "KeyDown", "Value": key}, ) def keyUp(self, key): self.set_command( "OPNetKeyboard", {"Status": "KeyUp", "Value": key}, ) def keyPress(self, key): self.keyDown(key) sleep(0.3) self.keyUp(key) def keyScript(self, keys): for k in keys: if k != " " and k.upper() in self.KEY_CODES: self.keyPress(self.KEY_CODES[k.upper()]) else: sleep(1) def ptz(self, cmd, step=5, preset=-1, ch=0): CMDS = [ "DirectionUp", "DirectionDown", "DirectionLeft", "DirectionRight", "DirectionLeftUp", "DirectionLeftDown", "DirectionRightUp", "DirectionRightDown", "ZoomTile", "ZoomWide", "FocusNear", "FocusFar", "IrisSmall", "IrisLarge", "SetPreset", "GotoPreset", 
"ClearPreset", "StartTour", "StopTour", ] # ptz_param = { "AUX" : { "Number" : 0, "Status" : "On" }, "Channel" : ch, "MenuOpts" : "Enter", "POINT" : { "bottom" : 0, "left" : 0, "right" : 0, "top" : 0 }, "Pattern" : "SetBegin", "Preset" : -1, "Step" : 5, "Tour" : 0 } ptz_param = { "AUX": {"Number": 0, "Status": "On"}, "Channel": ch, "MenuOpts": "Enter", "Pattern": "Start", "Preset": preset, "Step": step, "Tour": 1 if "Tour" in cmd else 0, } return self.set_command( "OPPTZControl", {"Command": cmd, "Parameter": ptz_param}, ) def set_info(self, command, data): return self.set_command(command, data, 1040) def set_command(self, command, data, code=None): if not code: code = self.QCODES[command] return self.send( code, {"Name": command, "SessionID": "0x%08X" % self.session, command: data} ) def get_info(self, command): return self.get_command(command, 1042) def get_command(self, command, code=None): if not code: code = self.QCODES[command] data = self.send(code, {"Name": command, "SessionID": "0x%08X" % self.session}) if data["Ret"] in self.OK_CODES and command in data: return data[command] else: return data def get_time(self): return datetime.strptime(self.get_command("OPTimeQuery"), self.DATE_FORMAT) def set_time(self, time=None): if time is None: time = datetime.now() return self.set_command("OPTimeSetting", time.strftime(self.DATE_FORMAT)) def get_netcommon(self): return self.get_command("NetWork.NetCommon") def get_system_info(self): return self.get_command("SystemInfo") def get_general_info(self): return self.get_command("General") def get_encode_capabilities(self): return self.get_command("EncodeCapability") def get_system_capabilities(self): return self.get_command("SystemFunction") def get_camera_info(self, default_config=False): """Request data for 'Camera' from the target DVRIP device.""" if default_config: code = 1044 else: code = 1042 return self.get_command("Camera", code) def get_encode_info(self, default_config=False): """Request data for 'Simplify.Encode' from the target DVRIP device. 
Arguments: default_config -- returns the default values for the type if True """ if default_config: code = 1044 else: code = 1042 return self.get_command("Simplify.Encode", code) def recv_json(self, buf=bytearray()): p = compile(b".*({.*})") packet = self.socket_recv(0xFFFF) if not packet: return None, buf buf.extend(packet) m = p.search(buf) if m is None: return None, buf buf = buf[m.span(1)[1] :] return json.loads(m.group(1)), buf def get_upgrade_info(self): return self.get_command("OPSystemUpgrade") def upgrade(self, filename="", packetsize=0x8000, vprint=None): if not vprint: vprint = lambda x: print(x) data = self.set_command( "OPSystemUpgrade", {"Action": "Start", "Type": "System"}, 0x5F0 ) if data["Ret"] not in self.OK_CODES: return data vprint("Ready to upgrade") blocknum = 0 sentbytes = 0 fsize = os.stat(filename).st_size rcvd = bytearray() with open(filename, "rb") as f: while True: bytes = f.read(packetsize) if not bytes: break header = struct.pack( "BB2xII2xHI", 255, 0, self.session, blocknum, 0x5F2, len(bytes) ) self.socket_send(header + bytes) blocknum += 1 sentbytes += len(bytes) reply, rcvd = self.recv_json(rcvd) if reply and reply["Ret"] != 100: vprint("Upgrade failed") return reply progress = sentbytes / fsize * 100 vprint(f"Uploaded {progress:.2f}%") vprint("End of file") pkt = struct.pack("BB2xIIxBHI", 255, 0, self.session, blocknum, 1, 0x05F2, 0) self.socket_send(pkt) vprint("Waiting for upgrade...") while True: reply, rcvd = self.recv_json(rcvd) print(reply) if not reply: return if reply["Name"] == "" and reply["Ret"] == 100: break while True: data, rcvd = self.recv_json(rcvd) print(reply) if data is None: vprint("Done") return if data["Ret"] in [512, 514, 513]: vprint("Upgrade failed") return data if data["Ret"] == 515: vprint("Upgrade successful") self.socket.close() return data vprint(f"Upgraded {data['Ret']}%") def reassemble_bin_payload(self, metadata={}): def internal_to_type(data_type, value): if data_type == 0x1FC or data_type == 0x1FD: if value == 1: return "mpeg4" elif value == 2: return "h264" elif value == 3: return "h265" elif data_type == 0x1F9: if value == 1 or value == 6: return "info" elif data_type == 0x1FA: if value == 0xE: return "g711a" elif data_type == 0x1FE and value == 0: return "jpeg" return None def internal_to_datetime(value): second = value & 0x3F minute = (value & 0xFC0) >> 6 hour = (value & 0x1F000) >> 12 day = (value & 0x3E0000) >> 17 month = (value & 0x3C00000) >> 22 year = ((value & 0xFC000000) >> 26) + 2000 return datetime(year, month, day, hour, minute, second) length = 0 buf = bytearray() start_time = time.time() while True: data = self.receive_with_timeout(20) ( head, version, session, sequence_number, total, cur, msgid, len_data, ) = struct.unpack("BB2xIIBBHI", data) packet = self.receive_with_timeout(len_data) frame_len = 0 if length == 0: media = None frame_len = 8 (data_type,) = struct.unpack(">I", packet[:4]) if data_type == 0x1FC or data_type == 0x1FE: frame_len = 16 (media, metadata["fps"], w, h, dt, length,) = struct.unpack( "BBBBII", packet[4:frame_len] ) metadata["width"] = w * 8 metadata["height"] = h * 8 metadata["datetime"] = internal_to_datetime(dt) if data_type == 0x1FC: metadata["frame"] = "I" elif data_type == 0x1FD: (length,) = struct.unpack("I", packet[4:frame_len]) metadata["frame"] = "P" elif data_type == 0x1FA: (media, samp_rate, length) = struct.unpack( "BBH", packet[4:frame_len] ) elif data_type == 0x1F9: (media, n, length) = struct.unpack("BBH", packet[4:frame_len]) # special case of JPEG shapshots elif 
data_type == 0xFFD8FFE0: return packet else: raise ValueError(data_type) if media is not None: metadata["type"] = internal_to_type(data_type, media) buf.extend(packet[frame_len:]) length -= len(packet) - frame_len if length == 0: return buf elapsed_time = time.time() - start_time if elapsed_time > self.timeout: return None def snapshot(self, channel=0): command = "OPSNAP" self.send( self.QCODES[command], { "Name": command, "SessionID": "0x%08X" % self.session, command: {"Channel": channel}, }, wait_response=False, ) packet = self.reassemble_bin_payload() return packet def start_monitor(self, frame_callback, user={}, stream="Main"): params = { "Channel": 0, "CombinMode": "NONE", "StreamType": stream, "TransMode": "TCP", } data = self.set_command("OPMonitor", {"Action": "Claim", "Parameter": params}) if data["Ret"] not in self.OK_CODES: return data self.send( 1410, { "Name": "OPMonitor", "SessionID": "0x%08X" % self.session, "OPMonitor": {"Action": "Start", "Parameter": params}, }, wait_response=False, ) self.monitoring = True while self.monitoring: meta = {} frame = self.reassemble_bin_payload(meta) frame_callback(frame, meta, user) def stop_monitor(self): self.monitoring = False
2.3125
2
tests/test_renderer.py
0xflotus/maildown
626
12433
<filename>tests/test_renderer.py
import mock

from maildown import renderer
import mistune
import pygments
from pygments import lexers
from pygments.formatters import html
import premailer
import jinja2


def test_highlight_renderer(monkeypatch):
    monkeypatch.setattr(mistune, "escape", mock.MagicMock())
    monkeypatch.setattr(lexers, "get_lexer_by_name", mock.MagicMock())
    monkeypatch.setattr(html, "HtmlFormatter", mock.MagicMock())
    monkeypatch.setattr(pygments, "highlight", mock.MagicMock())

    lexers.get_lexer_by_name.return_value = True
    html.HtmlFormatter.return_value = {}

    r = renderer.HighlightRenderer()
    r.block_code("code")
    mistune.escape.assert_called_with("code")

    r.block_code("code", "python")
    lexers.get_lexer_by_name.assert_called_with("python", stripall=True)
    pygments.highlight.assert_called_with("code", True, {})


def test_generate_content(monkeypatch):
    monkeypatch.setattr(mistune, "Markdown", mock.MagicMock())
    monkeypatch.setattr(premailer, "transform", mock.MagicMock())
    monkeypatch.setattr(renderer, "HighlightRenderer", mock.MagicMock())
    monkeypatch.setattr(jinja2, "Template", mock.MagicMock())

    renderer.HighlightRenderer.return_value = 1
    premailer.transform.return_value = ""
    jinja2.Template.render.return_value = ""

    renderer.generate_content("")
    mistune.Markdown.assert_called_with(renderer=1)
2.328125
2
practice/practice_4/main.py
Norbert2808/programming
0
12434
from generator import *
from iterator import *


def nInput():
    while True:
        try:
            n = int(input("Enter n(size): "))
            if n <= 0:
                print("Input must be a positive integer!")
                continue
        except ValueError:
            print("Not the correct value n!")
            continue
        break
    return n


def intInput(message):
    while True:
        try:
            k = int(input(message))
        except ValueError:
            print("Not the correct value!")
            continue
        break
    return k


def printGenerator(gen):
    for i in gen:
        print(i)


def printIterator(iter):
    for i in range(0, n):
        print(iter.__next__())


if __name__ == "__main__":
    while True:
        print("Enter 1, if you want to generate prime Lucas Number.")
        print("Enter 2, if you want to iterate prime Lucas Number.")
        print("Or 0, if you want to get out: ")
        count = intInput("")
        if count == 1:
            n = nInput()
            print("First " + str(n) + " prime Lucas Number:")
            gen = generator(n)
            printGenerator(gen)
        elif count == 2:
            n = nInput()
            print("First " + str(n) + " prime Lucas Number:")
            iter = IteratorLucasNumbers()
            printIterator(iter)
        elif count == 0:
            break
        else:
            print("Enter 1, or 2, or 0!")
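The generator and iterator modules imported above are not included in this record. As a hedged illustration only, a generator() helper with the behavior the script expects (yield the first n Lucas numbers that are prime) could look like the sketch below; the real project code may differ.

# A guess at what generator.py might provide -- first n prime Lucas numbers.
def is_prime(m):
    if m < 2:
        return False
    d = 2
    while d * d <= m:
        if m % d == 0:
            return False
        d += 1
    return True


def generator(n):
    a, b = 2, 1          # Lucas sequence: 2, 1, 3, 4, 7, 11, 18, ...
    count = 0
    while count < n:
        if is_prime(a):
            count += 1
            yield a
        a, b = b, a + b
# list(generator(5)) -> [2, 3, 7, 11, 29]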
3.984375
4
setup.py
wgnet/grail
37
12435
<gh_stars>10-100
from setuptools import setup

version = '1.0.10'

setup(
    name='grail',
    version=version,
    classifiers=[
        'Intended Audience :: Developers',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.7',
    ],
    packages=[
        'grail',
    ],
    description='Grail is a library which allows test script creation based on steps. '
                'It helps to structure your tests and get rid of additional test documentation for your code.',
    include_package_data=True,
    author='<EMAIL>',
    author_email='<EMAIL>',
    url='https://github.com/wgnet/grail'
)
1.179688
1
tests/test_free.py
qingyunha/boltdb
7
12436
<reponame>qingyunha/boltdb<gh_stars>1-10
import os
import unittest
import tempfile

from boltdb import BoltDB


class TestFree(unittest.TestCase):

    def setUp(self):
        self.db = BoltDB(tempfile.mktemp())

    def tearDown(self):
        os.unlink(self.db.filename)

    def test_free(self):
        with self.db.update() as tx:
            b = tx.bucket()
            b.put(b"foo", b"bar")
        self.assertEqual(self.db.freelist.ids, [3])
        with self.db.update() as tx:
            b = tx.bucket()
            b.put(b"foo", b"bar")
        self.assertEqual(self.db.freelist.ids, [4])

    def test_free2(self):
        self.assertEqual(self.db.freepages(), [2])
        with self.db.update() as tx:
            b = tx.bucket()
            b.put(b"foo", b"bar")
        self.assertEqual(sorted(self.db.freepages()), [2, 3])
        with self.db.update() as tx:
            b = tx.bucket()
            b.put(b"foo", b"bar")
        self.assertEqual(sorted(self.db.freepages()), [2, 4])
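For context, a minimal usage sketch of the boltdb API as implied by the tests above; only calls that appear in this file are used, and the temporary path is a placeholder.

# Illustrative only: write one key in a transaction, then inspect freed pages.
import tempfile
from boltdb import BoltDB

db = BoltDB(tempfile.mktemp())
with db.update() as tx:            # read-write transaction, committed on exit
    tx.bucket().put(b"answer", b"42")
print(db.freepages())              # pages released after the copy-on-write update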
2.5
2
xen/xen-4.2.2/tools/xm-test/tests/xapi/01_xapi-vm_basic.py
zhiming-shen/Xen-Blanket-NG
1
12437
#!/usr/bin/python

# Copyright (C) International Business Machines Corp., 2006
# Author: <NAME> <<EMAIL>>

# Basic VM creation test

from XmTestLib import xapi
from XmTestLib.XenAPIDomain import XmTestAPIDomain
from XmTestLib import *
from xen.xend import XendAPIConstants
import commands
import os

try:
    # XmTestAPIDomain tries to establish a connection to XenD
    domain = XmTestAPIDomain()
except Exception, e:
    SKIP("Skipping test. Error: %s" % str(e))

vm_uuid = domain.get_uuid()
session = xapi.connect()

domain.start(startpaused=True)

res = session.xenapi.VM.get_power_state(vm_uuid)
if res != XendAPIConstants.XEN_API_VM_POWER_STATE[XendAPIConstants.XEN_API_VM_POWER_STATE_PAUSED]:
    FAIL("VM was not started in 'paused' state")

res = session.xenapi.VM.unpause(vm_uuid)
res = session.xenapi.VM.get_power_state(vm_uuid)
if res != XendAPIConstants.XEN_API_VM_POWER_STATE[XendAPIConstants.XEN_API_VM_POWER_STATE_RUNNING]:
    FAIL("VM could not be put into 'running' state")

console = domain.getConsole()

try:
    run = console.runCmd("cat /proc/interrupts")
except ConsoleError, e:
    saveLog(console.getHistory())
    FAIL("Could not access proc-filesystem")

res = session.xenapi.VM.pause(vm_uuid)
res = session.xenapi.VM.get_power_state(vm_uuid)
if res != XendAPIConstants.XEN_API_VM_POWER_STATE[XendAPIConstants.XEN_API_VM_POWER_STATE_PAUSED]:
    FAIL("VM could not be put into 'paused' state")

res = session.xenapi.VM.unpause(vm_uuid)
res = session.xenapi.VM.get_power_state(vm_uuid)
if res != XendAPIConstants.XEN_API_VM_POWER_STATE[XendAPIConstants.XEN_API_VM_POWER_STATE_RUNNING]:
    FAIL("VM could not be 'unpaused'")

domain.stop()
domain.destroy()
2.171875
2
formatter_sql.py
ZSCNetSupportDept/schedule-utils
0
12438
def format_sql(schedule, table):
    for week, week_schedule in schedule.items():
        print(f"UPDATE {table} SET `block`=0, `week`={week} WHERE `name`='{week_schedule.leader}';")
        for block, staffs in week_schedule.staffs.items():
            for staff in staffs:
                print(f"UPDATE {table} SET `block`={block}, `week`={week} WHERE `name`='{staff}';")
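A hedged example of the input shape format_sql expects: it only touches objects with a .leader attribute and a .staffs mapping, so a simple namespace stands in for whatever schedule class the project actually uses, and the table name is a placeholder.

# Illustrative only.
from types import SimpleNamespace
from formatter_sql import format_sql

week_plan = SimpleNamespace(
    leader="alice",
    staffs={1: ["bob", "carol"], 2: ["dave"]},
)
format_sql({1: week_plan}, "support_shifts")
# UPDATE support_shifts SET `block`=0, `week`=1 WHERE `name`='alice';
# UPDATE support_shifts SET `block`=1, `week`=1 WHERE `name`='bob';
# ...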
2.9375
3
nlp_fourier.py
neitzke/stokes-numerics
1
12439
<gh_stars>1-10 """Fourier transform non-linear Poisson solver""" # This module is concerned with solving the "non-linear Poisson" # equation # Delta(u) = f(u,z) # on a uniform rectangular mesh, with u = u0 on the boundary. # # We solve the equation by an iterative method, solving an # approximation to the linearized equation at u_i to get u_{i+1} and # terminating when u_{i+1} - u_i is small enough. # # The key feature of this solve is that we use a very coarse # approximation of the linearization---chosen specifically so that it # can be solved by Fourier transform methods. The coarse # approxmination means that each iteration makes little progress # toward the final solution, and many iterations are necessary. # However, the availability of efficient FFT routines means that each # iteration is very fast, and so in many cases there is a net gain # compared to a direct method. # # The exact linearized equation for v = u-u0 is # Delta(vdot) - d1F(v,z) vdot = F(v,z) - Delta(vdot) (*) # where # F(v,z) = f(u0+v,z) - Delta(u0) # We rewrite (*) as # (Delta - A)vdot = RHS # This is exactly solvable by Fourier methods if A is a constant # function. # # To approximate a solution, we replace A = d1F(v,z) by a constant # that is in some way representative of its values on he grid points. # We follow the suggestion of [1] to use the "minimax" value # # A = (max(d1F) + min(d1F)) / 2 # # where max and min are taken over the grid. # # References # # [1] <NAME>. and <NAME>. 1973. Use of fast direct methods for # the efficient numerical solution of nonseparable elliptic # equations. SIAM J. Numer. Anal., 10: 1103-1103. # # KNOWN ISSUES: # # * The initialization code assumes that u_0 is harmonic in a # neighborhood of the boundary of the mesh. This is not a # fundamental requirement of the method, but because u_0 cannot be # easily extended to a doubly-periodic function its Laplacian is # computed by a finite difference scheme rather than by FFT methods. # Being harmonic at the boundary allows us to simply zero out the # Laplacian at the edges and ignore this issue. # # (Note that this assumption is satisfied for the applications to # the self-duality equations for which this solver was developed0). from __future__ import absolute_import import numpy as np import scipy.signal from dst2 import dst2, idst2, dst2freq from solverexception import SolverException import time import logging logger = logging.getLogger(__name__) def _max_power_2_dividing(n): n = int(n) return n & (~(n-1)) def _suggest_sizes(n): if n == _max_power_2_dividing(n): return [n-1] a = np.log(n+1)/np.log(2.0) return [2**int(np.floor(a))-1, 2**int(np.ceil(a))-1] def _is_bad_size_for_dst(n): return float(n+1) / _max_power_2_dividing(n+1) > 5.0 class NLPFourier(object): """Solve the system Delta(u) = f(u,z) on a uniform mesh, with u = u0 on boundary, using Fourier transform methods. """ def __init__(self,f,d1f,u0,grid,thresh=0.0000001,maxiter=5000,relax=1.0,linear=False): """Initialize and run the solver. Parameters: f : f(u,z) d1f : [df/du](u,z) u0 : initial guess and boundary conditon grid : SquareGrid or similar object representing a rectangular mesh (use zm, nx, ny, dx, dy attributes) thresh : L^2 error goal for the solver maxiter : raise exception if threshold not met after this many iterations relax : Step by (relax)*(linearized solution) at each iteration; setting to less than 1.0 may enlarge domain of convergence at the cost of convergence speed. linear : is the equation to be solved actually linear? 
(not used) Return: None Output class attributes: u : Solution u u0 : Initial guess """ self._t0 = time.time() self.f = f self.d1f = d1f self.grid = grid self.u0func = u0 self.u0 = u0(self.grid.zm) self.thresh = thresh self.maxiter = maxiter self.relax = relax self.normcoef = self.grid.dx * self.grid.dy / (2.0*np.sqrt((self.grid.nx + 1)*(self.grid.ny+1))) self.warn_if_bad_sizes() # Capital K for unnormalized frequencies ("per index") # Lower k for real frequencies ("per unit x or y") KX, KY = dst2freq(self.u0) self.kx = 2*np.pi*KX/self.grid.dx # a row self.ky = 2*np.pi*KY/self.grid.dy # a column self.ksq = self.kx**2 + self.ky**2 # broadcasted to a 2D array now # Laplacian of initial guess and its transform # (These are relatively long-running computations.) # TODO 1: Remove implicit assumption that Delta(u0) vanishes on boundary. # TODO 2: Move this finite difference stuff into its own module. logger.info("Computing Laplacian of initial guess and its transform") idx,idy = 1.0/self.grid.dx, 1.0/self.grid.dy lap_stencil = np.array([[0,idy**2,0],[idx**2,-2*(idx**2 + idy**2),idx**2],[0,idy**2,0]],dtype=np.float64) Lap_u0_raw = scipy.signal.convolve2d(self.u0,lap_stencil,mode='same') self.Lap_u0 = np.zeros_like(Lap_u0_raw) self.Lap_u0[2:-2,2:-2] = Lap_u0_raw[2:-2,2:-2] self._t1 = time.time() self._t = self._t1 logger.info("Solving PDE: %dx%d grid, thresh=%g" % (self.grid.nx,self.grid.ny,self.thresh)) self.u = self._iterate() def _iterate(self): """Fourier solver main loop""" vhat = np.zeros_like(self.u0) n = 0 last_delta_norm = 0.0 while True: n = n + 1 # Compute the DST of the RHS of the inhomogeneous linearized equation v = idst2(vhat) vvec = v.reshape((self.grid.nx * self.grid.ny, )) Fvvec = self.F(vvec,self.grid.zv) Fv = Fvvec.reshape((self.grid.nx, self.grid.ny)) Fv_hat = dst2(Fv) Lapv_hat = -self.ksq * vhat RHS_hat = Fv_hat - Lapv_hat # Compute the L^2 norm of the inhomogeneous term # ( = 0 iff we have a solution ) residual = self.L2norm_hat(RHS_hat) now = time.time() logger.info("PDE: iter=%d L2error=%g L2delta=%g\t(%.2fs)" % (n,residual,last_delta_norm,now-self._t)) self._t = now if residual < self.thresh: logger.info('PDE: success\t(%.2fs total; %.2fs in main loop)',now - self._t0,now - self._t1) break if np.any(np.isnan(RHS_hat)): # Computing RHS revealed some failure in the computation # (usually means the linearized solution at the previous step was bad) raise SolverException("NAN encountered in RHS computation (overflow or underflow?)") if n >= self.maxiter: raise SolverException("Max iterations (%d) reached without meeting error threshold %g" % (self.maxiter,self.thresh)) # Solve a constant-coefficient approximation of the linear # equation in frequency space. # First compute the constant that approximates d1F. a = self.minimax(self.d1F(vvec,self.grid.zv)) # Now compute the transform of the exact solution to this # constant coef problem. delta_vhat = RHS_hat / (-self.ksq - a) last_delta_norm = self.L2norm_hat(delta_vhat) # Update vhat by adding this approx solution of the # linearization vhat = vhat + self.relax * delta_vhat self.iter = n self.delta = last_delta_norm self.error = residual return self.u0 + v def warn_if_bad_sizes(self): msgstr = '%s-size %d is a bad choice for the fourier solver; this computation will be inefficient. Good sizes have the form (2**n)-1. Consider using size %s instead.' 
if _is_bad_size_for_dst(self.grid.nx): logger.warning(msgstr % ('x',self.grid.nx, ' or '.join(str(n) for n in _suggest_sizes(self.grid.nx)))) if _is_bad_size_for_dst(self.grid.ny): logger.warning(msgstr % ('y',self.grid.ny, ' or '.join(str(n) for n in _suggest_sizes(self.grid.nx)))) def F(self,v,z): u0vec = self.u0.reshape((self.grid.nx * self.grid.ny, )) Lapu0vec = self.Lap_u0.reshape((self.grid.nx * self.grid.ny, )) return self.f(u0vec + v,z) - Lapu0vec def d1F(self,v,z): u0vec = self.u0.reshape((self.grid.nx * self.grid.ny, )) return self.d1f(u0vec + v,z) def minimax(self,m): """Return the average of max(m) and min(m)""" return 0.5*(np.max(m) + np.min(m)) def L2norm_hat(self,m): """L^2 norm of a function computed from its Fourier transform coefficients""" return np.linalg.norm(m) * self.normcoef
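A minimal sketch of driving the NLPFourier solver above, assuming the module and its dst2 helper are importable. The grid object is a plain namespace carrying the attributes the solver reads (zm, zv, nx, ny, dx, dy), and the Liouville-type right-hand side e^u with a zero (harmonic) initial guess is only a stand-in problem, not the application the module was written for.

# Illustrative usage sketch.
import numpy as np
from types import SimpleNamespace
from nlp_fourier import NLPFourier

nx = ny = 255                          # sizes of the form (2**n)-1 keep the DST fast
xs = np.linspace(-1.0, 1.0, nx)
ys = np.linspace(-1.0, 1.0, ny)
X, Y = np.meshgrid(xs, ys)
zm = X + 1j * Y                        # complex coordinates on the mesh
grid = SimpleNamespace(
    zm=zm, zv=zm.reshape(nx * ny),
    nx=nx, ny=ny,
    dx=xs[1] - xs[0], dy=ys[1] - ys[0],
)

f = lambda u, z: np.exp(u)             # Delta(u) = e^u
d1f = lambda u, z: np.exp(u)           # d/du of the right-hand side
u0 = lambda z: np.zeros_like(z, dtype=np.float64)   # harmonic guess / boundary value

solver = NLPFourier(f, d1f, u0, grid, thresh=1e-7)
print(solver.u.shape, solver.iter, solver.error)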
2.453125
2
app/models/fragment.py
saury2013/Memento
0
12440
# -*- coding: utf-8 -*- from datetime import datetime from sqlalchemy.dialects.mysql import LONGTEXT from sqlalchemy.orm import load_only from sqlalchemy import func from flask import abort from markdown import Markdown,markdown from app.models import db,fragment_tags_table from app.models.tag import Tag from app.whoosh import search_helper class Fragment(db.Model): '''知识碎片''' __tablename__ = 'fragment' __table_args__ = { "mysql_engine": "InnoDB", "mysql_charset": "utf8" } id = db.Column(db.Integer,nullable=False,primary_key=True,autoincrement=True) title = db.Column(db.String(255),nullable=False,default="",index=True) access = db.Column(db.Integer,nullable=False,default=1) status = db.Column(db.Integer,nullable=False,default=0) markdown = db.deferred(db.Column(LONGTEXT,default="",nullable=False)) html = db.deferred(db.Column(LONGTEXT,default="",nullable=False)) publish_markdown = db.deferred(db.Column(LONGTEXT,default="",nullable=False)) publish_html = db.deferred(db.Column(LONGTEXT,default="",nullable=False)) publish_timestamp = db.Column(db.DateTime,default=datetime.now,nullable=False) updatetime = db.Column(db.DateTime,default=datetime.now,nullable=False) user_id = db.Column(db.Integer,db.ForeignKey('user.id')) tags = db.relationship('Tag',secondary=fragment_tags_table,backref=db.backref('fragments')) # branch = db.relationship('Branch',back_populates='fragment',uselist=False) branch_id = db.Column(db.Integer,db.ForeignKey('branch.id')) # branch = db.relationship('Branch',foreign_keys=branch_id) def get(self,id): return Fragment.query.get(id) @staticmethod def get_or_404(id): fragment = Fragment.query.get(id) if fragment: return fragment abort(404) def save(self): self.html = self.markdown2html(self.markdown) db.session.add(self) db.session.commit() search_helper.add_document(self.title,str(self.id),self.markdown) def markdown2html(self,content): # md = Markdown(['codehilite', 'fenced_code', 'meta', 'tables']) # html = md.convert(content) html = markdown(content,extensions=[ 'markdown.extensions.extra', 'markdown.extensions.codehilite', 'markdown.extensions.toc', ]) return html @staticmethod def get_nearest_fragments(num=5): fragments = Fragment.query.filter().order_by(Fragment.updatetime.desc()).limit(num) res = [] from app.models.branch import Branch for fragment in fragments: fragment.branch = Branch.get(fragment.branch_id) res.append(fragment) return res
2.140625
2
qatrack/qa/migrations/0001_initial.py
crcrewso/qatrackplus
20
12441
<reponame>crcrewso/qatrackplus # -*- coding: utf-8 -*- from django.db import migrations, models import django.utils.timezone import django.db.models.deletion from django.conf import settings class Migration(migrations.Migration): dependencies = [ ('contenttypes', '0002_remove_content_type_name'), ('auth', '0006_require_contenttypes_0002'), migrations.swappable_dependency(settings.AUTH_USER_MODEL), ('units', '0001_initial'), ] operations = [ migrations.CreateModel( name='AutoReviewRule', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('pass_fail', models.CharField(unique=True, max_length=15, choices=[(b'not_done', b'Not Done'), (b'ok', b'OK'), (b'tolerance', b'Tolerance'), (b'action', b'Action'), (b'no_tol', b'No Tol Set')])), ], ), migrations.CreateModel( name='Category', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('name', models.CharField(unique=True, max_length=255)), ('slug', models.SlugField(help_text='Unique identifier made of lowercase characters and underscores', unique=True, max_length=255)), ('description', models.TextField(help_text='Give a brief description of what type of tests should be included in this grouping')), ], options={ 'ordering': ('name',), 'verbose_name_plural': 'categories', }, ), migrations.CreateModel( name='Frequency', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('name', models.CharField(help_text='Display name for this frequency', unique=True, max_length=50)), ('slug', models.SlugField(help_text='Unique identifier made of lowercase characters and underscores for this frequency', unique=True)), ('nominal_interval', models.PositiveIntegerField(help_text='Nominal number of days between test completions')), ('due_interval', models.PositiveIntegerField(help_text='How many days since last completed until a test with this frequency is shown as due')), ('overdue_interval', models.PositiveIntegerField(help_text='How many days since last completed until a test with this frequency is shown as over due')), ], options={ 'ordering': ('nominal_interval',), 'verbose_name_plural': 'frequencies', 'permissions': (('can_choose_frequency', 'Choose QA by Frequency'),), }, ), migrations.CreateModel( name='Reference', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('name', models.CharField(help_text='Enter a short name for this reference', max_length=255)), ('type', models.CharField(default=b'numerical', max_length=15, choices=[(b'numerical', b'Numerical'), (b'boolean', b'Yes / No')])), ('value', models.FloatField(help_text='Enter the reference value for this test.')), ('created', models.DateTimeField(auto_now_add=True)), ('modified', models.DateTimeField(auto_now=True)), ('created_by', models.ForeignKey(related_name='reference_creators', editable=False, to=settings.AUTH_USER_MODEL, on_delete=models.PROTECT)), ('modified_by', models.ForeignKey(related_name='reference_modifiers', editable=False, to=settings.AUTH_USER_MODEL, on_delete=models.PROTECT)), ], ), migrations.CreateModel( name='Test', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('name', models.CharField(help_text='Name for this test', unique=True, max_length=255, db_index=True)), ('slug', models.SlugField(help_text='A short variable name consisting of alphanumeric characters and underscores for this test (to be used 
in composite calculations). ', max_length=128, verbose_name=b'Macro name')), ('description', models.TextField(help_text='A concise description of what this test is for (optional. You may use HTML markup)', null=True, blank=True)), ('procedure', models.CharField(help_text='Link to document describing how to perform this test', max_length=512, null=True, blank=True)), ('chart_visibility', models.BooleanField(default=True, verbose_name=b'Test item visible in charts?')), ('auto_review', models.BooleanField(default=False, verbose_name='Allow auto review of this test?')), ('type', models.CharField(default=b'simple', help_text='Indicate if this test is a Boolean,Simple Numerical,Multiple Choice,Constant,Composite,String,String Composite,File Upload', max_length=10, choices=[(b'boolean', b'Boolean'), (b'simple', b'Simple Numerical'), (b'multchoice', b'Multiple Choice'), (b'constant', b'Constant'), (b'composite', b'Composite'), (b'string', b'String'), (b'scomposite', b'String Composite'), (b'upload', b'File Upload')])), ('hidden', models.BooleanField(default=False, help_text="Don't display this test when performing QA", verbose_name='Hidden')), ('skip_without_comment', models.BooleanField(default=False, help_text='Allow users to skip this test without a comment', verbose_name='Skip without comment')), ('display_image', models.BooleanField(default=False, help_text='Image uploads only: Show uploaded images under the testlist', verbose_name=b'Display image')), ('choices', models.CharField(help_text='Comma seperated list of choices for multiple choice test types', max_length=2048, null=True, blank=True)), ('constant_value', models.FloatField(help_text='Only required for constant value types', null=True, blank=True)), ('calculation_procedure', models.TextField(help_text='For Composite Tests Only: Enter a Python snippet for evaluation of this test.', null=True, blank=True)), ('created', models.DateTimeField(auto_now_add=True)), ('modified', models.DateTimeField(auto_now=True)), ('category', models.ForeignKey(help_text='Choose a category for this test', to='qa.Category', on_delete=models.PROTECT)), ('created_by', models.ForeignKey(related_name='test_creator', editable=False, to=settings.AUTH_USER_MODEL, on_delete=models.PROTECT)), ('modified_by', models.ForeignKey(related_name='test_modifier', editable=False, to=settings.AUTH_USER_MODEL, on_delete=models.PROTECT)), ], ), migrations.CreateModel( name='TestInstance', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('review_date', models.DateTimeField(null=True, editable=False, blank=True)), ('pass_fail', models.CharField(db_index=True, max_length=20, editable=False, choices=[(b'not_done', b'Not Done'), (b'ok', b'OK'), (b'tolerance', b'Tolerance'), (b'action', b'Action'), (b'no_tol', b'No Tol Set')])), ('value', models.FloatField(help_text='For boolean Tests a value of 0 equals False and any non zero equals True', null=True)), ('string_value', models.CharField(max_length=1024, null=True, blank=True)), ('skipped', models.BooleanField(default=False, help_text='Was this test skipped for some reason (add comment)')), ('comment', models.TextField(help_text='Add a comment to this test', null=True, blank=True)), ('work_started', models.DateTimeField(editable=False, db_index=True)), ('work_completed', models.DateTimeField(default=django.utils.timezone.now, help_text=b'Format DD-MM-YY hh:mm (hh:mm is 24h time e.g. 
31-05-12 14:30)', db_index=True)), ('created', models.DateTimeField(default=django.utils.timezone.now)), ('modified', models.DateTimeField(auto_now=True)), ('created_by', models.ForeignKey(related_name='test_instance_creator', editable=False, to=settings.AUTH_USER_MODEL, on_delete=models.PROTECT)), ('modified_by', models.ForeignKey(related_name='test_instance_modifier', editable=False, to=settings.AUTH_USER_MODEL, on_delete=models.PROTECT)), ('reference', models.ForeignKey(on_delete=django.db.models.deletion.SET_NULL, blank=True, editable=False, to='qa.Reference', null=True)), ('reviewed_by', models.ForeignKey(blank=True, editable=False, to=settings.AUTH_USER_MODEL, null=True, on_delete=models.SET_NULL)), ], options={ 'get_latest_by': 'work_completed', 'permissions': (('can_view_history', 'Can see test history when performing QA'), ('can_view_charts', 'Can view charts of test history'), ('can_review', 'Can review & approve tests'), ('can_skip_without_comment', 'Can skip tests without comment'), ('can_review_own_tests', 'Can review & approve self-performed tests')), }, ), migrations.CreateModel( name='TestInstanceStatus', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('name', models.CharField(help_text='Display name for this status type', unique=True, max_length=50)), ('slug', models.SlugField(help_text='Unique identifier made of lowercase characters and underscores for this status', unique=True)), ('description', models.TextField(help_text='Give a brief description of what type of test results should be given this status', null=True, blank=True)), ('is_default', models.BooleanField(default=False, help_text='Check to make this status the default for new Test Instances')), ('requires_review', models.BooleanField(default=True, help_text='Check to indicate that Test Instances with this status require further review')), ('export_by_default', models.BooleanField(default=True, help_text='Check to indicate whether tests with this status should be exported by default (e.g. for graphing/control charts)')), ('valid', models.BooleanField(default=True, help_text='If unchecked, data with this status will not be exported and the TestInstance will not be considered a valid completed Test')), ], options={ 'verbose_name_plural': 'statuses', }, ), migrations.CreateModel( name='TestList', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('name', models.CharField(max_length=255, db_index=True)), ('slug', models.SlugField(help_text='A short unique name for use in the URL of this list', unique=True)), ('description', models.TextField(help_text='A concise description of this test checklist. 
(You may use HTML markup)', null=True, blank=True)), ('created', models.DateTimeField(auto_now_add=True)), ('modified', models.DateTimeField(auto_now=True)), ('warning_message', models.CharField(default=b'Do not treat', help_text='Message given when a test value is out of tolerance', max_length=255)), ('created_by', models.ForeignKey(related_name='qa_testlist_created', editable=False, to=settings.AUTH_USER_MODEL, on_delete=models.PROTECT)), ('modified_by', models.ForeignKey(related_name='qa_testlist_modified', editable=False, to=settings.AUTH_USER_MODEL, on_delete=models.PROTECT)), ('sublists', models.ManyToManyField(help_text='Choose any sublists that should be performed as part of this list.', to='qa.TestList', blank=True)), ], options={ 'abstract': False, }, ), migrations.CreateModel( name='TestListCycle', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('name', models.CharField(max_length=255, db_index=True)), ('slug', models.SlugField(help_text='A short unique name for use in the URL of this list', unique=True)), ('description', models.TextField(help_text='A concise description of this test checklist. (You may use HTML markup)', null=True, blank=True)), ('created', models.DateTimeField(auto_now_add=True)), ('modified', models.DateTimeField(auto_now=True)), ('drop_down_label', models.CharField(default=b'Choose Day', max_length=128)), ('day_option_text', models.CharField(default=b'day', max_length=8, choices=[(b'day', b'Day'), (b'tlname', b'Test List Name')])), ('created_by', models.ForeignKey(related_name='qa_testlistcycle_created', editable=False, to=settings.AUTH_USER_MODEL, on_delete=models.PROTECT)), ('modified_by', models.ForeignKey(related_name='qa_testlistcycle_modified', editable=False, to=settings.AUTH_USER_MODEL, on_delete=models.PROTECT)), ], options={ 'abstract': False, }, ), migrations.CreateModel( name='TestListCycleMembership', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('order', models.IntegerField()), ('cycle', models.ForeignKey(to='qa.TestListCycle', on_delete=models.CASCADE)), ('test_list', models.ForeignKey(to='qa.TestList', on_delete=models.CASCADE)), ], options={ 'ordering': ('order',), }, ), migrations.CreateModel( name='TestListInstance', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('work_started', models.DateTimeField(db_index=True)), ('work_completed', models.DateTimeField(default=django.utils.timezone.now, null=True, db_index=True)), ('comment', models.TextField(help_text='Add a comment to this set of tests', null=True, blank=True)), ('in_progress', models.BooleanField(default=False, help_text='Mark this session as still in progress so you can complete later (will not be submitted for review)', db_index=True)), ('reviewed', models.DateTimeField(null=True, blank=True)), ('all_reviewed', models.BooleanField(default=False)), ('day', models.IntegerField(default=0)), ('created', models.DateTimeField(auto_now_add=True)), ('modified', models.DateTimeField()), ('created_by', models.ForeignKey(related_name='test_list_instance_creator', editable=False, to=settings.AUTH_USER_MODEL, on_delete=models.PROTECT)), ('modified_by', models.ForeignKey(related_name='test_list_instance_modifier', editable=False, to=settings.AUTH_USER_MODEL, on_delete=models.PROTECT)), ('reviewed_by', models.ForeignKey(related_name='test_list_instance_reviewer', blank=True, editable=False, 
to=settings.AUTH_USER_MODEL, null=True, on_delete=models.SET_NULL)), ('test_list', models.ForeignKey(editable=False, to='qa.TestList', on_delete=models.PROTECT)), ], options={ 'get_latest_by': 'work_completed', 'permissions': (('can_override_date', 'Can override date'), ('can_perform_subset', 'Can perform subset of tests'), ('can_view_completed', 'Can view previously completed instances')), }, ), migrations.CreateModel( name='TestListMembership', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('order', models.IntegerField(db_index=True)), ('test', models.ForeignKey(to='qa.Test', on_delete=models.CASCADE)), ('test_list', models.ForeignKey(to='qa.TestList', on_delete=models.CASCADE)), ], options={ 'ordering': ('order',), }, ), migrations.CreateModel( name='Tolerance', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('type', models.CharField(help_text='Select whether this will be an absolute or relative tolerance criteria', max_length=20, choices=[(b'absolute', b'Absolute'), (b'percent', b'Percentage'), (b'multchoice', b'Multiple Choice')])), ('act_low', models.FloatField(help_text='Value of lower Action level', null=True, verbose_name='Action Low', blank=True)), ('tol_low', models.FloatField(help_text='Value of lower Tolerance level', null=True, verbose_name='Tolerance Low', blank=True)), ('tol_high', models.FloatField(help_text='Value of upper Tolerance level', null=True, verbose_name='Tolerance High', blank=True)), ('act_high', models.FloatField(help_text='Value of upper Action level', null=True, verbose_name='Action High', blank=True)), ('mc_pass_choices', models.CharField(help_text='Comma seperated list of choices that are considered passing', max_length=2048, null=True, verbose_name='Multiple Choice OK Values', blank=True)), ('mc_tol_choices', models.CharField(help_text='Comma seperated list of choices that are considered at tolerance', max_length=2048, null=True, verbose_name='Multiple Choice Tolerance Values', blank=True)), ('created_date', models.DateTimeField(auto_now_add=True)), ('modified_date', models.DateTimeField(auto_now=True)), ('created_by', models.ForeignKey(related_name='tolerance_creators', editable=False, to=settings.AUTH_USER_MODEL, on_delete=models.PROTECT)), ('modified_by', models.ForeignKey(related_name='tolerance_modifiers', editable=False, to=settings.AUTH_USER_MODEL, on_delete=models.PROTECT)), ], options={ 'ordering': ['type', 'act_low', 'tol_low', 'tol_high', 'act_high'], }, ), migrations.CreateModel( name='UnitTestCollection', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('due_date', models.DateTimeField(help_text='Next time this item is due', null=True, blank=True)), ('auto_schedule', models.BooleanField(default=True, help_text='If this is checked, due_date will be auto set based on the assigned frequency')), ('active', models.BooleanField(default=True, help_text='Uncheck to disable this test on this unit', db_index=True)), ('object_id', models.PositiveIntegerField()), ('assigned_to', models.ForeignKey(to='auth.Group', help_text='QA group that this test list should nominally be performed by', null=True, on_delete=models.PROTECT)), ('content_type', models.ForeignKey(to='contenttypes.ContentType', on_delete=models.PROTECT)), ('frequency', models.ForeignKey(blank=True, to='qa.Frequency', help_text='Frequency with which this test list is to be performed', null=True, 
on_delete=models.SET_NULL)), ('last_instance', models.ForeignKey(on_delete=django.db.models.deletion.SET_NULL, editable=False, to='qa.TestListInstance', null=True)), ('unit', models.ForeignKey(to='units.Unit', on_delete=models.PROTECT)), ('visible_to', models.ManyToManyField(help_text='Select groups who will be able to see this test collection on this unit', related_name='test_collection_visibility', to='auth.Group')), ], options={ 'verbose_name_plural': 'Assign Test Lists to Units', 'permissions': (('can_view_overview', 'Can view program overview'), ('can_review_non_visible_tli', "Can view tli and utc not visible to user's groups")), }, ), migrations.CreateModel( name='UnitTestInfo', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('active', models.BooleanField(default=True, help_text='Uncheck to disable this test on this unit', db_index=True)), ('assigned_to', models.ForeignKey(on_delete=django.db.models.deletion.SET_NULL, blank=True, to='auth.Group', help_text='QA group that this test list should nominally be performed by', null=True)), ('reference', models.ForeignKey(on_delete=django.db.models.deletion.SET_NULL, verbose_name='Current Reference', blank=True, to='qa.Reference', null=True)), ('test', models.ForeignKey(to='qa.Test', on_delete=models.PROTECT)), ('tolerance', models.ForeignKey(on_delete=django.db.models.deletion.SET_NULL, blank=True, to='qa.Tolerance', null=True)), ('unit', models.ForeignKey(to='units.Unit', on_delete=models.PROTECT)), ], options={ 'verbose_name_plural': 'Set References & Tolerances', 'permissions': (('can_view_ref_tol', 'Can view Refs and Tols'),), }, ), migrations.AddField( model_name='testlistinstance', name='unit_test_collection', field=models.ForeignKey(editable=False, to='qa.UnitTestCollection', on_delete=models.PROTECT), ), migrations.AddField( model_name='testlistcycle', name='test_lists', field=models.ManyToManyField(to='qa.TestList', through='qa.TestListCycleMembership'), ), migrations.AddField( model_name='testlist', name='tests', field=models.ManyToManyField(help_text='Which tests does this list contain', to='qa.Test', through='qa.TestListMembership'), ), migrations.AddField( model_name='testinstance', name='status', field=models.ForeignKey(to='qa.TestInstanceStatus', on_delete=models.PROTECT), ), migrations.AddField( model_name='testinstance', name='test_list_instance', field=models.ForeignKey(editable=False, to='qa.TestListInstance', on_delete=models.CASCADE), ), migrations.AddField( model_name='testinstance', name='tolerance', field=models.ForeignKey(on_delete=models.PROTECT, blank=True, editable=False, to='qa.Tolerance', null=True), ), migrations.AddField( model_name='testinstance', name='unit_test_info', field=models.ForeignKey(editable=False, to='qa.UnitTestInfo', on_delete=models.PROTECT), ), migrations.AddField( model_name='autoreviewrule', name='status', field=models.ForeignKey(to='qa.TestInstanceStatus', on_delete=models.CASCADE), ), migrations.AlterUniqueTogether( name='unittestinfo', unique_together=set([('test', 'unit')]), ), migrations.AlterUniqueTogether( name='unittestcollection', unique_together=set([('unit', 'frequency', 'content_type', 'object_id')]), ), migrations.AlterUniqueTogether( name='testlistmembership', unique_together=set([('test_list', 'test')]), ), ]
1.9375
2
cloudygram_api_server/models/telethon_model.py
Maverick1983/cloudygram-api-server
2
12442
from .constants import SUCCESS_KEY, MESSAGE_KEY, DATA_KEY
from cloudygram_api_server.scripts import CGMessage
from typing import List


class TtModels:

    @staticmethod
    def sing_in_failure(message) -> dict:
        return {
            SUCCESS_KEY: False,
            MESSAGE_KEY: message
        }

    @staticmethod
    def send_code_failure(message) -> dict:
        return {
            SUCCESS_KEY: False,
            MESSAGE_KEY: message
        }

    @staticmethod
    def message_list(messages) -> dict:
        mapped_messages: List[str] = []
        for m in messages:
            mapped_messages.append(CGMessage.map_from_tt(m))
        return {
            SUCCESS_KEY: True,
            DATA_KEY: mapped_messages
        }
2.109375
2
source1/bsp/entities/portal2_entity_handlers.py
tltneon/SourceIO
199
12443
import math from mathutils import Euler import bpy from .portal2_entity_classes import * from .portal_entity_handlers import PortalEntityHandler local_entity_lookup_table = PortalEntityHandler.entity_lookup_table.copy() local_entity_lookup_table.update(entity_class_handle) class Portal2EntityHandler(PortalEntityHandler): entity_lookup_table = local_entity_lookup_table pointlight_power_multiplier = 1000 def handle_prop_weighted_cube(self, entity: prop_weighted_cube, entity_raw: dict): obj = self._handle_entity_with_model(entity, entity_raw) self._put_into_collection('prop_weighted_cube', obj, 'props') def handle_prop_testchamber_door(self, entity: prop_testchamber_door, entity_raw: dict): obj = self._handle_entity_with_model(entity, entity_raw) self._put_into_collection('prop_testchamber_door', obj, 'props') def handle_prop_floor_button(self, entity: prop_floor_button, entity_raw: dict): obj = self._handle_entity_with_model(entity, entity_raw) self._put_into_collection('prop_floor_button', obj, 'props') def handle_prop_floor_ball_button(self, entity: prop_floor_ball_button, entity_raw: dict): obj = self._handle_entity_with_model(entity, entity_raw) self._put_into_collection('prop_floor_ball_button', obj, 'props') def handle_prop_floor_cube_button(self, entity: prop_floor_cube_button, entity_raw: dict): obj = self._handle_entity_with_model(entity, entity_raw) self._put_into_collection('prop_floor_cube_button', obj, 'props') def handle_prop_under_floor_button(self, entity: prop_under_floor_button, entity_raw: dict): obj = self._handle_entity_with_model(entity, entity_raw) self._put_into_collection('prop_under_floor_button', obj, 'props') def handle_prop_tractor_beam(self, entity: prop_tractor_beam, entity_raw: dict): obj = self._handle_entity_with_model(entity, entity_raw) self._put_into_collection('prop_tractor_beam', obj, 'props') def handle_logic_playmovie(self, entity: logic_playmovie, entity_raw: dict): obj = bpy.data.objects.new(self._get_entity_name(entity), None) self._set_location(obj, entity.origin) self._set_icon_if_present(obj, entity) self._set_entity_data(obj, {'entity': entity_raw}) self._put_into_collection('logic_playmovie', obj, 'logic') def handle_trigger_paint_cleanser(self, entity: trigger_paint_cleanser, entity_raw: dict): if 'model' not in entity_raw: return model_id = int(entity_raw.get('model')[1:]) mesh_object = self._load_brush_model(model_id, self._get_entity_name(entity)) self._set_location_and_scale(mesh_object, parse_float_vector(entity_raw.get('origin', '0 0 0'))) self._set_rotation(mesh_object, parse_float_vector(entity_raw.get('angles', '0 0 0'))) self._set_entity_data(mesh_object, {'entity': entity_raw}) self._put_into_collection('trigger_paint_cleanser', mesh_object, 'triggers') def handle_trigger_catapult(self, entity: trigger_catapult, entity_raw: dict): if 'model' not in entity_raw: return model_id = int(entity_raw.get('model')[1:]) mesh_object = self._load_brush_model(model_id, self._get_entity_name(entity)) self._set_location_and_scale(mesh_object, parse_float_vector(entity_raw.get('origin', '0 0 0'))) self._set_rotation(mesh_object, parse_float_vector(entity_raw.get('angles', '0 0 0'))) self._set_entity_data(mesh_object, {'entity': entity_raw}) self._put_into_collection('trigger_catapult', mesh_object, 'triggers') def handle_npc_wheatley_boss(self, entity: npc_wheatley_boss, entity_raw: dict): obj = self._handle_entity_with_model(entity, entity_raw) self._put_into_collection('npc_wheatley_boss', obj, 'npc') def handle_prop_exploding_futbol(self, 
entity: prop_exploding_futbol, entity_raw: dict): obj = self._handle_entity_with_model(entity, entity_raw) self._put_into_collection('prop_exploding_futbol', obj, 'props') def handle_prop_exploding_futbol_socket(self, entity: prop_exploding_futbol_socket, entity_raw: dict): obj = self._handle_entity_with_model(entity, entity_raw) self._put_into_collection('prop_exploding_futbol', obj, 'props') def handle_prop_exploding_futbol_spawnert(self, entity: prop_exploding_futbol_spawner, entity_raw: dict): obj = self._handle_entity_with_model(entity, entity_raw) self._put_into_collection('prop_exploding_futbol_spawner', obj, 'props')
2.03125
2
michelanglo_api/ss_parser.py
matteoferla/MichelaNGLo-api
1
12444
from collections import namedtuple class SSParser: """ Create a SS block from PDB data. Written to be agnostic of PDB parser, but for now only has PyMOL. .. code-block:: python import pymol2 with pymol2.PyMOL() as pymol: pymol.cmd.load('model.pdb', 'prot') ss = SSParser().parse_pymol(pymol.cmd) print(ss) # or SSParser.correct_file('model.pdb', True) Do note that the lines seem offset because SHEET has a name parameter. HELIX 1 HA GLY A 86 GLY A 94 1 9 SHEET 5 A 5 GLY A 52 PHE A 56 -1 N PHE A 56 O TRP A 71 SHEET 1 B 5 THR B 107 ARG B 110 0 """ # faux pymol atom Atom = namedtuple('Atom', ['ss', 'resi', 'resn', 'chain']) def __init__(self): # none of the attributes are actually public. self.ss = [] self.start = self.Atom('L', 0, 'XXX', 'X') self.previous = self.Atom('L', 0, 'XXX', 'X') self.ss_count = {'H': 1, 'S': 1, 'L': 0} def parse_pymol(self, cmd, selector: str = 'name ca') -> str: atoms = list(cmd.get_model(selector).atom) return self.parse(atoms) def parse(self, atoms: list) -> str: """ atoms is a list of objects with 'ss', 'resi', 'resn'. one per residue (CA). This does not collapse the list into a list of ranges, as resn is also require etc. :param atoms: :return: """ for current in atoms: if self.previous.ss != current.ss or self.previous.chain != current.chain: # different self._store_ss() # the previous ss has come to an end. # deal with current if current.ss in ('S', 'H'): # start of a new self.start = current # move on self.previous = current self._store_ss() return str(self) def _store_ss(self): """ The SS sequence has come to an end: store it. :return: """ if self.previous.ss == '': return # not AA? if int(self.previous.resi) == int(self.start.resi) + 1: return # too short cc = self.ss_count[self.previous.ss] if self.previous.ss == 'H': # previous was the other type self.ss.append( f'HELIX {cc: >3} {cc: >3} ' + f'{self.start.resn} {self.start.chain} {self.start.resi: >4} ' + f'{self.previous.resn} {self.previous.chain} {self.previous.resi: >4} 1' + ' ' + f'{int(self.previous.resi) - int(self.start.resi): >2}' ) self.ss_count[self.previous.ss] += 1 elif self.previous.ss == 'S': # previous was the other type self.ss.append( f'SHEET {cc: >3} {cc: >2}S 1 ' + f'{self.start.resn} {self.start.chain}{self.start.resi: >4} ' + f'{self.previous.resn} {self.previous.chain}{self.previous.resi: >4} 0') self.ss_count[self.previous.ss] += 1 else: # loop? Nothing. pass def __str__(self): return '\n'.join(self.ss) +'\n' @classmethod def correct_file(cls, filename: str, write:bool=True): import pymol2 with pymol2.PyMOL() as pymol: pymol.cmd.load(filename, 'prot') ss = cls().parse_pymol(pymol.cmd) with open(filename, 'r') as fh: block = fh.read() if write: with open(filename, 'w') as fh: fh.write(ss + block) return ss + block @classmethod def correct_block(cls, block: str): import pymol2 with pymol2.PyMOL() as pymol: pymol.cmd.read_pdbstr(block, 'prot') ss = cls().parse_pymol(pymol.cmd) return ss + block
3.078125
3
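A minimal usage sketch for the SSParser class above, feeding parse() the faux Atom records the class itself defines so PyMOL is not needed; the module name ss_parser and the residue values are assumptions made for illustration.

from ss_parser import SSParser

# Eight CA records on chain A: a short loop, a five-residue helix, then loop again.
atoms = [SSParser.Atom(ss, resi, 'ALA', 'A')
         for resi, ss in enumerate(['L', 'H', 'H', 'H', 'H', 'H', 'L', 'L'], start=1)]

print(SSParser().parse(atoms))  # prints a single HELIX record covering residues 2-6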
Net640/apps/user_posts/mixin.py
86Ilya/net640kb
1
12445
from django.urls import reverse

from Net640.settings import FRONTEND_DATE_FORMAT


class AsDictMessageMixin:
    """
    Mixin for representing user messages (posts, comments) as dictionaries
    """

    def as_dict(self, executor):
        return {'content': self.content,
                'user_has_like': self.has_like(executor),
                'is_owner': self.user == executor,
                'rating': round(self.get_rating(), 1),
                'author': self.user.username,
                'author_page': reverse('friends:user_view', kwargs={'user_id': self.user.id}),
                'date': self.date.strftime(FRONTEND_DATE_FORMAT),
                'id': self.id,
                'author_thumbnail_url': self.user.get_thumbnail_url(),
                }
2.34375
2
squids/tfrecords/maker.py
mmgalushka/squids
0
12446
"""A module for converting a data source to TFRecords.""" import os import json import copy import csv from pathlib import Path from shutil import rmtree import PIL.Image as Image import tensorflow as tf from tqdm import tqdm from .feature import items_to_features from .errors import DirNotFoundError, InvalidDatasetFormat from ..config import IMAGE_WIDTH, IMAGE_HEIGHT, DATASET_DIR, TFRECORDS_SIZE # ------------------------------------------------------------------------------ # CSV/COCO Dataset Detectors # ------------------------------------------------------------------------------ def is_csv_input(input_dir: Path) -> bool: """ Tests if the input directory represents CSV dataset format. Args: input_dir (Path): The input directory to test. Returns: status (bool): Returns `True` if the input directory represents CSV dataset format and `False` otherwise. """ return set(os.listdir(input_dir)) == set( [ "images", "instances_train.csv", "instances_test.csv", "instances_val.csv", "categories.json", ] ) def is_coco_input(input_dir: Path) -> bool: """ Tests if the input directory represents COCO dataset format. Args: input_dir (Path): The input directory to test. Returns: status (bool): Returns `True` if the input directory represents COCO dataset format and `False` otherwise. """ root_artifacts = os.listdir(input_dir) if "annotations" in root_artifacts: annotations_artifacts = os.listdir(input_dir / "annotations") stems_artifacts = [ Path(artifact).stem for artifact in annotations_artifacts ] return set(stems_artifacts).issubset(set(root_artifacts)) return False # ------------------------------------------------------------------------------ # CSV/COCO Dataset Iterators # ------------------------------------------------------------------------------ class CategoriesMap: """ A dictionary-like object for intelligently mapping categories. The goal of this class is to remap user-specified categories for the compact one-hot encoding. Let's review a simple example. Assume, the original data has images that include objects belonging to the two categories: 15 and 20. If we do not remap these categories, then we need to create one-hot with length `max(15,20) + 1 = 21` (plus one is to allow one additional category "no object"). This creates unnecessary overhead during the model training. The most intuitive solution would be to remap the original categories to the following `{15: 1, 20: 2}`. In this case, the one-hot encoding length would be `max(1,2) + 1 = 3` . To initiate remapping the `selected_categories` argument should be defined. All selected category IDs will be sorted in ascending order with the consequent re-assignment to the new IDs. For example, let's assume the specified selected categories are `[12, 5, 3, 23]`, after sorting this list will have the following `[3, 5, 12, 23]` and the remapping `{3: 1, 5: 2, 12: 3, 23: 4}`. If the `selected_categories` argument is defined the following operation `map[ORIGINAL_ID]` returns `NEW_ID` (remapped category ID). If the `selected_categories` argument does not defined the following operation `map[ORIGINAL_ID]` returns `ORIGINAL_ID` (in other words its remaps ID to itself). Args: selected_categories (list): The list of categories to map. 
""" def __init__(self, selected_categories: list): self.__categories_mapping = {} if len(selected_categories) > 0: for new_category_id, old_category_id in enumerate( sorted(selected_categories) ): self.__categories_mapping[old_category_id] = ( new_category_id + 1 ) def __getitem__(self, category_id): """Returns the remapped category ID.""" if self.__categories_mapping: return self.__categories_mapping[category_id] else: return category_id def __contains__(self, category_id): """Tests if the specified category ID in the map.""" if self.__categories_mapping: return category_id in self.__categories_mapping else: return True class DatasetIterator: """ The base class for dataset records iterator. Args: records (list): The list with records to iterate. image_dir (Path): The base path for loading images. """ def __init__(self, records: list, image_dir: Path): self.__records = records self.__image_dir = image_dir self.__size = len(self.__records) self.__pointer = 0 def __iter__(self): """Returns the dataset records iterator.""" return self def __len__(self): """Returns a number of records in the dataset.""" return self.__size def __next__(self): """Returns the next record.""" if self.__pointer >= self.__size: raise StopIteration record = self.__records[self.__pointer] record["image"]["data"] = Image.open( self.__image_dir / record["image"]["file_name"] ) self.__pointer += 1 return record class CsvIterator(DatasetIterator): """ The CSV dataset iterator. Args: instance_file (Path): The path to the `csv`-file with records to iterate. selected_categories (list): The list of category IDs on which iteration should take place. If an image within a record does not contain a selected category it's skipped. If the selected category IDs do not define, then iterate goes via all images. """ def __init__(self, instance_file: Path, selected_categories: list): categories_map = CategoriesMap(selected_categories) categories = dict() with open(instance_file.parent / "categories.json") as fp: for category in json.load(fp)["categories"]: category_id = category["id"] if category_id in categories_map: # Remaps ald category ID to the new one. new_category = copy.deepcopy(category) new_category["id"] = categories_map[category["id"]] categories[new_category["id"]] = new_category records = [] with open(instance_file, newline="\n") as csv_fp: csv_reader = csv.DictReader(csv_fp, delimiter=",", quotechar='"') for row in csv_reader: annotations = [] for bbox, segmentation, category_id in zip( json.loads(row["bboxes"]), json.loads(row["segmentations"]), json.loads(row["category_ids"]), ): if category_id in categories_map: annotations.append( { "bbox": bbox, "iscrowd": 0, "segmentation": [segmentation], "category_id": categories_map[category_id], } ) # Here we discard all images which do not have any # annotations for the selected categories. if len(annotations) > 0: records.append( { "image": { "id": int(row["image_id"]), "file_name": row["file_name"], }, "annotations": annotations, "categories": categories, } ) super().__init__(records, instance_file.parent / "images") class CocoIterator(DatasetIterator): """ The COCO dataset iterator. Args: instance_file (Path): The path to the `json`-file with records to iterate. selected_categories (list): The list of category IDs on which iteration should take place. If an image within a record does not contain a selected category it's skipped. If the selected category IDs do not define, then iterate goes via all images. 
""" def __init__(self, instance_file: Path, selected_categories: list): categories_map = CategoriesMap(selected_categories) with open(instance_file) as f: content = json.load(f) annotations = dict() for annotation in content["annotations"]: category_id = annotation["category_id"] if category_id in categories_map: image_id = annotation["image_id"] if image_id not in annotations: annotations[image_id] = [] # Remaps ald category ID to the new one. new_annotation = copy.deepcopy(annotation) new_annotation["category_id"] = categories_map[category_id] annotations[image_id].append(new_annotation) categories = dict() for category in content["categories"]: category_id = category["id"] if category_id in categories_map: # Remaps ald category ID to the new one. new_category = copy.deepcopy(category) new_category["id"] = categories_map[category_id] categories[new_category["id"]] = new_category records = [] for image in content["images"]: if image["id"] in annotations: records.append( { "image": image, "annotations": annotations[image["id"]], "categories": categories, } ) super().__init__( records, instance_file.parent.parent / instance_file.stem ) # ------------------------------------------------------------------------------ # Dataset to TFRecords Transformer # ------------------------------------------------------------------------------ def instances_to_tfrecords( instance_file: Path, output_dir: Path, items: DatasetIterator, size: int, image_width: int, image_height: int, verbose: bool, ): """ Converse instances to tfrecords. Args: instance_file (Path): The path to the instance file to read data from. output_dir (Path): The path to the output directory to save generated TFRecords. items (DatasetIterator): The CSV or COCO dataset iterator. size (int): The number of images per partion. image_width (int): The TFRecords image width resize to. image_height (int): The TFRecords image height resize to. verbose (bool): The flag to set verbose mode. """ def get_example(item): image_id = item["image"]["id"] img = item["image"]["data"] annotations = item["annotations"] categories = item["categories"] category_max_id = max(list(categories.keys())) bboxes = [] segmentations = [] category_ids = [] for annotation in annotations: if annotation["iscrowd"] == 0: bboxes.append(annotation["bbox"]) segmentations.append(annotation["segmentation"][0]) category_ids.append(annotation["category_id"]) feature = items_to_features( image_id, img, image_width, image_height, bboxes, segmentations, category_ids, category_max_id, ) return tf.train.Example(features=tf.train.Features(feature=feature)) tfrecords_dir = output_dir / instance_file.stem tfrecords_dir.mkdir(exist_ok=True) # The TFRecords writer. writer = None # The index for the next TFRecords partition. part_index = -1 # The count of how many records stored in the TFRecords files. It # is set here to maximum capacity (as a trick) to make the "if" # condition in the loop equals to True and start 0 - partition. part_count = size # Initializes the progress bar of verbose mode is on. if verbose: pbar = tqdm(total=len(items)) for item in items: if item: if part_count >= size: # The current partition has been reached the maximum capacity, # so we need to start a new one. if writer is not None: # Closes the existing TFRecords writer. 
writer.close() part_index += 1 writer = tf.io.TFRecordWriter( str(tfrecords_dir / f"part-{part_index}.tfrecord") ) part_count = 0 example = get_example(item) if example: writer.write(example.SerializeToString()) part_count += 1 # Updates the progress bar of verbose mode is on. if verbose: pbar.update(1) # Closes the existing TFRecords writer after the last row. writer.close() def create_tfrecords( dataset_dir: str = DATASET_DIR, tfrecords_dir: str = None, size: int = TFRECORDS_SIZE, image_width: int = IMAGE_WIDTH, image_height: int = IMAGE_HEIGHT, selected_categories: list = [], verbose: bool = False, ): """ This function transforms CSV or COCO dataset to TFRecords. Args: dataset_dir (str): The path to the data set directory to transform. tfrecords_dir (str): The path to the output directory to save generated TFRecords. size (int): The number of images per partion. image_width (int): The TFRecords image width resize to. image_height (int): The TFRecords image height resize to. selected_categories (list): The list of selected category IDs. verbose (bool): The flag to set verbose mode. Raises: DirNotFoundError: If input or output directories do not exist. InvalidDatasetFormat: If the input dataset has invalid CSV or COCO format. """ input_dir = Path(dataset_dir) if not input_dir.exists(): raise DirNotFoundError("input dataset", input_dir) if tfrecords_dir is None: output_dir = input_dir.parent / (input_dir.name + "-tfrecords") else: output_dir = Path(tfrecords_dir) if not output_dir.parent.exists(): raise DirNotFoundError("parent (to output)", output_dir.parent) if output_dir.exists(): rmtree(output_dir) output_dir.mkdir(exist_ok=True) if is_csv_input(input_dir): for instance_file in input_dir.rglob("*.csv"): instances_to_tfrecords( instance_file, output_dir, CsvIterator(instance_file, selected_categories), size, image_width, image_height, verbose, ) elif is_coco_input(input_dir): for instance_file in (input_dir / "annotations").rglob("*.json"): instances_to_tfrecords( instance_file, output_dir, CocoIterator(instance_file, selected_categories), size, image_width, image_height, verbose, ) else: raise InvalidDatasetFormat()
2.84375
3
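The CategoriesMap docstring above walks through the remapping with selected categories [12, 5, 3, 23]; the following standalone sketch reproduces that logic without importing squids, purely to illustrate the compact one-hot encoding it enables.

selected_categories = [12, 5, 3, 23]

# Sort the selected IDs and assign compact new IDs starting at 1,
# keeping 0 free for the implicit "no object" class.
mapping = {old: new + 1 for new, old in enumerate(sorted(selected_categories))}
print(mapping)  # {3: 1, 5: 2, 12: 3, 23: 4}

# One-hot length becomes max(new IDs) + 1 = 5 instead of max(3, 5, 12, 23) + 1 = 24.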
edexOsgi/com.raytheon.edex.plugin.gfe/utility/cave_static/user/GFETEST/gfe/userPython/smartTools/ExUtil1.py
srcarter3/awips2
0
12447
## # This software was developed and / or modified by Raytheon Company, # pursuant to Contract DG133W-05-CQ-1067 with the US Government. # # U.S. EXPORT CONTROLLED TECHNICAL DATA # This software product contains export-restricted data whose # export/transfer/disclosure is restricted by U.S. law. Dissemination # to non-U.S. persons whether in the United States or abroad requires # an export license or other authorization. # # Contractor Name: <NAME> # Contractor Address: 6825 Pine Street, Suite 340 # Mail Stop B8 # Omaha, NE 68106 # 402.291.0100 # # See the AWIPS II Master Rights File ("Master Rights File.pdf") for # further licensing information. ## # ---------------------------------------------------------------------------- # This software is in the public domain, furnished "as is", without technical # support, and with no warranty, express or implied, as to its usefulness for # any purpose. # # ExUtil1 # # Author: # ---------------------------------------------------------------------------- ToolType = "numeric" WeatherElementEdited = "T" from numpy import * import SmartScript import Common VariableList = [("Model:" , "", "D2D_model")] class Tool (SmartScript.SmartScript): def __init__(self, dbss): self._dbss = dbss SmartScript.SmartScript.__init__(self, dbss) def execute(self, GridTimeRange, Topo, varDict): "This tool accesses T grids directly" self._common = Common.Common(self._dbss) model = varDict["Model:"] # Convert Topo to meters topo_M = self._common._convertFtToM(Topo) # Make a sounding cubes for T # Height will increase in the sounding and be the # first dimension levels = ["MB1000","MB850", "MB700","MB500"] gh_Cube, t_Cube = self.makeNumericSounding( model, "t", levels, GridTimeRange) print "Cube shapes ", gh_Cube.shape, t_Cube.shape # Make an initial T grid with values of -200 # This is an out-of-range value to help us identify values that # have already been set. T = (Topo * 0) - 200 # Work "upward" in the cubes to assign T # We will only set the value once, i.e. the first time the # gh height is greater than the Topo # For each level for i in xrange(gh_Cube.shape[0]): # where ( gh > topo and T == -200), # set to t_Cube value, otherwise keep value already set)) T = where(logical_and(greater(gh_Cube[i], topo_M), equal(T,-200)), t_Cube[i], T) # Convert from K to F T_F = self.convertKtoF(T) return T_F
1.8125
2
venv/KryptoSkattScript/mining_income.py
odgaard/KryptoSkatt
0
12448
<reponame>odgaard/KryptoSkatt import pathlib import datetime path = 'c:/Users/Jacob/PycharmProjects/KryptoSkatt/Data/' trans_in = list() trans_out = list() bitcoin_dict = dict() ethereum_dict = dict() USD_NOK_dict = dict() def unix_time_to_date(timestamp): return datetime.datetime.fromtimestamp(int(timestamp)).strftime('%Y-%m-%d') def populate_electrum_wallet_transactions(file): # Exported data from Bitcoin wallet # All incoming transactions to my wallet with open(pathlib.PureWindowsPath(path + file), 'r') as f: total = 0 for line in f.read().split('\n'): new_line = line.split(',') if(len(new_line) > 3): if(new_line[3][0] == '+'): trans_in.append(new_line) if(new_line[3][0] == '-'): trans_out.append(new_line) def populate_bitcoin_price_index(file): # Gather Bitcoin price index from # https://blockchain.info/charts/market-price?timespan=2years with open(pathlib.PureWindowsPath(path + file), 'r') as f: for line in f.read().split('\n'): line = line.split(' ') final_line = [line[0], line[1].split(',')[1]] bitcoin_dict[final_line[0]] = float(final_line[1]) def populate_ethereum_price_index(file): # Gather Ethereum price index from # https://www.etherchain.org/charts/priceUSD with open(pathlib.PureWindowsPath(path + file), 'r') as f: for line in f.read().split('\n')[1:]: line = line.split(',') new_line = [line[0].replace('"',''), line[1]] # Max one entry per day date = new_line[0].split(' ')[0] price = float(new_line[1]) ethereum_dict[date] = float(price) def populate_USD_NOK_conversion(file): # Gather USD to NOK conversion from # https://data.norges-bank.no/api/data/EXR/B.USD.NOK.SP?StartPeriod=2017&EndPeriod=2018&format=csv-:-comma-true-flat with open(pathlib.PureWindowsPath(path + file), 'r') as f: for line in f.read().split('\n'): line = line.split(',') USD_NOK_dict[line[5]] = float(line[6]) def get_bitcoin_income(year, start_value): # Calculate total income based on data total_NOK, total_BTC = 0, 0 USD_NOK_exchange = start_value #First day of the year for trans in trans_in: date = trans[4].split(' ')[0] if(date.split('-')[0] != year): continue if(USD_NOK_dict.get(date)): USD_NOK_exchange = USD_NOK_dict[date] result = bitcoin_dict[date] * float(trans[3][1:]) * USD_NOK_exchange total_BTC += float(trans[3][1:]) total_NOK += result return total_NOK, total_BTC def get_ethereum_income(file, start_value): total = 0 # Gather mining data from ethermine (my mining pool) # https://ethermine.org/api/miner with open(pathlib.PureWindowsPath(path + file), 'r') as f: total_NOK, total_ETH = 0, 0 USD_NOK_exchange = start_value #First day of the year for line in f.read().split('\n')[1:]: new_line = [string.replace('"', '') for string in line.split(',')[1:]] amount = int(new_line[2])/10**18 ## Calculate income date = unix_time_to_date(new_line[4]) if(USD_NOK_dict.get(date)): USD_NOK_exchange = USD_NOK_dict[date] result = ethereum_dict[date]*amount*USD_NOK_exchange total_NOK += result total_ETH += amount return total_NOK, total_ETH def setup(): populate_USD_NOK_conversion('EXR.csv') populate_bitcoin_price_index('market-price-last-2-years.csv') populate_electrum_wallet_transactions('electrum-history.csv') populate_ethereum_price_index('ethereum-usd-price.csv') def main(): setup() # This should always be the last day of the year, or the first day of the next year. final_date_crypto = "2017-12-31" # This should always be the last working day for the Norwegian exchange, # or the first working day of the next year. final_date_USD_NOK = "2017-12-29" # The USD-NOK conversion for the first day of the year. 
# The Norwegian exchanges might be closes on this day, so it can't be computed directly start_value = 8.652 income_btc_in_NOK, total_BTC = get_bitcoin_income("2017", start_value) income_eth_in_NOK, total_ETH = get_ethereum_income('payouts.csv', start_value) income_in_NOK = income_btc_in_NOK + income_eth_in_NOK # Remove this if your using this script, this is a hard-coded compensation for my taxes. total_BTC-=0.11362 print("BTC Income:", round(income_btc_in_NOK, 2), "NOK") print("ETH Income:", round(income_eth_in_NOK, 2), "NOK") print("Total Income:", round(income_eth_in_NOK + income_btc_in_NOK), "NOK") print() capital_btc_in_NOK = USD_NOK_dict[final_date_USD_NOK]*bitcoin_dict[final_date_crypto]*total_BTC capital_eth_in_NOK = USD_NOK_dict[final_date_USD_NOK]*ethereum_dict[final_date_crypto]*total_ETH capital_in_NOK = capital_btc_in_NOK + capital_eth_in_NOK print("BTC Capital:") print(round(total_BTC, 6), "BTC") print(round(capital_btc_in_NOK, 2), "NOK") print() print("ETH Capital:") print(round(total_ETH, 4), "ETH") print(round(capital_eth_in_NOK, 2), "NOK") print() print("Total capital:", round(capital_in_NOK), "NOK") main()
2.8125
3
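A worked example of the per-transaction income formula used in get_bitcoin_income() and get_ethereum_income() above (price index times amount times USD/NOK rate); the numbers are illustrative, not real market data.

btc_price_usd = 4000.0       # bitcoin_dict[date]
amount_btc = 0.015           # transaction amount in BTC
usd_nok = 8.2                # USD_NOK_dict[date]

income_nok = btc_price_usd * amount_btc * usd_nok
print(round(income_nok, 2))  # 492.0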
SymBOP_Analysis/ql_global.py
duttm/Octahedra_Nanoparticle_Project
0
12449
import numpy as np
import scipy.special as ss
import pathlib

from Particle import Particle


def ql_global(l, particles):
    # Keep only particles that have neighbors (this was changed 5/23/2020)
    particles = [i for i in particles if len(Particle.data[i].neighs) > 0]
    neigh_total = sum([len(Particle.data[i].neighs) for i in particles])

    if isinstance(l, int):
        if len(particles) != 0:
            # average slmbar weighted by the number of neighbors
            Qlmbar = list(sum([np.array(Particle.data[p].qlmbar[l], dtype=complex) * len(Particle.data[p].neighs) / neigh_total
                               for p in particles]))
            Qlmtilde = list(sum([np.array(Particle.data[p].qlmtilde[l], dtype=complex) * len(Particle.data[p].neighs) / neigh_total
                                 for p in particles]))

            if l in Particle.qlmbar_ideal:
                Ql = np.abs(np.sqrt((4 * np.pi / (2 * l + 1)) * np.vdot(np.array(Qlmtilde, dtype=complex),
                                                                        np.array(Qlmtilde, dtype=complex))))
            else:
                Qlmbar_mag_sq = np.abs(np.vdot(np.array(Qlmbar, dtype=complex), np.array(Qlmbar, dtype=complex)))
                Ql = np.abs(np.sqrt((4 * np.pi / (2 * l + 1)) * Qlmbar_mag_sq))
                D = np.sqrt(Qlmbar_mag_sq)
        else:
            Qlmbar = [0] * (2 * l + 1)
            Qlmtilde = [0] * (2 * l + 1)
            Ql = 0.0

    return [Ql, Qlmbar, Qlmtilde]
2.125
2
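A standalone numpy sketch of the order-parameter magnitude computed in ql_global() above, Ql = sqrt(4*pi/(2l+1) * sum_m |Qlm|^2), using a made-up vector of 2l+1 averaged components instead of Particle data.

import numpy as np

l = 4
Qlm = np.random.rand(2 * l + 1) + 1j * np.random.rand(2 * l + 1)  # illustrative values only

# Same expression ql_global() evaluates for the weighted-average Qlm vector
Ql = np.abs(np.sqrt((4 * np.pi / (2 * l + 1)) * np.vdot(Qlm, Qlm)))
print(Ql)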
mycelium/__init__.py
suet-lee/mycelium
6
12450
from .switch import EKFSwitch, RelaySwitch, InitialModeSwitch
from .camera_t265 import CameraT265
from .camera_d435 import CameraD435
1.046875
1
arsenal/sleep/openfaas/sleep-py/handler.py
nropatas/faasbenchmark
0
12451
import os
import time
import datetime


def get_current_epoch():
    return int((datetime.datetime.utcnow() - datetime.datetime(1970, 1, 1)).total_seconds() * 1000)


def get_sleep_parameter(event):
    user_input = str(event.query["sleep"])
    if not user_input or not user_input.isdigit() or int(user_input) < 0:
        return {"error": "invalid sleep parameter"}
    return int(user_input)


def run_test(sleep_time):
    time.sleep(sleep_time / 1000.0)


def is_warm():
    is_warm = os.environ.get("warm") == "true"
    os.environ["warm"] = "true"
    return is_warm


def handle(event):
    start = get_current_epoch()
    reused = is_warm()

    sleep_time = get_sleep_parameter(event)
    if type(sleep_time) != int:
        return {
            "statusCode": 200,
            "body": sleep_time
        }

    run_test(sleep_time)
    duration = (get_current_epoch() - start) * 1000000

    return {
        "statusCode": 200,
        "body": {
            "duration": duration,
            "reused": reused
        }
    }
2.796875
3
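A hypothetical local invocation of the handler above; OpenFaaS normally supplies the event object, so a SimpleNamespace stand-in is used here, and the import assumes the file is on the path as handler.py.

from types import SimpleNamespace
from handler import handle

event = SimpleNamespace(query={"sleep": "250"})
response = handle(event)
print(response["body"])  # e.g. {'duration': ..., 'reused': False}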
lib/vapi_cli/users.py
nogayama/vision-tools
15
12452
<reponame>nogayama/vision-tools<filename>lib/vapi_cli/users.py #!/usr/bin/env python3 # IBM_PROLOG_BEGIN_TAG # # Copyright 2019,2020 IBM International Business Machines Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # # IBM_PROLOG_END_TAG import logging as logger import sys import vapi import vapi_cli.cli_utils as cli_utils from vapi_cli.cli_utils import reportSuccess, reportApiError, translate_flags # All of Vision Tools requires python 3.6 due to format string # Make the check in a common location if sys.hexversion < 0x03060000: sys.exit("Python 3.6 or newer is required to run this program.") token_usage = """ Usage: users token --user=<user-name> --password=<password> Where: --user Required parameter containing the user login name --password Required parameter containing the user's password Gets an authentication token for the given user""" server = None # --- Token Operation ---------------------------------------------- def token(params): """ Handles getting an authentication token for a specific user""" user = params.get("--user", None) pw = params.get("--password", None) rsp = server.users.get_token(user, pw) if rsp is None or rsp.get("result", "fail") == "fail": reportApiError(server, f"Failed to get token for user '{user}'") else: reportSuccess(server, rsp["token"]) cmd_usage = f""" Usage: users {cli_utils.common_cmd_flags} <operation> [<args>...] Where: {cli_utils.common_cmd_flag_descriptions} <operation> is required and must be one of: token -- gets an authentication token for the given user Use 'users <operation> --help' for more information on a specific command.""" usage_stmt = { "usage": cmd_usage, "token": token_usage } operation_map = { "token": token } def main(params, cmd_flags=None): global server args = cli_utils.get_valid_input(usage_stmt, operation_map, argv=params, cmd_flags=cmd_flags) if args is not None: # When requesting a token, we need to ignore any existing token info if args.cmd_params["<operation>"] == "token": cli_utils.token = "" try: server = vapi.connect_to_server(cli_utils.host_name, cli_utils.token) except Exception as e: print("Error: Failed to setup server.", file=sys.stderr) logger.debug(e) return 1 args.operation(args.op_params) if __name__ == "__main__": main(None)
1.992188
2
smile_recognition.py
audreymychan/djsmile
5
12453
# This script loads the pre-trained scaler and models and contains the
# predict_smile() function to take in an image and return smile predictions

import joblib
from tensorflow.keras.models import load_model
from tensorflow.keras.preprocessing.image import img_to_array, array_to_img
from PIL import Image
import numpy as np

# Set new frame size dimensions
img_width, img_height = (100, 100)

# Scaler and model imports
scaler = joblib.load('./models/scaler.save')
model = load_model('./models/my_model.h5')
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])


def predict_smile(gray_img, box, count):
    """Make prediction on a new image whether a person is smiling or not.

    Parameters
    ----------
    gray_img : numpy.ndarray of dtype int
        Grayscale image in numpy.ndarray of current frame.
    box : tuple
        (left, top, right, bottom) locating face bounding box in pixel locations.
    count : int
        Number of faces detected in current frame.

    Returns
    -------
    numpy.ndarray of dtype float
        Probabilities of no smile (second number) and smile (first number).
        i.e. array([[0.972528 , 0.02747207]], dtype=float32)
    """
    # Save a copy of current frame
    gray_img = gray_img.reshape(gray_img.shape + (1,))  # (height, width, 1)
    array_to_img(gray_img).save(f'./images/temp/current_frame_{count}.jpg')

    # Load image
    gray_img = Image.open(f'./images/temp/current_frame_{count}.jpg')

    # Crop face, resize to 100x100 pixels, and save a copy
    face_crop = gray_img.resize((img_width, img_height), box=box)
    face_crop.save(f'./images/temp/face_crop_current_frame_{count}.jpg')

    # Load image and convert to np.array
    face_crop = Image.open(f'./images/temp/face_crop_current_frame_{count}.jpg')
    new_face_array = np.array(img_to_array(face_crop))  # (100, 100, 1)

    # Reshape
    new_face_array = new_face_array.reshape(1, img_width * img_height)  # (1, 10_000)

    # Transform with pre-trained scaler
    new_face_array = scaler.transform(new_face_array)
    new_face_array = new_face_array.reshape(1, img_width, img_height, 1)  # (1, 100, 100, 1)

    return model.predict(new_face_array)
3.21875
3
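A hypothetical call to predict_smile() above; it assumes the pre-trained ./models files and the ./images/temp directory exist and that the module is importable as smile_recognition, and the frame and face box below are fabricated for illustration.

import numpy as np
from smile_recognition import predict_smile

frame = np.random.randint(0, 255, size=(480, 640), dtype=np.uint8)  # fake grayscale frame
box = (200, 120, 360, 280)  # (left, top, right, bottom) from a face detector
probs = predict_smile(frame, box, count=0)
print(probs)  # smile/no-smile probabilities as described in the docstring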
comicstreamerlib/gui_qt.py
rlugojr/ComicStreamer
169
12454
import sys
import webbrowser
import os

from comicstreamerlib.folders import AppFolders

from PyQt4 import QtGui, QtCore


class SystemTrayIcon(QtGui.QSystemTrayIcon):

    def __init__(self, icon, app):
        QtGui.QSystemTrayIcon.__init__(self, icon, None)
        self.app = app
        self.menu = QtGui.QMenu(None)
        exitAction = self.menu.addAction("Exit")
        self.setContextMenu(self.menu)
        exitAction.triggered.connect(self.quit)

    def quit(self):
        QtCore.QCoreApplication.quit()


class QtBasedGui():

    def __init__(self, apiServer):
        self.apiServer = apiServer

        self.app = QtGui.QApplication(sys.argv)

        pixmap = QtGui.QPixmap(AppFolders.imagePath("trout.png"))
        icon = QtGui.QIcon(pixmap.scaled(16, 16))

        self.trayIcon = SystemTrayIcon(icon, self)
        self.trayIcon.show()

    def run(self):
        try:
            self.app.exec_()
        except KeyboardInterrupt:
            pass


if __name__ == '__main__':
    QtGui().run()
2.28125
2
laserchicken/io/las_handler.py
eEcoLiDAR/eEcoLiDAR
0
12455
""" IO Handler for LAS (and compressed LAZ) file format """ import laspy import numpy as np from laserchicken import keys from laserchicken.io.base_io_handler import IOHandler from laserchicken.io.utils import convert_to_short_type, select_valid_attributes DEFAULT_LAS_ATTRIBUTES = { 'x', 'y', 'z', 'intensity', 'gps_time', 'raw_classification', } class LASHandler(IOHandler): """ Class for IO of point-cloud data in LAS file format """ def read(self, attributes=DEFAULT_LAS_ATTRIBUTES): """ Load the points from a LAS(LAZ) file into memory. :param attributes: list of attributes to read ('all' for all attributes in file) :return: point cloud data structure """ file = laspy.read(self.path) dtype = file.header.point_format.dtype() attributes_available = [el if el not in ['X', 'Y', 'Z'] else el.lower() for el in dtype.fields.keys()] attributes = select_valid_attributes(attributes_available, attributes) points = {} for name in attributes: if hasattr(file, name): file_data = getattr(file, name) data = np.zeros_like(file_data) data[:] = file_data points[name] = _get_attribute(data, data.dtype.name) return {keys.point: points} def write(self, point_cloud, attributes='all', file_version='1.2', point_format=3): """ Write point cloud to a LAS(LAZ) file. :param point_cloud: :param attributes: list of attributes to write ('all' for all attributes in point_cloud) :param file_version: :param point_format: :return: """ file = laspy.create(point_format=point_format, file_version=file_version) points = point_cloud[keys.point] attributes = select_valid_attributes([attr for attr in points.keys()], attributes) # NOTE: adding extra dims and assignment should be done in two steps, # some fields (e.g. raw_classification) are otherwise overwritten dtype = file.header.point_format.dtype() for attribute in attributes: data, type = _get_data_and_type(points[attribute]) type_short = convert_to_short_type(type) if attribute not in 'xyz': # x,y,z are not there but file methods can be used to convert coords to int4 if attribute not in dtype.fields: param = laspy.ExtraBytesParams(name=attribute, type=type) file.add_extra_dim(param) file_type_short = convert_to_short_type(getattr(file, attribute).dtype.name) if not file_type_short == type_short: raise TypeError('Data type in file does not match the one in point cloud: ' 'for {}, {} vs {}'.format(attribute, file_type_short, type_short)) for dim in 'xyz': data, _ = _get_data_and_type(points[dim]) setattr(file.header, '{}_offset'.format(dim), data.min()) setattr(file.header, '{}_scale'.format(dim), 0.001) for attribute in attributes: data, _ = _get_data_and_type(points[attribute]) if data.size == 0: raise ValueError('Cannot write empty point-cloud!') else: setattr(file, attribute, data) try: file.write(self.path) except ValueError as err: raise ValueError('Error in writing LAS file (file_version {}, point_format_id {}). ' 'laspy error below:\n{}'.format(file_version, point_format, err)) def _get_attribute(data, data_type): return {'type': data_type, 'data': data} def _get_data_and_type(attribute): return attribute['data'], attribute['type']
2.515625
3
prob.py
Y1fanHE/po_with_moead-levy
7
12456
import numpy as np
import pandas as pd


def read_file(prob_num):
    df = pd.read_csv("dat/port" + str(prob_num) + ".txt", header=None,
                     delimiter="\s+", names=range(3))

    # info on assets
    n = int(df[0][0])                           # number of assets
    r = df[1: (n + 1)][0].values.reshape(n, 1)  # mean of returns
    s = df[1: (n + 1)][1].values.reshape(n, 1)  # std. of returns

    df = df.values
    c = np.zeros((n, n))
    for it in np.arange(n, len(df)):
        i, j = int(df[it][0] - 1), int(df[it][1] - 1)
        c[i][j] = c[j][i] = df[it][2]           # covariance between asset i, j
    return n, r, s, c


def evaluate(x, r, s, c):
    M = - np.sum(np.dot(x.T, r))                      # obj. 1: -1 * mean as return
    V = np.sum(np.dot(x, x.T) * np.dot(s, s.T) * c)   # obj. 2: variance as risk
    return M, V


def pf(prob_num):
    pf = np.genfromtxt("dat/portef" + str(prob_num) + ".txt")  # points on pf
    M = []
    V = []
    for i in range(len(pf)):
        M += [pf[i][0]]
        V += [pf[i][1]]
    return M, V


def set(instance):
    n, r, s, c = read_file(instance)
    lb, ub = np.zeros((n, 1)), np.ones((n, 1))  # upper and lower bounds
    port = evaluate
    mp, vp = pf(instance)
    return n, r, s, c, lb, ub, port, mp, vp
2.828125
3
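A small self-contained check of evaluate() above on synthetic data (three assets with made-up means and risks); it assumes the module is importable as prob, and the dat/ files are not needed for this call.

import numpy as np
from prob import evaluate

n = 3
r = np.array([[0.02], [0.05], [0.10]])   # mean returns
s = np.array([[0.01], [0.04], [0.09]])   # std. of returns
c = np.eye(n)                            # identity correlations for simplicity
x = np.full((n, 1), 1.0 / n)             # equal-weight portfolio

M, V = evaluate(x, r, s, c)
print(M, V)                              # negated mean return, portfolio variance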
nmr/testing_PyBMRB.py
jameshtwose/han_jms_collabs
0
12457
from pybmrb import Spectra, Histogram
import plotly.io as pio

pio.renderers.default = "browser"

peak_list = Spectra.n15hsqc(bmrb_ids=15060, legend='residue')
peak_list = Spectra.c13hsqc(bmrb_ids=15060, legend='residue')
peak_list = Spectra.tocsy(bmrb_ids=15060, legend='residue')
1.945313
2
parser.py
sberczuk/powerschool-reporter
1
12458
<reponame>sberczuk/powerschool-reporter #!/usr/bin/env python3 import io import xml.etree.ElementTree as ET import argparse ns = {'ns1': 'http://www.sifinfo.org/infrastructure/2.x', 'ns2': 'http://stumo.transcriptcenter.com'} class StudentInfo: def __init__(self, first_name, middle_name, last_name, ): self.last_name = last_name self.middle_name = middle_name self.first_name = first_name class Grade: """A Wrapper for a single grade""" def __init__(self, year, grade_level, term, course_code, title, letter_grade, number_grade, comments, teacher_fn, teacher_ln, school_name): self.teacher_ln = teacher_ln self.teacher_fn = teacher_fn self.comments = comments self.number_grade = number_grade # Special case for the Pandemic special grading self.letter_grade = letter_grade if letter_grade is not None else "n/a" self.course_title = title self.course_code = course_code self.term = term self.year = year self.grade_level = grade_level self.school = school_name if (self.comments != None): self.comments = self.comments.strip() def __str__(self) -> str: return f"{self.year}-{self.term} (code: {self.course_code}) {self.course_title} {self.letter_grade}, {self.number_grade}, {self.comments}, {self.teacher_fn}, {self.teacher_ln} {self.school}" def pretty_print(self): return f"###{self.year}-{self.term} {self.school} {self.grade_level} (code: {self.course_code}) {self.course_title} | Instructor: {self.teacher_fn} {self.teacher_ln}\n*Letter Grade*: {self.letter_grade} | *Grade:* {self.number_grade}\nComments: {self.comments} " def print_description(self): return f"{self.year}-{self.term} {self.school} {self.grade_level} (code: {self.course_code}) {self.course_title}\nInstructor: {self.teacher_fn} {self.teacher_ln}\nLetter Grade: {self.letter_grade} \nGrade: {self.number_grade}\nComments:{self.comments} " def print_header(self): return f"{self.course_title} (code: {self.course_code}) {self.school}\n" def print_term_grade(self): return f"<em>{self.year}-{self.term} {self.grade_level}</em><br/>\n<b>{self.letter_grade}</b> / <b>{self.number_grade}</b>" def reporting_period(self): return f"{self.year}-{self.term}" def teacher_name(self): return f"{self.teacher_fn} {self.teacher_ln}" def format_comments(self): if (self.comments != None): return self.comments.replace('\n', ' ') else: return '' def process_course(course, year): title = course.find(".//ns1:CourseTitle", ns).text course_code = course.find(".//ns1:CourseCode", ns).text mark_data = course.find(".//ns1:MarkData", ns) grade_level = course.find(".//ns1:GradeLevelWhenTaken/ns1:Code", ns).text letter_grade = mark_data.find("ns1:Letter", ns).text number_grade = mark_data.find("ns1:Percentage", ns).text comments = mark_data.find("ns1:Narrative", ns).text # get extended info extended_info = course.find("ns1:SIF_ExtendedElements", ns) term = extended_info.find("ns1:SIF_ExtendedElement[@Name='StoreCode']", ns).text teacher_fn = extended_info.find("ns1:SIF_ExtendedElement[@Name='InstructorFirstName']", ns).text teacher_ln = extended_info.find("ns1:SIF_ExtendedElement[@Name='InstructorLastName']", ns).text school_name = extended_info.find("ns1:SIF_ExtendedElement[@Name='SchoolName']", ns).text return Grade(year, grade_level, term, course_code, title, letter_grade, number_grade, comments, teacher_fn, teacher_ln, school_name) # Placeholder for markdown format for a list of grades # Take the list and sort it with appropriate headers. # TBD if we need to figure pass in meta data, whether we figure it out, or if we make assumptions. 
def format_as_markdown(grades): pass def process_data(xmlDataFile): xml_string = extractValidXML(xmlDataFile) root = ET.fromstring(xml_string) ns_name = '{0}StudentDemographicRecord/{0}StudentPersonalData/{0}Name'.format('ns1:') name = root.find(ns_name, namespaces=ns) fn = name.find("ns1:FirstName", namespaces=ns).text mi = name.find("ns1:MiddleName", namespaces=ns).text ln = name.find("ns1:LastName", namespaces=ns).text (grades, years) = collect_grades(root) return (StudentInfo(fn, mi, ln), grades, years) def generate_year_report(student_info, year, grades_by_course, schools, terms): output = io.StringIO() # Write Report Card Header output.write(f"<h1> {student_info.first_name} {student_info.middle_name} {student_info.last_name}</h1>\n") output.write(f"<h1> {year}</h1>\n") for s in schools: output.write(f"<h2>{s}</h2>") for course in grades_by_course.keys(): output.write('<div class="course">\n') output.write(f"<h2>{headers_by_course.get(course)}</h2>") course_by_term = organize_by_term(grades_by_course[course]) grades_table = generate_grades_table(course_by_term, terms) output.write(grades_table) comments_table = generate_comments_table(course_by_term, terms) output.write(comments_table) output.write("</div>\n") return output.getvalue() def generate_grades_table(course_by_term, terms): term_headers = sorted(terms) with io.StringIO() as output: output.write("<table class='grades'>") output.write("<tr>") for th in term_headers: output.write(f"<th>{th}</th>") output.write("</tr>") output.write("<tr>") for th in term_headers: if (th in course_by_term): g = course_by_term[th] output.write(f"<td><em>{g.teacher_name()}</em><br/>\n{g.print_term_grade()}</td>") else: output.write(f"<td></td>") output.write("</tr>") output.write("</table>") return output.getvalue() def generate_comments_table(course_by_term, terms): term_headers = sorted(terms) with io.StringIO() as output: output.write("<table class='comments'>") output.write(f"<tr><th class='cbodyterm'>Term</th><th class='cbodytext'>Comments</th></tr>") for th in term_headers: output.write(f"<tr><td class='cbodyterm'>{th}</td>\n") if (th in course_by_term): g = course_by_term[th] if g.comments != None: output.write(f"<td class='cbodytext'>{g.format_comments()}</td>") else: output.write(f"<td class='cbodytext'></td>") else: output.write(f"<td class='cbodytext'></td>") output.write("</table>") return output.getvalue() def collect_grades(root): all_grades = [] all_years = [] findall = root.findall(".//ns1:Term", ns) for term in findall: year = term[0][0].text if year not in all_years: all_years.append(year) for courses in term.iter("{http://www.sifinfo.org/infrastructure/2.x}Courses"): for course in courses: grade = process_course(course, year) all_grades.append(grade) return (all_grades, all_years) def organize_by_term(grades): grade_list = sorted(grades, key=lambda gg: gg.term) grades_by_term = dict() for grade in grade_list: term = grade.term if term not in grades_by_term: grades_by_term[term] = [] grades_by_term[term] = grade return grades_by_term def organize_grades(all_grades): allCoursesByName = set() grades_by_course = dict() grades_by_period = dict() header_by_course = dict() for grade in all_grades: period = grade.reporting_period() allCoursesByName.add(grade.course_title) course_code = grade.course_code if course_code not in grades_by_course: grades_by_course[course_code] = [] if period not in grades_by_period: grades_by_period[period] = [] grades_by_period[period].append(grade) grades_by_course[course_code].append(grade) 
header_by_course[course_code] = grade.print_header() return (grades_by_course, grades_by_period, header_by_course) def extractValidXML(inFile): with open(inFile, 'r') as f: return parse_file(f) # concat all of the XML lines in the file, then return it # Skip all up to the start of the XML def parse_file(f): result = '' skip = True for line in f: if line.startswith('<?xml version="1.0" '): skip = False if not skip: # This is a known issue: last line being incomplete if (line.startswith('</StudentRec') and line != '</StudentRecordExchangeData>'): line = '</StudentRecordExchangeData>' result = result + line return result def generate_html_file(file_name, body_text): css_text = '' with open('reportCard.css') as css_file: css_text = css_file.read() with open(file_name, 'w') as f: f.write("<html>\n<head>\n") f.write(f"\n<style>{css_text}</style>\n") f.write("</head>\n<body>\n") f.write(body_text) f.write("\n</body>\n</html>") if __name__ == "__main__": import sys parser = argparse.ArgumentParser(description='Report Card Generator.') parser.add_argument('--output_basename', action='store', default='report_card', help='Output file to report results to (default: standard out)') # First arg is the data file parser.add_argument('data_file') args = parser.parse_args() basename = args.output_basename print("output = ", basename) print("parsing ", args.data_file) valid_xml = extractValidXML(args.data_file) (student_info, grades, years) = process_data(args.data_file) years.sort() for year in years: (grades_by_course, grades_by_period, headers_by_course) = organize_grades( [a for a in grades if (a.year == year)]) print("*******************", year, "***************") schools = [g.school for g in grades if (g.year == year)] terms = [g.term for g in grades if (g.year == year)] report_text = generate_year_report(student_info, year, grades_by_course, set(schools), set(terms)) file_name = f"{basename}-{year}.html" generate_html_file(file_name, report_text)
3.328125
3
eunite/eunite_data.py
jiasudemotuohe/deep_learning
0
12459
<filename>eunite/eunite_data.py
# -*- coding: utf-8 -*-
# @Time : 2020-04-11 12:34
# @Author : speeding_moto

import numpy as np
import pandas as pd
from matplotlib import pyplot as plt

EUNITE_PATH = "dataset/eunite.xlsx"
PARSE_TABLE_NAME = "mainData"


def load_eunite_data():
    """
    Return the generated load data, including all the features we handle.
    """
    data = open_file()
    X, Y = generate_features(data)

    return X.values, Y.values


def load_eunite_train_data():
    X, Y = load_eunite_data()

    trains_test_rate = int(len(X) * 0.7)
    train_x = X[0: trains_test_rate]
    train_y = Y[0: trains_test_rate]
    test_x = X[trains_test_rate:]
    test_y = Y[trains_test_rate:]

    return train_x, train_y, test_x, test_y


def generate_features(df):
    """
    Parse the data; we need to transfer the class numbers to one-hot encoding
    for our calculations later.
    """
    months = df["Month"]
    days = df["Day"]

    one_hot_months = cast_to_one_hot(months, n_classes=12)
    days = cast_to_one_hot(days, n_classes=31)

    one_hot_months = pd.DataFrame(one_hot_months)
    days = pd.DataFrame(days)

    df = pd.merge(left=df, right=one_hot_months, left_index=True, right_index=True)
    df = pd.merge(left=df, right=days, left_index=True, right_index=True)

    y = df['Max Load']

    # maybe we need to normalize the temperature data
    temperature = normalization(df['Temp'].values)
    temperature = pd.DataFrame(temperature)
    df = pd.merge(left=df, right=temperature, left_index=True, right_index=True)

    drop_columns = ["ID", "Month", "Day", "Year", "Max Load", "Temp"]
    df.drop(drop_columns, axis=1, inplace=True)

    print(df[0:10], "\n", y[0])
    return df, y


def normalization(data):
    return (data - np.mean(data)) / np.max(np.abs(data))


def cast_to_one_hot(data, n_classes):
    """
    Cast the categorical data to one-hot vectors.
    """
    one_hot_months = np.eye(N=n_classes)[[data - 1]]
    return one_hot_months


def show_month_temperature_load_image(df):
    plt.title("relation of temperature and load")

    max_load = df["Max Load"]
    temp = df['Temp'] * 15

    plt.plot(max_load)
    plt.plot(temp)
    plt.xlabel('time')
    plt.annotate('temperature', xy=[200, 200], xytext=(300, 200))
    plt.annotate('load', xy=[200, 600], xytext=(200, 800))
    plt.show()


def open_file():
    """
    Open the eunite load excel file and return the parsed table.
    """
    xlsx_file = pd.ExcelFile(EUNITE_PATH)
    return xlsx_file.parse(PARSE_TABLE_NAME)


if __name__ == '__main__':
    df = open_file()
    show_month_temperature_load_image(df)

    x, y = load_eunite_data()
    print(x.shape)
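# A quick worked example of the one-hot encoding above (hypothetical values, not part of
# the original script): month labels 1 and 3 with n_classes=3 select rows 0 and 2 of the
# 3x3 identity matrix, i.e. [1, 0, 0] and [0, 0, 1]. The exact array shape depends on the
# numpy version because of the list-in-list indexing used in cast_to_one_hot().
#
#     cast_to_one_hot(np.array([1, 3]), n_classes=3)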
3.40625
3
tests/test_fibsem.py
DeMarcoLab/piescope
4
12460
<filename>tests/test_fibsem.py import numpy as np import pytest from piescope.data.mocktypes import MockAdornedImage import piescope.fibsem autoscript = pytest.importorskip( "autoscript_sdb_microscope_client", reason="Autoscript is not available." ) try: from autoscript_sdb_microscope_client import SdbMicroscopeClient microscope = SdbMicroscopeClient() microscope.connect("localhost") except Exception as e: pytest.skip("AutoScript cannot connect to localhost, skipping all AutoScript tests.", allow_module_level=True) def test_initialize(): """Test connecting to the microscope offline with localhost.""" microscope = piescope.fibsem.initialize("localhost") @pytest.fixture def microscope(): from autoscript_sdb_microscope_client import SdbMicroscopeClient microscope = SdbMicroscopeClient() microscope.connect("localhost") return microscope @pytest.fixture def image(): image_array = np.random.random((10, 10)) return MockAdornedImage(image_array, pixelsize_x=1e-6, pixelsize_y=1e-6) def test_move_to_light_microscope(microscope): original_position = microscope.specimen.stage.current_position final_position = piescope.fibsem.move_to_light_microscope(microscope) assert np.isclose(final_position.x, original_position.x + 50e-3, atol=1e-7) assert np.isclose(final_position.y, original_position.y + 0.) assert np.isclose(final_position.z, original_position.z) assert np.isclose(final_position.r, original_position.r) assert np.isclose(final_position.t, original_position.t) def test_move_to_electron_microscope(microscope): original_position = microscope.specimen.stage.current_position final_position = piescope.fibsem.move_to_electron_microscope(microscope) assert np.isclose(final_position.x, original_position.x - 50e-3, atol=1e-7) assert np.isclose(final_position.y, original_position.y - 0.) 
assert np.isclose(final_position.z, original_position.z) assert np.isclose(final_position.r, original_position.r) assert np.isclose(final_position.t, original_position.t) def test_new_ion_image(microscope): result = piescope.fibsem.new_ion_image(microscope) assert microscope.imaging.get_active_view() == 2 assert result.data.shape == (884, 1024) def test_new_electron_image(microscope): result = piescope.fibsem.new_electron_image(microscope) assert microscope.imaging.get_active_view() == 1 assert result.data.shape == (884, 1024) def test_last_ion_image(microscope): result = piescope.fibsem.last_ion_image(microscope) assert microscope.imaging.get_active_view() == 2 assert result.data.shape == (884, 1024) def test_last_electron_image(microscope): result = piescope.fibsem.last_electron_image(microscope) assert microscope.imaging.get_active_view() == 1 assert result.data.shape == (884, 1024) def test_create_rectangular_pattern(microscope, image): x0 = 2 x1 = 8 y0 = 3 y1 = 7 depth = 1e-6 output = piescope.fibsem.create_rectangular_pattern( microscope, image, x0, x1, y0, y1, depth) expected_center_x = 0 expected_center_y = 0 expected_width = 6e-6 expected_height = 4e-6 assert np.isclose(output.center_x, expected_center_x) assert np.isclose(output.center_y, expected_center_y) assert np.isclose(output.width, expected_width) assert np.isclose(output.height, expected_height) assert np.isclose(output.depth, depth) # depth is unchanged assert np.isclose(output.rotation, 0) # no rotation by befault def test_empty_rectangular_pattern(microscope, image): x0 = None x1 = None y0 = 3 y1 = 7 depth = 1e-6 output = piescope.fibsem.create_rectangular_pattern( microscope, image, x0, x1, y0, y1, depth) assert output is None @pytest.mark.parametrize( "coord, expected_output", [ ([5, 5], [0, 0]), ([6, 5], [1e-6, 0]), ([5, 4], [0, 1e-6]), ([6, 4], [1e-6, 1e-6]), ([4, 6], [-1e-6, -1e-6]), ([4, 4], [-1e-6, 1e-6]), ([6, 6], [1e-6, -1e-6]), ], ) def test_pixel_to_realspace_coordinate(image, coord, expected_output): result = piescope.fibsem.pixel_to_realspace_coordinate(coord, image) assert np.allclose(np.array(result), np.array(expected_output)) def test_autocontrast(microscope): # This test checks autocontrast does not hit an error piescope.fibsem.autocontrast(microscope) @pytest.mark.parametrize( "resolution", [ ("1536x1024"), ("3072x2048"), ("6144x4096"), ("768x512"), ], ) def test_update_camera_settings(resolution): dwell_time = 1e-7 output = piescope.fibsem.update_camera_settings(dwell_time, resolution) assert output.dwell_time == dwell_time assert output.resolution == resolution
1.96875
2
newnew.py
jennycs005/Skyscraper-App
0
12461
<filename>newnew.py import streamlit as st import pandas as pd import matplotlib.pyplot as plt import csv import numpy as np import pydeck as pdk from PIL import Image def scatterplot(): skyscrapers_data = pd.read_csv("Skyscrapers2021.csv") completion_year_List = [] meters_List = [] for row in skyscrapers_data: # st.write(row) completion_year = pd.to_numeric(skyscrapers_data.COMPLETION) # print(completion_year) completion_year_List.append(completion_year) meters = skyscrapers_data.Meters.str.replace(r'\s+m', '').astype(float) meters_List.append(meters) plt.xlabel("Completion Year",fontsize=10) plt.ylabel("Meters",fontsize=10) plt.title("Height & Numbers along with completion year",fontsize=13) plt.scatter(completion_year_List, meters_List, alpha=0.3, marker=".", color="cornflowerblue") plt.show() #return plt #这个就是你之前的rank_map函数 我改名字为whole_mao(),为了和下面的rank_map()区分 def whole_map(): skyscrapers_data = pd.read_csv("Skyscrapers2021.csv") sky_df = pd.DataFrame(skyscrapers_data, columns=["RANK", "Latitude", "Longitude"]) sky_df.rename(columns={"Latitude": "lat", "Longitude": "lon"},inplace=True) st.map(sky_df) #新rank_map()函数,显示按照rank选择出来的大楼 def rank_map(select_rank): skyscrapers_data = pd.read_csv("Skyscrapers2021.csv") sky_df = pd.DataFrame(skyscrapers_data, columns=["RANK", "CITY", "Latitude", "Longitude"]) sky_df.rename(columns={"Latitude": "lat", "Longitude": "lon"}, inplace=True) select_rank_max = select_rank + 19 rank_df = sky_df[(sky_df['RANK'] >= select_rank) & (sky_df['RANK'] <= select_rank_max)] #在sidebar展示数据表格 rank_df_show = pd.DataFrame(rank_df, columns=['CITY', 'lon', 'lat']) st.sidebar.table(rank_df_show) #画出地图 st.pydeck_chart(pdk.Deck( map_style = 'mapbox://styles/mapbox/light-v9', initial_view_state=pdk.ViewState( latitude=rank_df['lat'].mean(), longitude=rank_df['lon'].mean(), zoom=1, pitch=0 ), layers = [ pdk.Layer( 'HexagonLayer', data=rank_df, get_position = '[lon, lat]', radius = 200000, elevation_scale = 10000, elevation_range = [400,1000], pickable = True, extruded = True, ), pdk.Layer( 'ScatterplotLayer', data=rank_df, get_position='[lon, lat]', get_color = '[200, 30, 0, 160]', get_radius = 200000, ), ], )) #按照选择年份画出平均高度的折线图 def average_height_line_chart(select_year): average_height_df =pd.DataFrame(columns=['Year', 'AverageHeight']) skyscrapers_data = pd.read_csv("Skyscrapers2021.csv") year_df = pd.DataFrame(skyscrapers_data, columns=["COMPLETION", "Meters"]) year_df.COMPLETION = pd.to_numeric(year_df.COMPLETION) year_df.Meters = year_df.Meters.str.replace(r'\s+m', '').astype(float) year_df = year_df[year_df['COMPLETION']<= select_year] year_df.sort_values(by = ['COMPLETION'], ascending = True, inplace= True) for year in range(1931, select_year):#之前你这写的是1931~1991,估计这是为什么只显示到1991吧 mean_height = (year_df[['Meters']][year_df['COMPLETION'] <= year]).mean() a = {'Year': year, 'AverageHeight': mean_height} average_height_df = average_height_df.append(a, ignore_index=True) plt.xlabel("Completion Year",fontsize=10) plt.ylabel("Average Height",fontsize=10) plt.title("Average Height along with completion year",fontsize=13) plt.plot(average_height_df.Year, average_height_df.AverageHeight) plt.show() def statisticchart(selection): if selection == "By Function": fp = open('Skyscrapers2021.csv', 'r') reader = csv.reader(fp) count = 0 d = {'office': 0, 'hotel': 0, 'residential': 0, 'hotel / office': 0, 'residential / office': 0, 'multifunction': 0} for row in reader: if count > 0: label = row[12] if label == 'office': d['office'] += 1 elif label == 'hotel': d['hotel'] += 1 elif label == 
'residential': d['residential'] += 1 elif label == 'hotel / office': d['hotel / office'] += 1 elif label == 'residential / office': d['residential / office'] += 1 else: d['multifunction'] += 1 count += 1 label = [] values = [] for key in d: label.append(key) values.append(d[key]) EXPLODE_VALUE = 0.1 max_percentage = max(d.values()) max_percentage_index = values.index(max_percentage) explode_values = [0] * len(label) explode_values[max_percentage_index] = EXPLODE_VALUE colors = ["skyblue", "cadetblue", "cornflowerblue","powderblue","steelblue","lightslategray"] plt.pie(values, labels=label, colors=colors,explode=explode_values, autopct='%1.1f%%', startangle=90, textprops={'fontsize': 10}) plt.show() plt.rcParams.update({"font.size": 7}) plt.legend(loc="lower right", bbox_to_anchor=(1.5, 0)) plt.show() else: skyscrapers_data = pd.read_csv("Skyscrapers2021.csv") material_description = {} for i in skyscrapers_data["MATERIAL"]: if i in material_description: material_description[i] += 1 else: material_description[i] = 1 material_Percentage_Value = material_description.values() labels = material_description.keys() mfunction = [x for x in material_Percentage_Value] st.set_option('deprecation.showPyplotGlobalUse', False) EXPLODE_VALUE = 0.1 max_percentage = max(material_Percentage_Value) max_percentage_index = mfunction.index(max_percentage) explode_values = [0] * len(labels) explode_values[max_percentage_index] = EXPLODE_VALUE colors = ["tan", "peru", "orange", "gold"] plt.pie(mfunction, labels=labels, colors=colors, explode=explode_values, autopct='%1.1f%%', startangle=90, textprops={'fontsize': 10}) plt.legend(loc="lower right", bbox_to_anchor=(1.5, 0)) plt.show() return plt def main(): skyscrapers_data = pd.read_csv("Skyscrapers2021.csv") img = Image.open("photo.jpg") st.image(img, width=700) st.title("Top 100 Skyscrapers around the world!") if st.checkbox("Show DataFrame"): st.dataframe(skyscrapers_data, width=700, height=300) if st.checkbox("Show all 100 Skyscrapers in the map"): whole_map() # sidebar选择rank rank = st.sidebar.selectbox('Select rank:', ('1~20', '21~40', '41~60', '61~80', '81~100')) rank_list = {'1~20': 1, '21~40': 21, '41~60': 41, '61~80': 61, '81~100': 81} select_rank = rank_list[rank] # 用选好的年份画map st.write('Skyscrapers Rank ' + str(select_rank) + ' ~ ' + str(select_rank+19)) rank_map(select_rank) #去除一个不重要的警告信息 st.set_option('deprecation.showPyplotGlobalUse', False) st.pyplot(scatterplot()) # 插入pivot table skyscrapers_data = pd.read_csv("Skyscrapers2021.csv") skyscrapers_data.Meters = skyscrapers_data.Meters.str.replace(r'\s+m', '').astype(float) tt = pd.pivot_table(skyscrapers_data, index=['CITY', 'COMPLETION', 'MATERIAL'],values=['RANK', 'Meters']) st.dataframe(tt) # sidebar选择年份 select_year = st.sidebar.slider("Select years", 1931, 2020) # 用选好的年份画折线图 st.pyplot(average_height_line_chart(select_year)) selection = st.sidebar.selectbox("Select an option: ",("By Function", "By Material")) st.set_option('deprecation.showPyplotGlobalUse', False) st.pyplot(statisticchart(selection)) main()
3.796875
4
cifar_train.py
usumfabricae/sagemaker-multi-model-endpoint-tensorflow-computer-vision
4
12462
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout, BatchNormalization from tensorflow.keras.preprocessing.image import ImageDataGenerator from tensorflow.keras.callbacks import ModelCheckpoint from tensorflow.keras.models import Sequential from tensorflow.keras.models import load_model from tensorflow.keras import utils import tensorflow as tf import numpy as np import argparse import logging import os # Set Log Level os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # Seed for Reproducability SEED = 123 np.random.seed(SEED) tf.random.set_seed(SEED) # Setup Logger logger = logging.getLogger('sagemaker') logger.setLevel(logging.INFO) logger.addHandler(logging.StreamHandler()) def parse_args(): parser = argparse.ArgumentParser() # Hyperparameters sent by the client are passed as command-line arguments to the script parser.add_argument('--epochs', type=int, default=1) parser.add_argument('--data', type=str, default=os.environ.get('SM_CHANNEL_DATA')) parser.add_argument('--output', type=str, default=os.environ.get('SM_CHANNEL_OUTPUT')) parser.add_argument('--train', type=str, default=os.environ.get('SM_CHANNEL_TRAIN')) parser.add_argument('--val', type=str, default=os.environ.get('SM_CHANNEL_VAL')) parser.add_argument('--test', type=str, default=os.environ.get('SM_CHANNEL_TEST')) parser.add_argument('--model_dir', type=str, default=os.environ.get('SM_MODEL_DIR')) return parser.parse_known_args() def get_train_data(train_dir): X_train = np.load(os.path.join(train_dir, 'X_train.npy')) y_train = np.load(os.path.join(train_dir, 'y_train.npy')) logger.info(f'X_train: {X_train.shape} | y_train: {y_train.shape}') return X_train, y_train def get_validation_data(val_dir): X_validation = np.load(os.path.join(val_dir, 'X_validation.npy')) y_validation = np.load(os.path.join(val_dir, 'y_validation.npy')) logger.info(f'X_validation: {X_validation.shape} | y_validation: {y_validation.shape}') return X_validation, y_validation def get_test_data(test_dir): X_test = np.load(os.path.join(test_dir, 'X_test.npy')) y_test = np.load(os.path.join(test_dir, 'y_test.npy')) logger.info(f'X_test: {X_test.shape} | y_test: {y_test.shape}') return X_test, y_test if __name__ == '__main__': logger.info(f'[Using TensorFlow version: {tf.__version__}]') DEVICE = '/cpu:0' args, _ = parse_args() epochs = args.epochs # Load train, validation and test sets from S3 X_train, y_train = get_train_data(args.train) X_validation, y_validation = get_validation_data(args.val) X_test, y_test = get_test_data(args.test) with tf.device(DEVICE): # Data Augmentation TRAIN_BATCH_SIZE = 32 data_generator = ImageDataGenerator(width_shift_range=0.1, height_shift_range=0.1, horizontal_flip=True) train_iterator = data_generator.flow(X_train, y_train, batch_size=TRAIN_BATCH_SIZE) # Define Model Architecture model = Sequential() # CONVOLUTIONAL LAYER 1 model.add(Conv2D(filters=16, kernel_size=2, padding='same', activation='relu', input_shape=(32, 32, 3))) model.add(BatchNormalization()) model.add(MaxPooling2D(pool_size=2)) # CONVOLUTIONAL LAYER 1 model.add(Conv2D(filters=32, kernel_size=2, padding='same', activation='relu')) model.add(BatchNormalization()) model.add(MaxPooling2D(pool_size=2)) # CONVOLUTIONAL LAYER 3 model.add(Conv2D(filters=64, kernel_size=2, padding='same', activation='relu')) model.add(BatchNormalization()) model.add(MaxPooling2D(pool_size=2)) model.add(Dropout(0.3)) # FULLY CONNECTED LAYER model.add(Flatten()) model.add(Dense(500, activation='relu')) model.add(Dropout(0.4)) model.add(Dense(10, 
activation='softmax')) model.summary() # Compile Model model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy']) # Train Model BATCH_SIZE = 32 STEPS_PER_EPOCH = int(X_train.shape[0]/TRAIN_BATCH_SIZE) model.fit(train_iterator, steps_per_epoch=STEPS_PER_EPOCH, batch_size=BATCH_SIZE, epochs=epochs, validation_data=(X_validation, y_validation), callbacks=[], verbose=2, shuffle=True) # Evaluate on Test Set result = model.evaluate(X_test, y_test, verbose=1) print(f'Test Accuracy: {result[1]}') # Save Model model.save(f'{args.model_dir}/1')
2.546875
3
corehq/form_processor/migrations/0049_case_attachment_props.py
kkrampa/commcare-hq
1
12463
<filename>corehq/form_processor/migrations/0049_case_attachment_props.py # -*- coding: utf-8 -*- from __future__ import unicode_literals from __future__ import absolute_import from django.db import models, migrations import jsonfield.fields class Migration(migrations.Migration): dependencies = [ ('form_processor', '0048_attachment_content_length_blob_id'), ] operations = [ migrations.AddField( model_name='xformattachmentsql', name='properties', field=jsonfield.fields.JSONField(default=dict), preserve_default=True, ), migrations.AddField( model_name='caseattachmentsql', name='attachment_from', field=models.TextField(null=True), preserve_default=True, ), migrations.AddField( model_name='caseattachmentsql', name='properties', field=jsonfield.fields.JSONField(default=dict), preserve_default=True, ), migrations.AddField( model_name='caseattachmentsql', name='attachment_src', field=models.TextField(null=True), preserve_default=True, ), migrations.AddField( model_name='caseattachmentsql', name='identifier', field=models.CharField(default='', max_length=255), preserve_default=False, ), ]
1.75
2
src/utils/es_async.py
karawallace/mygene
0
12464
<gh_stars>0 import re import json import tornado.web import tornado.httpclient tornado.httpclient.AsyncHTTPClient.configure("tornado.curl_httpclient.CurlAsyncHTTPClient") import tornadoes from utils.es import (ESQuery, ESQueryBuilder, MGQueryError, ElasticSearchException, ES_INDEX_NAME_ALL) from utils.dotfield import parse_dot_fields from config import ES_HOST class ESQueryAsync(ESQuery): es_connection = tornadoes.ESConnection(ES_HOST.split(':')[0]) es_connection.httprequest_kwargs = {'request_timeout': 120.} # increase default timeout from 20 to 120s def _search_async(self, q, species='all', callback=None): self._set_index(species) self.es_connection.search(callback=callback, index=self._index, type=self._doc_type, source=q) def _msearch_async(self, q, species='all', callback=None): self._set_index(species) path = '/'.join((self.es_connection.url, self._index, self._doc_type, '_msearch')) request_http = tornadoes.HTTPRequest(path, method="POST", body=q, **self.es_connection.httprequest_kwargs) self.es_connection.client.fetch(request=request_http, callback=callback) self._index = ES_INDEX_NAME_ALL # reset self._index def get_gene2(self, geneid, fields='all', **kwargs): '''for /gene/<geneid>''' callback = kwargs.pop('callback', None) is_async = callback is not None options = self._get_cleaned_query_options(fields, kwargs) qbdr = ESQueryBuilder(**options.kwargs) _q = qbdr.build_id_query(geneid, options.scopes) if options.rawquery: if is_async: callback(_q) return else: return _q if is_async: def inner_callback(response): res = json.loads(response.body) if not options.raw: res = self._cleaned_res(res, empty=None, single_hit=True, dotfield=options.dotfield) callback(res) self._search_async(_q, species=options.kwargs['species'], callback=inner_callback) return else: res = self._search(_q) if not options.raw: res = self._cleaned_res(res, empty=None, single_hit=True, dotfield=options.dotfield) return res def _normalize_msearch_res(self, res, geneid_list, options): assert len(res) == len(geneid_list) _res = [] for i in range(len(res)): hits = res[i] qterm = geneid_list[i] hits = self._cleaned_res(hits, empty=[], single_hit=False, dotfield=options.dotfield) if len(hits) == 0: _res.append({u'query': qterm, u'notfound': True}) elif 'error' in hits: _res.append({u'query': qterm, u'error': True}) else: for hit in hits: hit[u'query'] = qterm _res.append(hit) return _res def mget_gene2(self, geneid_list, fields=None, **kwargs): '''for /query post request''' callback = kwargs.pop('callback', None) is_async = callback is not None options = self._get_cleaned_query_options(fields, kwargs) qbdr = ESQueryBuilder(**options.kwargs) try: _q = qbdr.build_multiple_id_query(geneid_list, options.scopes) except MGQueryError as err: res = {'success': False, 'error': err.message} if is_async: callback(res) return else: return res if options.rawquery: if is_async: callback(_q) return else: return _q if is_async: def inner_callback(response): if response.code == 599 and response.body is None: res = {'success': False, 'error': 'timeout'} else: res = json.loads(response.body)['responses'] if not options.raw: res = self._normalize_msearch_res(res, geneid_list, options) callback(res) self._msearch_async(_q, species=kwargs['species'], callback=inner_callback) return else: res = self._msearch(_q, kwargs['species'])['responses'] return res if options.raw else self._normalize_msearch_res(res, geneid_list, options) @staticmethod def _normalize_query_res(res, options): if "error" in res: return {'success': False, 'error': "invalid 
query term."} _res = res['hits'] _res['took'] = res['took'] if "facets" in res: _res['facets'] = res['facets'] for v in _res['hits']: del v['_type'] del v['_index'] for attr in ['fields', '_source']: if attr in v: v.update(v[attr]) del v[attr] break if not options.dotfield: parse_dot_fields(v) res = _res return res def query(self, q, fields=None, **kwargs): '''for /query?q=<query>''' callback = kwargs.pop('callback', None) is_async = callback is not None options = self._get_cleaned_query_options(fields, kwargs) qbdr = ESQueryBuilder(**options.kwargs) q = re.sub(u'[\t\n\x0b\x0c\r\x00]+', ' ', q) q = q.strip() _q = None # Check if special interval query pattern exists interval_query = self._parse_interval_query(q) try: if interval_query: #should also passing a "taxid" along with interval. if qbdr.species != 'all': qbdr.species = [qbdr.species[0]] _q = qbdr.build_genomic_pos_query(**interval_query) else: res = {'success': False, 'error': 'genomic interval query cannot be combined with "species=all" parameter. Specify a single species.'} if is_async: callback(res) return else: return res # Check if fielded/boolean query, excluding special goid query # raw_string_query should be checked ahead of wildcard query, as raw_string may contain wildcard as well # e.g., a query "symbol:CDK?", should be treated as raw_string_query. elif self._is_raw_string_query(q) and not q.lower().startswith('go:'): _q = qbdr.build(q, mode=3) # raw string query elif self._is_wildcard_query(q): _q = qbdr.build(q, mode=2) # wildcard query else: # normal text query _q = qbdr.build(q, mode=1) except MGQueryError as err: res = {'success': False, 'error': err.message} if is_async: callback(res) return else: return res if _q: if options.rawquery: if is_async: callback(_q) return else: return _q if is_async: def inner_callback(response): res = json.loads(response.body) if not options.raw: res = self._normalize_query_res(res, options) callback(res) self._search_async(_q, species=kwargs['species'], callback=inner_callback) return else: try: res = self._search(_q, species=kwargs['species']) if not options.raw: res = self._normalize_query_res(res, options) except ElasticSearchException as err: err_msg = err.message if options.raw else "invalid query term." res = {'success': False, 'error': err_msg} else: res = {'success': False, 'error': "Invalid query. Please check parameters."} if is_async: callback(res) return else: return res
2.34375
2
processviz/test.py
jurgendn/processviz
0
12465
""" Thư viện này viết ra phục vụ cho môn học `Các mô hình ngẫu nhiên và ứng dụng` Sử dụng các thư viện `networkx, pandas, numpy, matplotlib` """ import networkx as nx import numpy as np import matplotlib.pyplot as plt from matplotlib.image import imread import pandas as pd def _gcd(a, b): if a == 0: return b return _gcd(b % a, a) def gcd(arr): if len(arr) == 0: return 0 if (len(arr) == 1): return arr[0] t = arr[0] for i in range(len(arr)): t = _gcd(t, arr[i]) return t class MarkovChain: """ Constructor function: Generate blank instance Có 2 cách để xích: - Nhập từ file csv: Sử dụng from_file - Nhập từ bàn phím: Sử dụng from_stdin """ def __init__(self): self.data = None self.state = None self.struct = None def from_stdin(self, state=None, data=None, pi=None): if state == None or data == None: return "Nothing is given" else: self.P = data self.pi = pi self.data = self.P self.state = state self.struct = self.__generate_struct__() def from_file(self, path='input.csv'): data = pd.read_csv(path) matrix = pd.DataFrame(data) data = matrix.values.tolist() self.pi = data[0] self.state = matrix.columns self.P = data[1:] self.data = self.P self.struct = self.__generate_struct__() """ Sinh ra cấu trúc của đồ thị Cấu trúc của đồ thị hiện tại như sau: ['đỉnh 1', 'đỉnh 2', '{'label':label}'] """ def __generate_struct__(self): struct = [] for i in range(len(self.data)): for j in range(len(self.data)): if self.data[i][j] > 0: struct.append([self.state[i], self.state[j], {'label': self.data[i][j]}]) return struct """ Sinh ma trận xác suất chuyển trạng thái của quá trình """ def matrix_at(self, n): self.data = np.matrix.round(np.linalg.matrix_power(self.P, n), 3) self.struct = self.__generate_struct__() """ Sinh đồ thị, đồ thị được lưu vào thư mục img """ def __get_state_vector__(self, n): self.matrix_at(n) self.state_vector = np.matmul(self.pi, self.data) def __get_state_track__(self, n): state = np.empty(shape=(len(self.pi), 1)) state = state.tolist() steps = [] for i in range(n): steps.append(i+1) self.__get_state_vector__(i) state.append(self.state_vector) state = np.transpose(state) return state.tolist(), steps def generate_state_graph(self, n): if self.pi == None: return "Not found origin state" else: state, steps = self.__get_state_track__(n) legend = self.state for i in range(len(self.pi)): plt.plot(steps, state[i][1:]) plt.legend(legend, loc='best') plt.title("Distribution state vector through time") plt.xlabel("Steps") plt.ylabel("Probability") plt.savefig('img/state_vector.svg', format='svg', dpi=1200) plt.show() def generate_graph(self, n=1): if self.state is None: return "Graph is empty. 
\n Nothing to show" else: self.matrix_at(n) self = nx.drawing.nx_agraph.to_agraph(nx.DiGraph(self.struct)) self.layout('dot') self.node_attr.update(color='red', height=0.5, width=0.5, fontname="Calibri", fontsize=10) self.edge_attr.update(color='blue', fontsize=8, fontname="Calibri", rotate=True) self.draw('img/Graph.svg') self.draw('img/Graph.png') img = imread('img/Graph.png') plt.axis("off") plt.imshow(img) def __convert_to_adjagecy__(self): adjagecy_vector = {i: [] for i in self.state} for i in range(len(self.P)): for j in range(len(self.P)): if self.P[i][j] != 0: adjagecy_vector[self.state[i]].append(self.state[j]) return adjagecy_vector def is_connected(self, source, target): vector = self.__convert_to_adjagecy__() visit_status = {i: False for i in self.state} queue = [] queue.append(source) while queue != []: current_state = queue[0] visit_status[current_state] = True queue.pop(0) for s in vector[current_state]: if target == s: return True if visit_status[s] == False: queue.append(s) return False # This part is unused -> comment for later use # ------------------------------------------ # def has_selfloop(self): # for i in range(len(self.P)): # if self.P[i][i] != 0: # return True # return False # def rank_test(self): # P = np.subtract(self.P, np.identity(len(self.P))) # if np.linalg.matrix_rank(P) == len(self.P): # return True # return False # ------------------------------------------- def is_regular(self): # Check is irreducible component = self.get_connected_component() if len(component) > 1: return False tmp = self.get_period(self.state[0]) if tmp == 1: return True return False # ---------------------------------------------------------- # Get period of a state # ---------------------------------------------------------- def __cycle_length__(self, source): vector = self.__convert_to_adjagecy__() visit_status = {i: False for i in self.state} step = 0 queue = [source] while queue != []: current_state = queue[0] visit_status[current_state] = True queue.pop(0) step += 1 for s in vector[current_state]: if s == source: return step if visit_status[s] == False: queue.append(s) return step def get_connected_component(self): connected_component = [[]] status = {i: False for i in self.state} while True: counter = 0 for i in self.state: for j in self.state: if (self.is_connected(i, j) and self.is_connected(j, i)): if status[i] == False: connected_component[counter].append(i) status[i] = True if status[j] == False: connected_component[counter].append(j) status[j] = True connected_component.append([]) counter += 1 if i == self.state[len(self.state) - 1] and j == self.state[len(self.state) - 1]: break connected_component = list(filter(None, connected_component)) return connected_component def get_period(self, target): component = self.get_connected_component() for sl in component: if target in sl: break t = [] if target not in sl: return 0 else: for i in sl: t.append(self.__cycle_length__(i)) return gcd(t) # ---------------------------------------------------- # Get steady state # ---------------------------------------------------- def get_steady_state(self): A = np.transpose(self.P) A = np.subtract(A, np.identity(len(A))) A = np.ndarray.tolist(A) A.append(np.ndarray.tolist(np.ones(len(A)))) b = np.ndarray.tolist(np.transpose(np.zeros(len(A)))) b[len(b)-1] = 1 # Calc return np.matmul(np.linalg.inv(np.matmul(np.transpose(A), A)), np.matmul(np.transpose(A), b)) # ---------------------------------------------------- # Get mean time spent # ---------------------------------------------------- def 
__get_index__(self, state_set): idx_list = [] tmp = list(self.state) try: for state in state_set: idx_list.append(tmp.index(state)) del tmp return idx_list except: return "State is not in the state set" def __get_absoring_state__(self): abr_state = [] for i in range((len(self.state))): if self.P[i][i] == 1: abr_state.append(self.state[i]) return abr_state def __get_mean_state_list__(self, state_set): tmp = list(self.state) tmp = [state for state in tmp if state not in state_set] return tmp def __get_mean_time_absoring__(self): try: idx_list = self.__get_index__(self.__get_absoring_state__()) state_list = self.__get_mean_state_list__(self.__get_absoring_state__()) P = self.data P = np.delete(P, idx_list, 0) P = np.delete(P, idx_list, 1) P = np.transpose(P) I = np.identity(len(P)) A = np.subtract(I, P) b = np.transpose(np.ones(len(P))) x = np.round(np.linalg.solve(A, b), 2) del idx_list, P, I, A, b mean_time = {"Mean time spent " + state: x_val for (state, x_val) in zip(state_list, x)} return mean_time except: return "Check your state or matrix" def __get_mean_time_transient__(self, source=None, target=None): idx_list = self.__get_index__(self.__get_absoring_state__()) P = self.data P = np.delete(P, idx_list, 0) P = np.delete(P, idx_list, 1) P = np.transpose(P) I = np.identity(len(P)) A = np.subtract(I, P) A = A.tolist() if source == None or target == None: return A
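# A minimal usage sketch (hypothetical two-state chain, not part of the original module),
# assuming the transition matrix is passed row-stochastically as expected by from_stdin():
#
#     mc = MarkovChain()
#     mc.from_stdin(state=['A', 'B'], data=[[0.9, 0.1], [0.5, 0.5]], pi=[1, 0])
#     mc.get_steady_state()   # -> approximately [0.833, 0.167]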
3.140625
3
src/check_results.py
jagwar/Sentiment-Analysis
0
12466
<filename>src/check_results.py<gh_stars>0 import os import json import numpy as np import torch from torch.utils.data import DataLoader, RandomSampler, TensorDataset, SequentialSampler from transformers import CamembertTokenizer, CamembertForSequenceClassification import pandas as pd from tqdm import tqdm, trange # tokenizer = CamembertTokenizer.from_pretrained('/home/crannou/workspace/sentiment-eai/data/36e8f471-821d-4270-be56-febb1be36c26') # model = CamembertForSequenceClassification.from_pretrained('/home/crannou/workspace/sentiment-eai/data/36e8f471-821d-4270-be56-febb1be36c26') # tokenizer = CamembertTokenizer.from_pretrained('/home/crannou/workspace/sentiment-eai/7a37b1e5-8e7b-45d1-9e87-7314e8e66c0c/') # model = CamembertForSequenceClassification.from_pretrained('/home/crannou/workspace/sentiment-eai/7a37b1e5-8e7b-45d1-9e87-7314e8e66c0c/') tokenizer = CamembertTokenizer.from_pretrained('/home/crannou/workspace/serving-preset-images/sentiment-analysis-fr/app/model_sources') model = CamembertForSequenceClassification.from_pretrained('/home/crannou/workspace/serving-preset-images/sentiment-analysis-fr/app/model_sources') def eval_model(): df = pd.read_csv('/home/crannou/notebooks/review_polarity_bin.csv', sep=';') preds = [] all_input_ids = [] all_attention_masks = [] df = df.sample(frac=0.1, random_state=42) all_labels = df['polarity'].values for sentence in df['review_content']: input_ids, attention_mask = get_features(sentence) all_input_ids.append(input_ids) all_attention_masks.append(attention_mask) t_inputs_ids = torch.tensor(all_input_ids, dtype=torch.long) t_attention_mask = torch.tensor(all_attention_masks, dtype=torch.long) t_labels = torch.tensor(all_labels, dtype=torch.long) dataset = TensorDataset(t_inputs_ids, t_attention_mask, t_labels) eval_sampler = SequentialSampler(dataset) eval_dataloader = DataLoader( dataset, sampler=eval_sampler, batch_size=32 ) model.eval() preds = None out_label_ids = None with torch.no_grad(): for batch in tqdm(eval_dataloader, desc="Evaluating"): batch = tuple(t.to("cpu") for t in batch) inputs = { "input_ids": batch[0], "attention_mask": batch[1], "labels": batch[2]} outputs = model(**inputs) _, logits = outputs[:2] if preds is None: preds = logits.detach().cpu().numpy() out_label_ids = inputs["labels"].detach().cpu().numpy() else: preds = np.append(preds, logits.detach().cpu().numpy(), axis=0) out_label_ids = np.append( out_label_ids, inputs["labels"].detach().cpu().numpy(), axis=0) preds = np.argmax(preds, axis=1) result = {"acc": (preds == out_label_ids).mean()} print(result) def get_features(sentence): max_length=min(128, tokenizer.max_len) input_ids = tokenizer.encode( sentence, add_special_tokens=True, max_length=min(128, tokenizer.max_len), ) padding_length = max_length - len(input_ids) attention_mask = [1] * len(input_ids) input_ids = input_ids + ([0] * padding_length) attention_mask = attention_mask + ([0] * padding_length) return input_ids, attention_mask if __name__ == '__main__': eval_model()
1.898438
2
Pacotes/ex022.py
TonyRio/Python-Exercicios
0
12467
<filename>Pacotes/ex022.py<gh_stars>0
print(19 // 2)
print(19 % 2)
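# Expected output: floor division and remainder of 19 by 2.
# 19 // 2 == 9
# 19 % 2 == 1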
1.078125
1
uscampgrounds/models.py
adamfast/geodjango-uscampgrounds
1
12468
<reponame>adamfast/geodjango-uscampgrounds<filename>uscampgrounds/models.py from django.conf import settings from django.contrib.gis.db import models class Campground(models.Model): campground_code = models.CharField(max_length=64) name = models.CharField(max_length=256) campground_type = models.CharField(max_length=256) phone = models.CharField(max_length=256) comments = models.TextField() sites = models.CharField(max_length=256) elevation = models.CharField(max_length=256) hookups = models.CharField(max_length=256) amenities = models.TextField() point = models.PointField(srid=4326) def locator_point(self): return self.point def __unicode__(self): return self.name # integrate with the django-locator app for easy geo lookups if it's installed if 'locator.objects' in settings.INSTALLED_APPS: from locator.objects.models import create_locator_object models.signals.post_save.connect(create_locator_object, sender=Campground)
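# A minimal query sketch (hypothetical, assuming a spatial backend such as PostGIS that
# supports distance lookups, and a GeoDjango version where the default manager handles
# spatial lookups); `user_location` is an illustrative point, not project data:
#
#     from django.contrib.gis.geos import Point
#     from django.contrib.gis.measure import D
#
#     user_location = Point(-97.5, 35.4, srid=4326)
#     nearby = Campground.objects.filter(point__distance_lte=(user_location, D(mi=50)))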
2.140625
2
blog/users/urls.py
simpleOnly1/blog
0
12469
# View routing for the users sub-application
from django.urls import path
from users.views import RegisterView, ImageCodeView, SmsCodeView

urlpatterns = [
    # The first argument of path: the route
    # The second argument of path: the view name
    path('register/', RegisterView.as_view(), name='register'),
    # Route for the image verification code
    path('imagecode/', ImageCodeView.as_view(), name='imagecode'),
    # SMS sending
    path('smscode/', SmsCodeView.as_view(), name='smscode'),
]
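# A minimal reverse() sketch (hypothetical; the exact prefix depends on how this URLconf
# is included in the project's root urls.py, which is not shown here):
#
#     from django.urls import reverse
#     reverse('register')   # e.g. '/register/' if included at the root, else prefixed by the include() path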
1.703125
2
src/config/fabric-ansible/ansible-playbooks/filter_plugins/import_lldp_info.py
EWERK-DIGITAL/tf-controller
0
12470
#!/usr/bin/python from builtins import object from builtins import str import sys import traceback sys.path.append("/opt/contrail/fabric_ansible_playbooks/module_utils") # noqa from filter_utils import _task_done, _task_error_log, _task_log, FilterLog from job_manager.job_utils import JobVncApi class FilterModule(object): def filters(self): return { 'import_lldp_info': self.import_lldp_info, } # end filters def _instantiate_filter_log_instance(self, device_name): FilterLog.instance("Import_lldp_info_Filter", device_name) # end _instantiate_filter_log_instance def import_lldp_info(self, job_ctx, prouter_fqname, prouter_vendor, lldp_neighbors_payload): """Topology discovery. :param job_ctx: Dictionary # example: # { # "auth_token": "<PASSWORD>", # "job_input": { # "fabric_fq_name": [ # "default-global-system-config", # "fab01" # ], # "device_auth": [{ # "username": "root", # "password": "<PASSWORD>" # }], # "management_subnets": [ # { # "cidr": "10.87.69.0/25", # "gateway": "10.87.69.1" # } # ], # "overlay_ibgp_asn": 64512, # "node_profiles": [ # { # "node_profile_name": "juniper-qfx5k" # } # ] # } # } :param prouter_fqname: List example: # [ # "default-global-system-config", # "5c3-qfx2" # ] :param prouter_vendor: String example: "juniper" :param lldp_neighbors_payload: Dictionary # example: # { # "neighbor_info_list": # [ # { # "local_physical_interface_name": "xe-0/0/0", # "remote_device_name": "5b5-qfx11", # "remote_physical_interface_port_id": "536" # }, # { # "local_physical_interface_name": "xe-0/0/2", # "remote_device_chassis_id": "00:1a:53:46:7b:9e", # "remote_physical_interface_port_id": "538" # } # ] # } :return: Dictionary # if success, returns # { # 'status': 'success', # 'topology_discovery_log': # <String: topology_discovery_log>, # 'topology_discovery_resp': # <Dictionary: topology_discovery_resp> # } # if failure, returns # { # 'status': 'failure', # 'error_msg': <String: exception message>, # 'topology_discovery_log': # <String: topology_discovery_log>, # 'topology_discovery_resp': # <Dictionary: topology_discovery_resp> # } :param topology_discovery_resp: Dictionary # example: # { # "lldp_neighbors_success_names": # <List: <String: lldp_neighbors_success_pair_string>>, # "lldp_neighbors_failed_info": # <List: <Dictionary: lldp_neighbor_failed_obj> > # } # :param lldp_neighbors_success_names: List # example: # ["bng-contrail-qfx51-15 : ge-0/0/36 --> dhawan : ge-2/3/1"] # :param lldp_neighbors_failed_info: List # example: # [ # { # "lldp_neighbor": # "bng-contrail-qfx51-15 : em0 --> sw174 : ge-1/0/46", # "warning_message": # "Unknown physical interface ng-contrail-qfx51-15:em0" # } # ] """ self._instantiate_filter_log_instance(prouter_fqname[-1]) _task_log("Starting Topology Discovery") try: _task_log("Creating neighboring links") topology_discovery_resp = self._create_neighbor_links( job_ctx, lldp_neighbors_payload, prouter_fqname, prouter_vendor) _task_done() return { 'status': 'success', 'topology_discovery_log': FilterLog.instance().dump(), 'topology_discovery_resp': topology_discovery_resp } except Exception as ex: _task_error_log(str(ex)) _task_error_log(traceback.format_exc()) return {'status': 'failure', 'error_msg': str(ex), 'topology_discovery_log': FilterLog.instance().dump()} # end import_lldp_info def get_vnc_payload(self, vnc_lib, prouter_fqname, prouter_vendor, lldp_neighbors_info): vnc_payload = [] chassis_id_device_name_map = self.get_chassis_id_to_device_name( vnc_lib, prouter_vendor) for lldp_neighbor_info in lldp_neighbors_info or []: local_phy_int = 
lldp_neighbor_info.get( 'local_physical_interface_name') phy_int_fqname = [] phy_int_fqname.extend(prouter_fqname) phy_int_fqname.append(local_phy_int.replace(":", "_")) remote_device_chassis_id = lldp_neighbor_info.get( 'remote_device_chassis_id') remote_device_name = chassis_id_device_name_map.get( remote_device_chassis_id) if not remote_device_name: remote_device_name = lldp_neighbor_info.get( 'remote_device_name') if remote_device_name: remote_phy_int_fqname_str = \ remote_device_name.replace( ":", "_") + ":" +\ lldp_neighbor_info.get( 'remote_physical_interface_port_id') vnc_payload.append((phy_int_fqname, remote_phy_int_fqname_str)) return vnc_payload # end get_vnc_payload # get chassis mac id to physical router name map # for all the physical routers in the fabric def get_chassis_id_to_device_name(self, vnc_lib, prouter_vendor): chassis_id_to_device_name_map = {} phy_routers_list = vnc_lib.physical_routers_list( fields=['device_chassis_refs']).get('physical-routers') for phy_router in phy_routers_list or []: if phy_router.get('device_chassis_refs'): device_chassis_id_info = phy_router.get( 'device_chassis_refs') for chassis_id_info in device_chassis_id_info or []: chassis_mac = chassis_id_info['to'][-1].split( prouter_vendor + '_')[1].replace('_', ':') chassis_id_to_device_name_map[chassis_mac] = \ phy_router['fq_name'][-1] return chassis_id_to_device_name_map # end get_chassis_id_to_device_name # group vnc functions def _create_neighbor_links(self, job_ctx, lldp_neighbors_payload, prouter_fqname, prouter_vendor): if not lldp_neighbors_payload.get('neighbor_info_list'): _task_log("No neighbors found") _task_done() return { 'lldp_neighbors_success_names': [], 'lldp_neighbors_failed_info': [] } vnc_lib = JobVncApi.vnc_init(job_ctx) vnc_topology_disc_payload = self.get_vnc_payload( vnc_lib, prouter_fqname, prouter_vendor, lldp_neighbors_payload['neighbor_info_list']) topology_disc_payload = self._do_further_parsing( vnc_lib, vnc_topology_disc_payload) _task_done("Parsed payload completely") _task_log("Creating links between neighboring physical interfaces") topology_discovery_resp = self._create_physical_interface_refs( vnc_lib, topology_disc_payload) return topology_discovery_resp # end _create_neighbor_links def _do_further_parsing(self, vnc_lib, neighbor_info_list): topology_disc_payload = [] for neighbor_info in neighbor_info_list or []: remote_neighbor_info = neighbor_info[1].split(":", 1) list_resp = vnc_lib.physical_interfaces_list( parent_fq_name=["default-global-system-config", remote_neighbor_info[0]], filters={"physical_interface_port_id": remote_neighbor_info[1]} ) if list_resp['physical-interfaces']: topology_disc_payload.append([neighbor_info[0], list_resp['physical-interfaces'] [0]['fq_name']]) return topology_disc_payload # end _do_further_parsing def _create_physical_interface_refs(self, vnc_lib, topology_disc_payload): # create or update refs between physical interfaces # on the local device to the remote device object_type = "physical_interface" lldp_neighbors_success_names = [] lldp_neighbors_failed_info = [] for topology_disc_info in topology_disc_payload or []: try: object_fqname = topology_disc_info[0] ref_fqname = topology_disc_info[1] pi_obj = vnc_lib.physical_interface_read(fq_name=object_fqname) # Check ref already present or not refs = pi_obj.get_physical_interface_refs() is_link_found = False if refs: for ref in refs: if ref['to'] == ref_fqname: is_link_found = True if not is_link_found: ref_uuid = vnc_lib.fq_name_to_id(object_type, ref_fqname) 
pi_obj.set_physical_interface_list([{"to": ref_fqname, "uuid": ref_uuid}]) vnc_lib.physical_interface_update(pi_obj) lldp_neighbors_success_names.append(object_fqname[-2] + " : " + object_fqname[-1] + " --> " + ref_fqname[-2] + " : " + ref_fqname[-1]) except Exception as ex: _task_error_log(str(ex)) _task_error_log(traceback.format_exc()) lldp_neighbor_failed_obj = { "lldp_neighbor": object_fqname[-2] + " : " + object_fqname[-1] + " --> " + ref_fqname[-2] + " : " + ref_fqname[-1], "warning_message": str(ex) } lldp_neighbors_failed_info.append(lldp_neighbor_failed_obj) return { 'lldp_neighbors_success_names': lldp_neighbors_success_names, 'lldp_neighbors_failed_info': lldp_neighbors_failed_info } # end _create_physical_interface_refs
1.96875
2
src/shared/_menu.py
MarcSkovMadsen/awesome-panel-starter
5
12471
"""Provides the MENU html string which is appended to all templates Please note that the MENU only works in [Fast](https://www.fast.design/) based templates. If you need some sort of custom MENU html string feel free to customize this code. """ from awesome_panel_extensions.frameworks.fast.fast_menu import to_menu from src.shared import config if config.applications: MENU = to_menu( config.applications.values(), accent_color=config.color_primary, expand=["Main"] ).replace("\n", "") else: MENU = ""
1.789063
2
prediction-api/app.py
BrokenImage/raptor-api
0
12472
<filename>prediction-api/app.py import os import boto3 import numpy as np import tensorflow as tf from flask import Flask from dotenv import load_dotenv from pymongo import MongoClient from keras.models import load_model from sklearn.preprocessing import LabelEncoder from werkzeug.datastructures import FileStorage from werkzeug.middleware.proxy_fix import ProxyFix from flask_restplus import Api, Resource from utils.Model import ModelManager load_dotenv() # Mongodb connection client = MongoClient(os.environ['MONGO_CLIENT_URL']) db = client.registry # AWS S3 connection session = boto3.Session( aws_access_key_id=os.environ['AWS_ACCESS_KEY_ID'], aws_secret_access_key=os.environ['AWS_SECRET_KEY'] ) s3 = session.resource('s3') # App and API setup app = Flask(__name__) app.wsgi_app = ProxyFix(app.wsgi_app) api = Api(app, version="1.0", title="Anomaly Detection", description="") ns = api.namespace('api') single_parser = api.parser() single_parser.add_argument("files", location="files", type=FileStorage, action='append', required=True) graph = tf.get_default_graph() backup_model = load_model("./models/backup/model.h5") backup_label_encoder = LabelEncoder() backup_label_encoder.classes_ = np.load("./models/backup/classes.npy") @ns.route("/classify") class MultiClassification(Resource): @api.doc(parser=single_parser, description='Upload an image of a solar panel') def post(self): model = ModelManager(db, s3, graph, backup_model, backup_label_encoder, bucket_name=os.environ['AWS_BUCKET_NAME']) model.load_latest_model() args = single_parser.parse_args() image_files = args.files preds = [] for image in image_files: image_array = model.preprocess(image) preds.append(model.predict(image_array)[0]) return {'prediction': str(preds)} if __name__ == "__main__": app.run(debug=True, host="0.0.0.0")
2.03125
2
misc/fill_blanks.py
netotz/codecamp
0
12473
<reponame>netotz/codecamp
# Given an array containing None values, fill in the None values with the most recent
# non-None value in the array.

from random import random


def generate_sample(n):
    rand = 0.9
    while n:
        yield int(rand * 10) if rand % 1 > 1 / 3 else None
        rand = random()
        n -= 1


def fill1(array):
    for i in range(len(array)):
        if array[i] is None:
            array[i] = array[i - 1]
    return array


def fill2(array):
    for i, num in enumerate(array):
        if num is None:
            array[i] = array[i - 1]
    return array


test = list(map(int, input().split()))
print(fill1(test))
print(fill2(test))
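# Worked example (hypothetical input, not read from stdin):
#
#     fill1([1, None, 2, None, None])    # -> [1, 1, 2, 2, 2]
#     fill2([3, None, None, 7, None])    # -> [3, 3, 3, 7, 7]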
3.84375
4
beast/tests/helpers.py
marthaboyer/beast
0
12474
<gh_stars>0 # useful functions for BEAST tests # put here instead of having in every tests import os.path import numpy as np import h5py from astropy.io import fits from astropy.utils.data import download_file __all__ = ['download_rename', 'compare_tables', 'compare_fits', 'compare_hdf5'] def download_rename(filename): """Download a file and rename it to have the right extension. Otherwise, downloaded file will not have an extension at all and an extension is needed for the BEAST. Parameters ---------- filename : str name of file to download """ url_loc = 'http://www.stsci.edu/~kgordon/beast/' fname_dld = download_file('%s%s' % (url_loc, filename)) extension = filename.split('.')[-1] fname = '%s.%s' % (fname_dld, extension) os.rename(fname_dld, fname) return fname def compare_tables(table_cache, table_new): """ Compare two tables using astropy tables routines. Parameters ---------- table_cache : astropy table table_new : astropy table data for comparision. """ assert len(table_new) == len(table_cache) for tcolname in table_new.colnames: # test numerical types for closeness # and other types for equality if table_new[tcolname].data.dtype.kind in ['f', 'i']: np.testing.assert_allclose(table_new[tcolname], table_cache[tcolname], err_msg=('%s columns not equal' % tcolname)) else: np.testing.assert_equal(table_new[tcolname], table_cache[tcolname], err_msg=('%s columns not equal' % tcolname)) def compare_fits(fname_cache, fname_new): """ Compare two FITS files. Parameters ---------- fname_cache : str fname_new : type names to FITS files """ fits_cache = fits.open(fname_cache) fits_new = fits.open(fname_new) assert len(fits_new) == len(fits_cache) for k in range(1, len(fits_new)): qname = fits_new[k].header['EXTNAME'] np.testing.assert_allclose(fits_new[k].data, fits_cache[qname].data, err_msg=('%s FITS extension not equal' % qname)) def compare_hdf5(fname_cache, fname_new, ctype=None): """ Compare two hdf files. Parameters ---------- fname_cache : str fname_new : type names to hdf5 files ctype : str if set, string to identify the type of data being tested """ hdf_cache = h5py.File(fname_cache, 'r') hdf_new = h5py.File(fname_new, 'r') # go through the file and check if it is exactly the same for sname in hdf_cache.keys(): if isinstance(hdf_cache[sname], h5py.Dataset): cvalue = hdf_cache[sname] cvalue_new = hdf_new[sname] if ctype is not None: osname = '%s/%s' % (ctype, sname) else: osname = sname if cvalue.dtype.fields is None: np.testing.assert_allclose(cvalue.value, cvalue_new.value, err_msg='testing %s' % (osname), rtol=1e-6) else: for ckey in cvalue.dtype.fields.keys(): err_msg = 'testing %s/%s' % (osname, ckey) np.testing.assert_allclose(cvalue.value[ckey], cvalue_new.value[ckey], err_msg=err_msg, rtol=1e-5)
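# A minimal usage sketch (hypothetical tables, not part of the BEAST test data):
#
#     from astropy.table import Table
#
#     cached = Table({'flux': [1.0, 2.0], 'band': ['F275W', 'F336W']})
#     new = Table({'flux': [1.0, 2.0], 'band': ['F275W', 'F336W']})
#     compare_tables(cached, new)   # passes silently when the tables agree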
2.578125
3
sdk/storage/azure-storage-blob/azure/storage/blob/_generated/aio/operations/_page_blob_operations.py
jalauzon-msft/azure-sdk-for-python
1
12475
<filename>sdk/storage/azure-storage-blob/azure/storage/blob/_generated/aio/operations/_page_blob_operations.py # pylint: disable=too-many-lines # coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- import datetime from typing import Any, Callable, Dict, IO, Optional, TypeVar, Union from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error from azure.core.pipeline import PipelineResponse from azure.core.pipeline.transport import AsyncHttpResponse from azure.core.rest import HttpRequest from azure.core.tracing.decorator_async import distributed_trace_async from azure.core.utils import case_insensitive_dict from ... import models as _models from ..._vendor import _convert_request from ...operations._page_blob_operations import build_clear_pages_request, build_copy_incremental_request, build_create_request, build_get_page_ranges_diff_request, build_get_page_ranges_request, build_resize_request, build_update_sequence_number_request, build_upload_pages_from_url_request, build_upload_pages_request T = TypeVar('T') ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] class PageBlobOperations: """ .. warning:: **DO NOT** instantiate this class directly. Instead, you should access the following operations through :class:`~azure.storage.blob.aio.AzureBlobStorage`'s :attr:`page_blob` attribute. """ models = _models def __init__(self, *args, **kwargs) -> None: input_args = list(args) self._client = input_args.pop(0) if input_args else kwargs.pop("client") self._config = input_args.pop(0) if input_args else kwargs.pop("config") self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") @distributed_trace_async async def create( # pylint: disable=inconsistent-return-statements self, content_length: int, blob_content_length: int, timeout: Optional[int] = None, tier: Optional[Union[str, "_models.PremiumPageBlobAccessTier"]] = None, metadata: Optional[Dict[str, str]] = None, blob_sequence_number: Optional[int] = 0, request_id_parameter: Optional[str] = None, blob_tags_string: Optional[str] = None, immutability_policy_expiry: Optional[datetime.datetime] = None, immutability_policy_mode: Optional[Union[str, "_models.BlobImmutabilityPolicyMode"]] = None, legal_hold: Optional[bool] = None, blob_http_headers: Optional[_models.BlobHTTPHeaders] = None, lease_access_conditions: Optional[_models.LeaseAccessConditions] = None, cpk_info: Optional[_models.CpkInfo] = None, cpk_scope_info: Optional[_models.CpkScopeInfo] = None, modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None, **kwargs: Any ) -> None: """The Create operation creates a new page blob. :param content_length: The length of the request. :type content_length: long :param blob_content_length: This header specifies the maximum size for the page blob, up to 1 TB. The page blob size must be aligned to a 512-byte boundary. 
:type blob_content_length: long :param timeout: The timeout parameter is expressed in seconds. For more information, see :code:`<a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>`. Default value is None. :type timeout: int :param tier: Optional. Indicates the tier to be set on the page blob. Default value is None. :type tier: str or ~azure.storage.blob.models.PremiumPageBlobAccessTier :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the operation will copy the metadata from the source blob or file to the destination blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata is not copied from the source blob or file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more information. Default value is None. :type metadata: dict[str, str] :param blob_sequence_number: Set for page blobs only. The sequence number is a user-controlled value that you can use to track requests. The value of the sequence number must be between 0 and 2^63 - 1. Default value is 0. :type blob_sequence_number: long :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. Default value is None. :type request_id_parameter: str :param blob_tags_string: Optional. Used to set blob tags in various blob operations. Default value is None. :type blob_tags_string: str :param immutability_policy_expiry: Specifies the date time when the blobs immutability policy is set to expire. Default value is None. :type immutability_policy_expiry: ~datetime.datetime :param immutability_policy_mode: Specifies the immutability policy mode to set on the blob. Default value is None. :type immutability_policy_mode: str or ~azure.storage.blob.models.BlobImmutabilityPolicyMode :param legal_hold: Specified if a legal hold should be set on the blob. Default value is None. :type legal_hold: bool :param blob_http_headers: Parameter group. Default value is None. :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders :param lease_access_conditions: Parameter group. Default value is None. :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions :param cpk_info: Parameter group. Default value is None. :type cpk_info: ~azure.storage.blob.models.CpkInfo :param cpk_scope_info: Parameter group. Default value is None. :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo :param modified_access_conditions: Parameter group. Default value is None. :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions :keyword blob_type: Specifies the type of blob to create: block blob, page blob, or append blob. Default value is "PageBlob". Note that overriding this default value may result in unsupported behavior. 
:paramtype blob_type: str :keyword callable cls: A custom type or function that will be passed the direct response :return: None, or the result of cls(response) :rtype: None :raises: ~azure.core.exceptions.HttpResponseError """ error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {}) or {}) _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = kwargs.pop("params", {}) or {} blob_type = kwargs.pop('blob_type', _headers.pop('x-ms-blob-type', "PageBlob")) # type: str cls = kwargs.pop('cls', None) # type: ClsType[None] _blob_content_type = None _blob_content_encoding = None _blob_content_language = None _blob_content_md5 = None _blob_cache_control = None _lease_id = None _blob_content_disposition = None _encryption_key = None _encryption_key_sha256 = None _encryption_algorithm = None _encryption_scope = None _if_modified_since = None _if_unmodified_since = None _if_match = None _if_none_match = None _if_tags = None if blob_http_headers is not None: _blob_content_type = blob_http_headers.blob_content_type _blob_content_encoding = blob_http_headers.blob_content_encoding _blob_content_language = blob_http_headers.blob_content_language _blob_content_md5 = blob_http_headers.blob_content_md5 _blob_cache_control = blob_http_headers.blob_cache_control if lease_access_conditions is not None: _lease_id = lease_access_conditions.lease_id if blob_http_headers is not None: _blob_content_disposition = blob_http_headers.blob_content_disposition if cpk_info is not None: _encryption_key = cpk_info.encryption_key _encryption_key_sha256 = cpk_info.encryption_key_sha256 _encryption_algorithm = cpk_info.encryption_algorithm if cpk_scope_info is not None: _encryption_scope = cpk_scope_info.encryption_scope if modified_access_conditions is not None: _if_modified_since = modified_access_conditions.if_modified_since _if_unmodified_since = modified_access_conditions.if_unmodified_since _if_match = modified_access_conditions.if_match _if_none_match = modified_access_conditions.if_none_match _if_tags = modified_access_conditions.if_tags request = build_create_request( url=self._config.url, blob_type=blob_type, version=self._config.version, content_length=content_length, blob_content_length=blob_content_length, timeout=timeout, tier=tier, blob_content_type=_blob_content_type, blob_content_encoding=_blob_content_encoding, blob_content_language=_blob_content_language, blob_content_md5=_blob_content_md5, blob_cache_control=_blob_cache_control, metadata=metadata, lease_id=_lease_id, blob_content_disposition=_blob_content_disposition, encryption_key=_encryption_key, encryption_key_sha256=_encryption_key_sha256, encryption_algorithm=_encryption_algorithm, encryption_scope=_encryption_scope, if_modified_since=_if_modified_since, if_unmodified_since=_if_unmodified_since, if_match=_if_match, if_none_match=_if_none_match, if_tags=_if_tags, blob_sequence_number=blob_sequence_number, request_id_parameter=request_id_parameter, blob_tags_string=blob_tags_string, immutability_policy_expiry=immutability_policy_expiry, immutability_policy_mode=immutability_policy_mode, legal_hold=legal_hold, template_url=self.create.metadata['url'], headers=_headers, params=_params, ) request = _convert_request(request) request.url = self._client.format_url(request.url) # type: ignore pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access request, stream=False, **kwargs ) response = 
pipeline_response.http_response if response.status_code not in [201]: map_error(status_code=response.status_code, response=response, error_map=error_map) error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) raise HttpResponseError(response=response, model=error) response_headers = {} response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) response_headers['x-ms-version-id']=self._deserialize('str', response.headers.get('x-ms-version-id')) response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) if cls: return cls(pipeline_response, None, response_headers) create.metadata = {'url': "{url}/{containerName}/{blob}"} # type: ignore @distributed_trace_async async def upload_pages( # pylint: disable=inconsistent-return-statements self, content_length: int, body: IO, transactional_content_md5: Optional[bytearray] = None, transactional_content_crc64: Optional[bytearray] = None, timeout: Optional[int] = None, range: Optional[str] = None, request_id_parameter: Optional[str] = None, lease_access_conditions: Optional[_models.LeaseAccessConditions] = None, cpk_info: Optional[_models.CpkInfo] = None, cpk_scope_info: Optional[_models.CpkScopeInfo] = None, sequence_number_access_conditions: Optional[_models.SequenceNumberAccessConditions] = None, modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None, **kwargs: Any ) -> None: """The Upload Pages operation writes a range of pages to a page blob. :param content_length: The length of the request. :type content_length: long :param body: Initial data. :type body: IO :param transactional_content_md5: Specify the transactional md5 for the body, to be validated by the service. Default value is None. :type transactional_content_md5: bytearray :param transactional_content_crc64: Specify the transactional crc64 for the body, to be validated by the service. Default value is None. :type transactional_content_crc64: bytearray :param timeout: The timeout parameter is expressed in seconds. For more information, see :code:`<a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>`. Default value is None. :type timeout: int :param range: Return only the bytes of the blob in the specified range. Default value is None. :type range: str :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. Default value is None. 
:type request_id_parameter: str :param lease_access_conditions: Parameter group. Default value is None. :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions :param cpk_info: Parameter group. Default value is None. :type cpk_info: ~azure.storage.blob.models.CpkInfo :param cpk_scope_info: Parameter group. Default value is None. :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo :param sequence_number_access_conditions: Parameter group. Default value is None. :type sequence_number_access_conditions: ~azure.storage.blob.models.SequenceNumberAccessConditions :param modified_access_conditions: Parameter group. Default value is None. :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions :keyword comp: comp. Default value is "page". Note that overriding this default value may result in unsupported behavior. :paramtype comp: str :keyword page_write: Required. You may specify one of the following options: * Update: Writes the bytes specified by the request body into the specified range. The Range and Content-Length headers must match to perform the update. * Clear: Clears the specified range and releases the space used in storage for that range. To clear a range, set the Content-Length header to zero, and the Range header to a value that indicates the range to clear, up to maximum blob size. Default value is "update". Note that overriding this default value may result in unsupported behavior. :paramtype page_write: str :keyword callable cls: A custom type or function that will be passed the direct response :return: None, or the result of cls(response) :rtype: None :raises: ~azure.core.exceptions.HttpResponseError """ error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {}) or {}) _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) comp = kwargs.pop('comp', _params.pop('comp', "page")) # type: str page_write = kwargs.pop('page_write', _headers.pop('x-ms-page-write', "update")) # type: str content_type = kwargs.pop('content_type', _headers.pop('Content-Type', "application/octet-stream")) # type: Optional[str] cls = kwargs.pop('cls', None) # type: ClsType[None] _lease_id = None _encryption_key = None _encryption_key_sha256 = None _encryption_algorithm = None _encryption_scope = None _if_sequence_number_less_than_or_equal_to = None _if_sequence_number_less_than = None _if_sequence_number_equal_to = None _if_modified_since = None _if_unmodified_since = None _if_match = None _if_none_match = None _if_tags = None if lease_access_conditions is not None: _lease_id = lease_access_conditions.lease_id if cpk_info is not None: _encryption_key = cpk_info.encryption_key _encryption_key_sha256 = cpk_info.encryption_key_sha256 _encryption_algorithm = cpk_info.encryption_algorithm if cpk_scope_info is not None: _encryption_scope = cpk_scope_info.encryption_scope if sequence_number_access_conditions is not None: _if_sequence_number_less_than_or_equal_to = sequence_number_access_conditions.if_sequence_number_less_than_or_equal_to _if_sequence_number_less_than = sequence_number_access_conditions.if_sequence_number_less_than _if_sequence_number_equal_to = sequence_number_access_conditions.if_sequence_number_equal_to if modified_access_conditions is not None: _if_modified_since = modified_access_conditions.if_modified_since _if_unmodified_since = 
modified_access_conditions.if_unmodified_since _if_match = modified_access_conditions.if_match _if_none_match = modified_access_conditions.if_none_match _if_tags = modified_access_conditions.if_tags _content = body request = build_upload_pages_request( url=self._config.url, comp=comp, page_write=page_write, version=self._config.version, content_type=content_type, content=_content, content_length=content_length, transactional_content_md5=transactional_content_md5, transactional_content_crc64=transactional_content_crc64, timeout=timeout, range=range, lease_id=_lease_id, encryption_key=_encryption_key, encryption_key_sha256=_encryption_key_sha256, encryption_algorithm=_encryption_algorithm, encryption_scope=_encryption_scope, if_sequence_number_less_than_or_equal_to=_if_sequence_number_less_than_or_equal_to, if_sequence_number_less_than=_if_sequence_number_less_than, if_sequence_number_equal_to=_if_sequence_number_equal_to, if_modified_since=_if_modified_since, if_unmodified_since=_if_unmodified_since, if_match=_if_match, if_none_match=_if_none_match, if_tags=_if_tags, request_id_parameter=request_id_parameter, template_url=self.upload_pages.metadata['url'], headers=_headers, params=_params, ) request = _convert_request(request) request.url = self._client.format_url(request.url) # type: ignore pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access request, stream=False, **kwargs ) response = pipeline_response.http_response if response.status_code not in [201]: map_error(status_code=response.status_code, response=response, error_map=error_map) error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) raise HttpResponseError(response=response, model=error) response_headers = {} response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) response_headers['x-ms-content-crc64']=self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')) response_headers['x-ms-blob-sequence-number']=self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')) response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) if cls: return cls(pipeline_response, None, response_headers) upload_pages.metadata = {'url': "{url}/{containerName}/{blob}"} # type: ignore @distributed_trace_async async def clear_pages( # pylint: disable=inconsistent-return-statements self, content_length: int, timeout: Optional[int] = None, range: Optional[str] = None, request_id_parameter: Optional[str] = None, lease_access_conditions: Optional[_models.LeaseAccessConditions] = None, cpk_info: Optional[_models.CpkInfo] = None, 
cpk_scope_info: Optional[_models.CpkScopeInfo] = None, sequence_number_access_conditions: Optional[_models.SequenceNumberAccessConditions] = None, modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None, **kwargs: Any ) -> None: """The Clear Pages operation clears a set of pages from a page blob. :param content_length: The length of the request. :type content_length: long :param timeout: The timeout parameter is expressed in seconds. For more information, see :code:`<a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>`. Default value is None. :type timeout: int :param range: Return only the bytes of the blob in the specified range. Default value is None. :type range: str :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. Default value is None. :type request_id_parameter: str :param lease_access_conditions: Parameter group. Default value is None. :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions :param cpk_info: Parameter group. Default value is None. :type cpk_info: ~azure.storage.blob.models.CpkInfo :param cpk_scope_info: Parameter group. Default value is None. :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo :param sequence_number_access_conditions: Parameter group. Default value is None. :type sequence_number_access_conditions: ~azure.storage.blob.models.SequenceNumberAccessConditions :param modified_access_conditions: Parameter group. Default value is None. :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions :keyword comp: comp. Default value is "page". Note that overriding this default value may result in unsupported behavior. :paramtype comp: str :keyword page_write: Required. You may specify one of the following options: * Update: Writes the bytes specified by the request body into the specified range. The Range and Content-Length headers must match to perform the update. * Clear: Clears the specified range and releases the space used in storage for that range. To clear a range, set the Content-Length header to zero, and the Range header to a value that indicates the range to clear, up to maximum blob size. Default value is "clear". Note that overriding this default value may result in unsupported behavior. 
:paramtype page_write: str :keyword callable cls: A custom type or function that will be passed the direct response :return: None, or the result of cls(response) :rtype: None :raises: ~azure.core.exceptions.HttpResponseError """ error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {}) or {}) _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) comp = kwargs.pop('comp', _params.pop('comp', "page")) # type: str page_write = kwargs.pop('page_write', _headers.pop('x-ms-page-write', "clear")) # type: str cls = kwargs.pop('cls', None) # type: ClsType[None] _lease_id = None _encryption_key = None _encryption_key_sha256 = None _encryption_algorithm = None _encryption_scope = None _if_sequence_number_less_than_or_equal_to = None _if_sequence_number_less_than = None _if_sequence_number_equal_to = None _if_modified_since = None _if_unmodified_since = None _if_match = None _if_none_match = None _if_tags = None if lease_access_conditions is not None: _lease_id = lease_access_conditions.lease_id if cpk_info is not None: _encryption_key = cpk_info.encryption_key _encryption_key_sha256 = cpk_info.encryption_key_sha256 _encryption_algorithm = cpk_info.encryption_algorithm if cpk_scope_info is not None: _encryption_scope = cpk_scope_info.encryption_scope if sequence_number_access_conditions is not None: _if_sequence_number_less_than_or_equal_to = sequence_number_access_conditions.if_sequence_number_less_than_or_equal_to _if_sequence_number_less_than = sequence_number_access_conditions.if_sequence_number_less_than _if_sequence_number_equal_to = sequence_number_access_conditions.if_sequence_number_equal_to if modified_access_conditions is not None: _if_modified_since = modified_access_conditions.if_modified_since _if_unmodified_since = modified_access_conditions.if_unmodified_since _if_match = modified_access_conditions.if_match _if_none_match = modified_access_conditions.if_none_match _if_tags = modified_access_conditions.if_tags request = build_clear_pages_request( url=self._config.url, comp=comp, page_write=page_write, version=self._config.version, content_length=content_length, timeout=timeout, range=range, lease_id=_lease_id, encryption_key=_encryption_key, encryption_key_sha256=_encryption_key_sha256, encryption_algorithm=_encryption_algorithm, encryption_scope=_encryption_scope, if_sequence_number_less_than_or_equal_to=_if_sequence_number_less_than_or_equal_to, if_sequence_number_less_than=_if_sequence_number_less_than, if_sequence_number_equal_to=_if_sequence_number_equal_to, if_modified_since=_if_modified_since, if_unmodified_since=_if_unmodified_since, if_match=_if_match, if_none_match=_if_none_match, if_tags=_if_tags, request_id_parameter=request_id_parameter, template_url=self.clear_pages.metadata['url'], headers=_headers, params=_params, ) request = _convert_request(request) request.url = self._client.format_url(request.url) # type: ignore pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access request, stream=False, **kwargs ) response = pipeline_response.http_response if response.status_code not in [201]: map_error(status_code=response.status_code, response=response, error_map=error_map) error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) raise HttpResponseError(response=response, model=error) response_headers = {} 
response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) response_headers['x-ms-content-crc64']=self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')) response_headers['x-ms-blob-sequence-number']=self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')) response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) if cls: return cls(pipeline_response, None, response_headers) clear_pages.metadata = {'url': "{url}/{containerName}/{blob}"} # type: ignore @distributed_trace_async async def upload_pages_from_url( # pylint: disable=inconsistent-return-statements self, source_url: str, source_range: str, content_length: int, range: str, source_content_md5: Optional[bytearray] = None, source_contentcrc64: Optional[bytearray] = None, timeout: Optional[int] = None, request_id_parameter: Optional[str] = None, copy_source_authorization: Optional[str] = None, cpk_info: Optional[_models.CpkInfo] = None, cpk_scope_info: Optional[_models.CpkScopeInfo] = None, lease_access_conditions: Optional[_models.LeaseAccessConditions] = None, sequence_number_access_conditions: Optional[_models.SequenceNumberAccessConditions] = None, modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None, source_modified_access_conditions: Optional[_models.SourceModifiedAccessConditions] = None, **kwargs: Any ) -> None: """The Upload Pages operation writes a range of pages to a page blob where the contents are read from a URL. :param source_url: Specify a URL to the copy source. :type source_url: str :param source_range: Bytes of source data in the specified range. The length of this range should match the ContentLength header and x-ms-range/Range destination range header. :type source_range: str :param content_length: The length of the request. :type content_length: long :param range: The range of bytes to which the source range would be written. The range should be 512 aligned and range-end is required. :type range: str :param source_content_md5: Specify the md5 calculated for the range of bytes that must be read from the copy source. Default value is None. :type source_content_md5: bytearray :param source_contentcrc64: Specify the crc64 calculated for the range of bytes that must be read from the copy source. Default value is None. :type source_contentcrc64: bytearray :param timeout: The timeout parameter is expressed in seconds. For more information, see :code:`<a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>`. Default value is None. :type timeout: int :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. Default value is None. :type request_id_parameter: str :param copy_source_authorization: Only Bearer type is supported. 
Credentials should be a valid OAuth access token to copy source. Default value is None. :type copy_source_authorization: str :param cpk_info: Parameter group. Default value is None. :type cpk_info: ~azure.storage.blob.models.CpkInfo :param cpk_scope_info: Parameter group. Default value is None. :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo :param lease_access_conditions: Parameter group. Default value is None. :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions :param sequence_number_access_conditions: Parameter group. Default value is None. :type sequence_number_access_conditions: ~azure.storage.blob.models.SequenceNumberAccessConditions :param modified_access_conditions: Parameter group. Default value is None. :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions :param source_modified_access_conditions: Parameter group. Default value is None. :type source_modified_access_conditions: ~azure.storage.blob.models.SourceModifiedAccessConditions :keyword comp: comp. Default value is "page". Note that overriding this default value may result in unsupported behavior. :paramtype comp: str :keyword page_write: Required. You may specify one of the following options: * Update: Writes the bytes specified by the request body into the specified range. The Range and Content-Length headers must match to perform the update. * Clear: Clears the specified range and releases the space used in storage for that range. To clear a range, set the Content-Length header to zero, and the Range header to a value that indicates the range to clear, up to maximum blob size. Default value is "update". Note that overriding this default value may result in unsupported behavior. :paramtype page_write: str :keyword callable cls: A custom type or function that will be passed the direct response :return: None, or the result of cls(response) :rtype: None :raises: ~azure.core.exceptions.HttpResponseError """ error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {}) or {}) _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) comp = kwargs.pop('comp', _params.pop('comp', "page")) # type: str page_write = kwargs.pop('page_write', _headers.pop('x-ms-page-write', "update")) # type: str cls = kwargs.pop('cls', None) # type: ClsType[None] _encryption_key = None _encryption_key_sha256 = None _encryption_algorithm = None _encryption_scope = None _lease_id = None _if_sequence_number_less_than_or_equal_to = None _if_sequence_number_less_than = None _if_sequence_number_equal_to = None _if_modified_since = None _if_unmodified_since = None _if_match = None _if_none_match = None _if_tags = None _source_if_modified_since = None _source_if_unmodified_since = None _source_if_match = None _source_if_none_match = None if cpk_info is not None: _encryption_key = cpk_info.encryption_key _encryption_key_sha256 = cpk_info.encryption_key_sha256 _encryption_algorithm = cpk_info.encryption_algorithm if cpk_scope_info is not None: _encryption_scope = cpk_scope_info.encryption_scope if lease_access_conditions is not None: _lease_id = lease_access_conditions.lease_id if sequence_number_access_conditions is not None: _if_sequence_number_less_than_or_equal_to = sequence_number_access_conditions.if_sequence_number_less_than_or_equal_to _if_sequence_number_less_than = 
sequence_number_access_conditions.if_sequence_number_less_than _if_sequence_number_equal_to = sequence_number_access_conditions.if_sequence_number_equal_to if modified_access_conditions is not None: _if_modified_since = modified_access_conditions.if_modified_since _if_unmodified_since = modified_access_conditions.if_unmodified_since _if_match = modified_access_conditions.if_match _if_none_match = modified_access_conditions.if_none_match _if_tags = modified_access_conditions.if_tags if source_modified_access_conditions is not None: _source_if_modified_since = source_modified_access_conditions.source_if_modified_since _source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since _source_if_match = source_modified_access_conditions.source_if_match _source_if_none_match = source_modified_access_conditions.source_if_none_match request = build_upload_pages_from_url_request( url=self._config.url, comp=comp, page_write=page_write, version=self._config.version, source_url=source_url, source_range=source_range, content_length=content_length, range=range, source_content_md5=source_content_md5, source_contentcrc64=source_contentcrc64, timeout=timeout, encryption_key=_encryption_key, encryption_key_sha256=_encryption_key_sha256, encryption_algorithm=_encryption_algorithm, encryption_scope=_encryption_scope, lease_id=_lease_id, if_sequence_number_less_than_or_equal_to=_if_sequence_number_less_than_or_equal_to, if_sequence_number_less_than=_if_sequence_number_less_than, if_sequence_number_equal_to=_if_sequence_number_equal_to, if_modified_since=_if_modified_since, if_unmodified_since=_if_unmodified_since, if_match=_if_match, if_none_match=_if_none_match, if_tags=_if_tags, source_if_modified_since=_source_if_modified_since, source_if_unmodified_since=_source_if_unmodified_since, source_if_match=_source_if_match, source_if_none_match=_source_if_none_match, request_id_parameter=request_id_parameter, copy_source_authorization=copy_source_authorization, template_url=self.upload_pages_from_url.metadata['url'], headers=_headers, params=_params, ) request = _convert_request(request) request.url = self._client.format_url(request.url) # type: ignore pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access request, stream=False, **kwargs ) response = pipeline_response.http_response if response.status_code not in [201]: map_error(status_code=response.status_code, response=response, error_map=error_map) error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) raise HttpResponseError(response=response, model=error) response_headers = {} response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) response_headers['x-ms-content-crc64']=self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')) response_headers['x-ms-blob-sequence-number']=self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')) response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', 
response.headers.get('x-ms-request-server-encrypted')) response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) if cls: return cls(pipeline_response, None, response_headers) upload_pages_from_url.metadata = {'url': "{url}/{containerName}/{blob}"} # type: ignore @distributed_trace_async async def get_page_ranges( self, snapshot: Optional[str] = None, timeout: Optional[int] = None, range: Optional[str] = None, request_id_parameter: Optional[str] = None, marker: Optional[str] = None, maxresults: Optional[int] = None, lease_access_conditions: Optional[_models.LeaseAccessConditions] = None, modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None, **kwargs: Any ) -> _models.PageList: """The Get Page Ranges operation returns the list of valid page ranges for a page blob or snapshot of a page blob. :param snapshot: The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more information on working with blob snapshots, see :code:`<a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob">Creating a Snapshot of a Blob.</a>`. Default value is None. :type snapshot: str :param timeout: The timeout parameter is expressed in seconds. For more information, see :code:`<a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>`. Default value is None. :type timeout: int :param range: Return only the bytes of the blob in the specified range. Default value is None. :type range: str :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. Default value is None. :type request_id_parameter: str :param marker: A string value that identifies the portion of the list of containers to be returned with the next listing operation. The operation returns the NextMarker value within the response body if the listing operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used as the value for the marker parameter in a subsequent call to request the next page of list items. The marker value is opaque to the client. Default value is None. :type marker: str :param maxresults: Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value greater than 5000, the server will return up to 5000 items. Note that if the listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder of the results. For this reason, it is possible that the service will return fewer results than specified by maxresults, or than the default of 5000. Default value is None. :type maxresults: int :param lease_access_conditions: Parameter group. Default value is None. :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions :param modified_access_conditions: Parameter group. Default value is None. :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions :keyword comp: comp. Default value is "pagelist". Note that overriding this default value may result in unsupported behavior. 
:paramtype comp: str :keyword callable cls: A custom type or function that will be passed the direct response :return: PageList, or the result of cls(response) :rtype: ~azure.storage.blob.models.PageList :raises: ~azure.core.exceptions.HttpResponseError """ error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {}) or {}) _headers = kwargs.pop("headers", {}) or {} _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) comp = kwargs.pop('comp', _params.pop('comp', "pagelist")) # type: str cls = kwargs.pop('cls', None) # type: ClsType[_models.PageList] _lease_id = None _if_modified_since = None _if_unmodified_since = None _if_match = None _if_none_match = None _if_tags = None if lease_access_conditions is not None: _lease_id = lease_access_conditions.lease_id if modified_access_conditions is not None: _if_modified_since = modified_access_conditions.if_modified_since _if_unmodified_since = modified_access_conditions.if_unmodified_since _if_match = modified_access_conditions.if_match _if_none_match = modified_access_conditions.if_none_match _if_tags = modified_access_conditions.if_tags request = build_get_page_ranges_request( url=self._config.url, comp=comp, version=self._config.version, snapshot=snapshot, timeout=timeout, range=range, lease_id=_lease_id, if_modified_since=_if_modified_since, if_unmodified_since=_if_unmodified_since, if_match=_if_match, if_none_match=_if_none_match, if_tags=_if_tags, request_id_parameter=request_id_parameter, marker=marker, maxresults=maxresults, template_url=self.get_page_ranges.metadata['url'], headers=_headers, params=_params, ) request = _convert_request(request) request.url = self._client.format_url(request.url) # type: ignore pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access request, stream=False, **kwargs ) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) raise HttpResponseError(response=response, model=error) response_headers = {} response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) response_headers['x-ms-blob-content-length']=self._deserialize('long', response.headers.get('x-ms-blob-content-length')) response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) deserialized = self._deserialize('PageList', pipeline_response) if cls: return cls(pipeline_response, deserialized, response_headers) return deserialized get_page_ranges.metadata = {'url': "{url}/{containerName}/{blob}"} # type: ignore @distributed_trace_async async def get_page_ranges_diff( self, snapshot: Optional[str] = None, timeout: Optional[int] = None, prevsnapshot: Optional[str] = None, prev_snapshot_url: Optional[str] = None, range: Optional[str] = None, request_id_parameter: Optional[str] = None, marker: Optional[str] = None, maxresults: Optional[int] = None, 
lease_access_conditions: Optional[_models.LeaseAccessConditions] = None, modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None, **kwargs: Any ) -> _models.PageList: """The Get Page Ranges Diff operation returns the list of valid page ranges for a page blob that were changed between target blob and previous snapshot. :param snapshot: The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more information on working with blob snapshots, see :code:`<a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob">Creating a Snapshot of a Blob.</a>`. Default value is None. :type snapshot: str :param timeout: The timeout parameter is expressed in seconds. For more information, see :code:`<a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>`. Default value is None. :type timeout: int :param prevsnapshot: Optional in version 2015-07-08 and newer. The prevsnapshot parameter is a DateTime value that specifies that the response will contain only pages that were changed between target blob and previous snapshot. Changed pages include both updated and cleared pages. The target blob may be a snapshot, as long as the snapshot specified by prevsnapshot is the older of the two. Note that incremental snapshots are currently supported only for blobs created on or after January 1, 2016. Default value is None. :type prevsnapshot: str :param prev_snapshot_url: Optional. This header is only supported in service versions 2019-04-19 and after and specifies the URL of a previous snapshot of the target blob. The response will only contain pages that were changed between the target blob and its previous snapshot. Default value is None. :type prev_snapshot_url: str :param range: Return only the bytes of the blob in the specified range. Default value is None. :type range: str :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. Default value is None. :type request_id_parameter: str :param marker: A string value that identifies the portion of the list of containers to be returned with the next listing operation. The operation returns the NextMarker value within the response body if the listing operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used as the value for the marker parameter in a subsequent call to request the next page of list items. The marker value is opaque to the client. Default value is None. :type marker: str :param maxresults: Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value greater than 5000, the server will return up to 5000 items. Note that if the listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder of the results. For this reason, it is possible that the service will return fewer results than specified by maxresults, or than the default of 5000. Default value is None. :type maxresults: int :param lease_access_conditions: Parameter group. Default value is None. :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions :param modified_access_conditions: Parameter group. Default value is None. 
:type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions :keyword comp: comp. Default value is "pagelist". Note that overriding this default value may result in unsupported behavior. :paramtype comp: str :keyword callable cls: A custom type or function that will be passed the direct response :return: PageList, or the result of cls(response) :rtype: ~azure.storage.blob.models.PageList :raises: ~azure.core.exceptions.HttpResponseError """ error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {}) or {}) _headers = kwargs.pop("headers", {}) or {} _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) comp = kwargs.pop('comp', _params.pop('comp', "pagelist")) # type: str cls = kwargs.pop('cls', None) # type: ClsType[_models.PageList] _lease_id = None _if_modified_since = None _if_unmodified_since = None _if_match = None _if_none_match = None _if_tags = None if lease_access_conditions is not None: _lease_id = lease_access_conditions.lease_id if modified_access_conditions is not None: _if_modified_since = modified_access_conditions.if_modified_since _if_unmodified_since = modified_access_conditions.if_unmodified_since _if_match = modified_access_conditions.if_match _if_none_match = modified_access_conditions.if_none_match _if_tags = modified_access_conditions.if_tags request = build_get_page_ranges_diff_request( url=self._config.url, comp=comp, version=self._config.version, snapshot=snapshot, timeout=timeout, prevsnapshot=prevsnapshot, prev_snapshot_url=prev_snapshot_url, range=range, lease_id=_lease_id, if_modified_since=_if_modified_since, if_unmodified_since=_if_unmodified_since, if_match=_if_match, if_none_match=_if_none_match, if_tags=_if_tags, request_id_parameter=request_id_parameter, marker=marker, maxresults=maxresults, template_url=self.get_page_ranges_diff.metadata['url'], headers=_headers, params=_params, ) request = _convert_request(request) request.url = self._client.format_url(request.url) # type: ignore pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access request, stream=False, **kwargs ) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) raise HttpResponseError(response=response, model=error) response_headers = {} response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) response_headers['x-ms-blob-content-length']=self._deserialize('long', response.headers.get('x-ms-blob-content-length')) response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) deserialized = self._deserialize('PageList', pipeline_response) if cls: return cls(pipeline_response, deserialized, response_headers) return deserialized get_page_ranges_diff.metadata = {'url': "{url}/{containerName}/{blob}"} # type: ignore @distributed_trace_async async def resize( # pylint: 
disable=inconsistent-return-statements self, blob_content_length: int, timeout: Optional[int] = None, request_id_parameter: Optional[str] = None, lease_access_conditions: Optional[_models.LeaseAccessConditions] = None, cpk_info: Optional[_models.CpkInfo] = None, cpk_scope_info: Optional[_models.CpkScopeInfo] = None, modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None, **kwargs: Any ) -> None: """Resize the Blob. :param blob_content_length: This header specifies the maximum size for the page blob, up to 1 TB. The page blob size must be aligned to a 512-byte boundary. :type blob_content_length: long :param timeout: The timeout parameter is expressed in seconds. For more information, see :code:`<a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>`. Default value is None. :type timeout: int :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. Default value is None. :type request_id_parameter: str :param lease_access_conditions: Parameter group. Default value is None. :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions :param cpk_info: Parameter group. Default value is None. :type cpk_info: ~azure.storage.blob.models.CpkInfo :param cpk_scope_info: Parameter group. Default value is None. :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo :param modified_access_conditions: Parameter group. Default value is None. :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions :keyword comp: comp. Default value is "properties". Note that overriding this default value may result in unsupported behavior. 
:paramtype comp: str :keyword callable cls: A custom type or function that will be passed the direct response :return: None, or the result of cls(response) :rtype: None :raises: ~azure.core.exceptions.HttpResponseError """ error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {}) or {}) _headers = kwargs.pop("headers", {}) or {} _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) comp = kwargs.pop('comp', _params.pop('comp', "properties")) # type: str cls = kwargs.pop('cls', None) # type: ClsType[None] _lease_id = None _encryption_key = None _encryption_key_sha256 = None _encryption_algorithm = None _encryption_scope = None _if_modified_since = None _if_unmodified_since = None _if_match = None _if_none_match = None _if_tags = None if lease_access_conditions is not None: _lease_id = lease_access_conditions.lease_id if cpk_info is not None: _encryption_key = cpk_info.encryption_key _encryption_key_sha256 = cpk_info.encryption_key_sha256 _encryption_algorithm = cpk_info.encryption_algorithm if cpk_scope_info is not None: _encryption_scope = cpk_scope_info.encryption_scope if modified_access_conditions is not None: _if_modified_since = modified_access_conditions.if_modified_since _if_unmodified_since = modified_access_conditions.if_unmodified_since _if_match = modified_access_conditions.if_match _if_none_match = modified_access_conditions.if_none_match _if_tags = modified_access_conditions.if_tags request = build_resize_request( url=self._config.url, comp=comp, version=self._config.version, blob_content_length=blob_content_length, timeout=timeout, lease_id=_lease_id, encryption_key=_encryption_key, encryption_key_sha256=_encryption_key_sha256, encryption_algorithm=_encryption_algorithm, encryption_scope=_encryption_scope, if_modified_since=_if_modified_since, if_unmodified_since=_if_unmodified_since, if_match=_if_match, if_none_match=_if_none_match, if_tags=_if_tags, request_id_parameter=request_id_parameter, template_url=self.resize.metadata['url'], headers=_headers, params=_params, ) request = _convert_request(request) request.url = self._client.format_url(request.url) # type: ignore pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access request, stream=False, **kwargs ) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) raise HttpResponseError(response=response, model=error) response_headers = {} response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) response_headers['x-ms-blob-sequence-number']=self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')) response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) if cls: return cls(pipeline_response, None, response_headers) resize.metadata = {'url': "{url}/{containerName}/{blob}"} # type: ignore @distributed_trace_async 
async def update_sequence_number( # pylint: disable=inconsistent-return-statements self, sequence_number_action: Union[str, "_models.SequenceNumberActionType"], timeout: Optional[int] = None, blob_sequence_number: Optional[int] = 0, request_id_parameter: Optional[str] = None, lease_access_conditions: Optional[_models.LeaseAccessConditions] = None, modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None, **kwargs: Any ) -> None: """Update the sequence number of the blob. :param sequence_number_action: Required if the x-ms-blob-sequence-number header is set for the request. This property applies to page blobs only. This property indicates how the service should modify the blob's sequence number. :type sequence_number_action: str or ~azure.storage.blob.models.SequenceNumberActionType :param timeout: The timeout parameter is expressed in seconds. For more information, see :code:`<a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>`. Default value is None. :type timeout: int :param blob_sequence_number: Set for page blobs only. The sequence number is a user-controlled value that you can use to track requests. The value of the sequence number must be between 0 and 2^63 - 1. Default value is 0. :type blob_sequence_number: long :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. Default value is None. :type request_id_parameter: str :param lease_access_conditions: Parameter group. Default value is None. :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions :param modified_access_conditions: Parameter group. Default value is None. :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions :keyword comp: comp. Default value is "properties". Note that overriding this default value may result in unsupported behavior. 
:paramtype comp: str :keyword callable cls: A custom type or function that will be passed the direct response :return: None, or the result of cls(response) :rtype: None :raises: ~azure.core.exceptions.HttpResponseError """ error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {}) or {}) _headers = kwargs.pop("headers", {}) or {} _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) comp = kwargs.pop('comp', _params.pop('comp', "properties")) # type: str cls = kwargs.pop('cls', None) # type: ClsType[None] _lease_id = None _if_modified_since = None _if_unmodified_since = None _if_match = None _if_none_match = None _if_tags = None if lease_access_conditions is not None: _lease_id = lease_access_conditions.lease_id if modified_access_conditions is not None: _if_modified_since = modified_access_conditions.if_modified_since _if_unmodified_since = modified_access_conditions.if_unmodified_since _if_match = modified_access_conditions.if_match _if_none_match = modified_access_conditions.if_none_match _if_tags = modified_access_conditions.if_tags request = build_update_sequence_number_request( url=self._config.url, comp=comp, version=self._config.version, sequence_number_action=sequence_number_action, timeout=timeout, lease_id=_lease_id, if_modified_since=_if_modified_since, if_unmodified_since=_if_unmodified_since, if_match=_if_match, if_none_match=_if_none_match, if_tags=_if_tags, blob_sequence_number=blob_sequence_number, request_id_parameter=request_id_parameter, template_url=self.update_sequence_number.metadata['url'], headers=_headers, params=_params, ) request = _convert_request(request) request.url = self._client.format_url(request.url) # type: ignore pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access request, stream=False, **kwargs ) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) raise HttpResponseError(response=response, model=error) response_headers = {} response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) response_headers['x-ms-blob-sequence-number']=self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')) response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) if cls: return cls(pipeline_response, None, response_headers) update_sequence_number.metadata = {'url': "{url}/{containerName}/{blob}"} # type: ignore @distributed_trace_async async def copy_incremental( # pylint: disable=inconsistent-return-statements self, copy_source: str, timeout: Optional[int] = None, request_id_parameter: Optional[str] = None, modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None, **kwargs: Any ) -> None: """The Copy Incremental operation copies a snapshot of the source page blob to a destination page blob. 
The snapshot is copied such that only the differential changes between the previously copied snapshot are transferred to the destination. The copied snapshots are complete copies of the original snapshot and can be read or copied from as usual. This API is supported since REST version 2016-05-31. :param copy_source: Specifies the name of the source page blob snapshot. This value is a URL of up to 2 KB in length that specifies a page blob snapshot. The value should be URL-encoded as it would appear in a request URI. The source blob must either be public or must be authenticated via a shared access signature. :type copy_source: str :param timeout: The timeout parameter is expressed in seconds. For more information, see :code:`<a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>`. Default value is None. :type timeout: int :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. Default value is None. :type request_id_parameter: str :param modified_access_conditions: Parameter group. Default value is None. :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions :keyword comp: comp. Default value is "incrementalcopy". Note that overriding this default value may result in unsupported behavior. :paramtype comp: str :keyword callable cls: A custom type or function that will be passed the direct response :return: None, or the result of cls(response) :rtype: None :raises: ~azure.core.exceptions.HttpResponseError """ error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {}) or {}) _headers = kwargs.pop("headers", {}) or {} _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) comp = kwargs.pop('comp', _params.pop('comp', "incrementalcopy")) # type: str cls = kwargs.pop('cls', None) # type: ClsType[None] _if_modified_since = None _if_unmodified_since = None _if_match = None _if_none_match = None _if_tags = None if modified_access_conditions is not None: _if_modified_since = modified_access_conditions.if_modified_since _if_unmodified_since = modified_access_conditions.if_unmodified_since _if_match = modified_access_conditions.if_match _if_none_match = modified_access_conditions.if_none_match _if_tags = modified_access_conditions.if_tags request = build_copy_incremental_request( url=self._config.url, comp=comp, version=self._config.version, copy_source=copy_source, timeout=timeout, if_modified_since=_if_modified_since, if_unmodified_since=_if_unmodified_since, if_match=_if_match, if_none_match=_if_none_match, if_tags=_if_tags, request_id_parameter=request_id_parameter, template_url=self.copy_incremental.metadata['url'], headers=_headers, params=_params, ) request = _convert_request(request) request.url = self._client.format_url(request.url) # type: ignore pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access request, stream=False, **kwargs ) response = pipeline_response.http_response if response.status_code not in [202]: map_error(status_code=response.status_code, response=response, error_map=error_map) error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) raise HttpResponseError(response=response, model=error) response_headers = {} 
response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) response_headers['x-ms-copy-id']=self._deserialize('str', response.headers.get('x-ms-copy-id')) response_headers['x-ms-copy-status']=self._deserialize('str', response.headers.get('x-ms-copy-status')) if cls: return cls(pipeline_response, None, response_headers) copy_incremental.metadata = {'url': "{url}/{containerName}/{blob}"} # type: ignore
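A minimal usage sketch for the two operations above, which are thin async wrappers over the page-blob REST endpoints (comp=properties with x-ms-sequence-number-action, and comp=incrementalcopy). The sketch assumes `ops` is an already-constructed instance of this operations class wired to an authenticated pipeline, and the snapshot URL is a placeholder:

async def demo(ops):
    # Bump the page blob's sequence number by one (other REST actions: "max", "update").
    await ops.update_sequence_number(sequence_number_action="increment")

    # Start an incremental copy from a page-blob snapshot URL; completion is
    # reported out of band via the x-ms-copy-status header returned here.
    await ops.copy_incremental(
        copy_source="https://<account>.blob.core.windows.net/<container>/<blob>?snapshot=<time>"
    )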
1.640625
2
Win_Source/ESP_Autostart.py
maschhoff/ESP32-433Mhz-Receiver-and-Tools
3
12476
# <NAME> <EMAIL> # The MIT License (MIT) # # Copyright (c) 2020 # # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. import time from tkinter import * from tkinter import ttk from tkinter.messagebox import * import serial root=Tk() root.title("ESP Autostart Changer") err="" def serialOn(): global ser for port in range(3,9): comport="COM"+str(port)+":" try: ser = serial.Serial(port=comport,baudrate=115200) serialopen=True except Exception as e: #print ("error open serial port: " + str(e)) serialopen=False if serialopen == True: #ESPsend(chr(4)) ESPsend(chr(3)) time.sleep(1) if ser.inWaiting() != 0: ser.read() return (comport) else: serialopen=False return ("Error") def ESPsend(out): out+="\r\n" out=out.encode("utf-8") ser.write(out) time.sleep(0.1) def autooff(): if ser.isOpen() == False:start() ESPsend("import os") ESPsend("os.rename('main.py','mainxxx.py')") time.sleep(0.5) res="" while ser.inWaiting() != 0: a=ser.read() res+=a.decode("utf-8") pos=res.find("OSError") if pos==-1: hinweistxt="Autostart is off" else: hinweistxt="Autostart already off" hinweis.config(text=hinweistxt) stop() def autoon(): if ser.isOpen() == False:start() ESPsend("import os") ESPsend("os.rename('mainxxx.py','main.py')") res="" while ser.inWaiting() != 0: a=ser.read() res+=a.decode("utf-8") pos=res.find("OSError") if pos==-1: hinweistxt="Autostart is on" else: hinweistxt="Autostart already on" hinweis.config(text=hinweistxt) stop() def stop(): ser.close() def start(): while True: res="" err=serialOn() if err!="Error": statustxt="ESP connectet on: "+err status.config(text=statustxt) ESPsend("import os") ESPsend("os.listdir()") while ser.inWaiting() != 0: a=ser.read() res+=a.decode("utf-8") if "main.py" in res: hinweistxt="Autostart is on" else: hinweistxt="Autostart is off" hinweis.config(text=hinweistxt) break else: if askyesno("No ESP found!!! 
Try again?"): ser.close() pass else: exit() #---------------------------------------------------------------------------------- #---------- Witgets laden frameButton = Frame(root) frameButton.pack(fill='both') button2=Button(frameButton, text="Autostart ON ", command=autoon) button2.pack(side="right",padx="5",pady="2") button1=Button(frameButton, text="Autostart OFF ", command=autooff) button1.pack(side="right",padx="5") hinweis = Label(root, fg = "lightgreen",bg = "gray", font = "Verdana 10 bold" ) hinweis.pack(fill='both',padx="5",pady="2") hinweistxt="Change Autostart " hinweis.config(text=hinweistxt) status = Label(root) status.pack(fill='both',padx="5",pady="2") statustxt=" " status.config(text=statustxt) #------------------------------------------------------------------------------------ start() root.mainloop()
2.46875
2
app/core/apps.py
KarimTayie/djangoadmin-test
0
12477
<reponame>KarimTayie/djangoadmin-test
from django.apps import AppConfig
from django.contrib.admin.apps import AdminConfig


class CoreConfig(AppConfig):
    name = "core"


class AppAdminConfig(AdminConfig):
    default_site = "core.admin.AppAdmin"
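AppAdminConfig.default_site points Django at a custom AdminSite subclass, and it only takes effect when the project's settings register this config in place of the stock admin app. A sketch of the two pieces this file relies on; the repo's real core/admin.py and settings are not shown here, so the bodies below are illustrative:

# core/admin.py (assumed shape of the class referenced by default_site)
from django.contrib import admin

class AppAdmin(admin.AdminSite):
    site_header = "Custom admin"      # illustrative only


# settings.py: swap the stock admin app for the custom config
INSTALLED_APPS = [
    "core.apps.AppAdminConfig",       # instead of "django.contrib.admin"
    "django.contrib.auth",
    "django.contrib.contenttypes",
    # ...
]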
1.507813
2
spike/compiler/Node.py
spikeio/lang
1
12478
class Node(object):
    # XXX: legacy code support
    kind = property(lambda self: self.__class__)

    def _iterChildren(self):
        for name in self.childAttrNames:
            yield (name, getattr(self, name))
        return

    children = property(_iterChildren)

    def dump(self, stream, indent=0):
        # XXX: Expand argument lists? Show declspecs? (Ditto for
        # 'graphviz'.)
        for attr, child in self.children:
            print >>stream, "%s%s: %r" % (" " * indent, attr, child)
            if hasattr(child, 'dump'):
                child.dump(stream, indent + 1)
        return

    def graphviz(self, stream):
        print >>stream, ' n%d[label="%r"];' % (id(self), self)
        for attr, child in self.children:
            if child is None:
                pass
            elif hasattr(child, 'graphviz'):
                child.graphviz(stream)
            else:
                print >>stream, ' n%d[label="%r"];' % (id(child), child)
        print >>stream
        for attr, child in self.children:
            if child is None:
                continue
            print >>stream, ' n%d->n%d[label="%s"];' % ( id(self), id(child), attr)
        return
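Node is an abstract base: concrete nodes are expected to define a childAttrNames sequence, and dump/graphviz then walk those attributes. A small illustrative subclass, not part of this module; since the module uses Python 2 print >> syntax, the sketch assumes Python 2 as well:

import sys

class Pair(Node):
    childAttrNames = ('left', 'right')   # attributes dump()/graphviz() will visit
    def __init__(self, left, right):
        self.left = left
        self.right = right
    def __repr__(self):
        return 'Pair'

class Leaf(Node):
    childAttrNames = ()                  # no children to walk
    def __init__(self, value):
        self.value = value
    def __repr__(self):
        return 'Leaf(%r)' % (self.value,)

tree = Pair(Leaf(1), Leaf(2))
tree.dump(sys.stdout)      # prints each child attribute, indenting one step per level
tree.graphviz(sys.stdout)  # prints graphviz node and edge statements for the same tree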
2.875
3
download_cifar100_teacher.py
valeoai/QuEST
3
12479
import os
import urllib.request

os.makedirs('saved_models', exist_ok=True)

model_path = 'http://shape2prog.csail.mit.edu/repo/wrn_40_2_vanilla/ckpt_epoch_240.pth'
model_dir = 'saved_models/wrn_40_2_vanilla'
os.makedirs(model_dir, exist_ok=True)
urllib.request.urlretrieve(model_path, os.path.join(model_dir, model_path.split('/')[-1]))
print(f"Downloaded {model_path.split('repo/')[-1]} to saved_models/")

model_path = 'http://shape2prog.csail.mit.edu/repo/resnet56_vanilla/ckpt_epoch_240.pth'
model_dir = 'saved_models/resnet56_vanilla'
os.makedirs(model_dir, exist_ok=True)
urllib.request.urlretrieve(model_path, os.path.join(model_dir, model_path.split('/')[-1]))
print(f"Downloaded {model_path.split('repo/')[-1]} to saved_models/")

model_path = 'http://shape2prog.csail.mit.edu/repo/resnet110_vanilla/ckpt_epoch_240.pth'
model_dir = 'saved_models/resnet110_vanilla'
os.makedirs(model_dir, exist_ok=True)
urllib.request.urlretrieve(model_path, os.path.join(model_dir, model_path.split('/')[-1]))
print(f"Downloaded {model_path.split('repo/')[-1]} to saved_models/")

model_path = 'http://shape2prog.csail.mit.edu/repo/resnet32x4_vanilla/ckpt_epoch_240.pth'
model_dir = 'saved_models/resnet32x4_vanilla'
os.makedirs(model_dir, exist_ok=True)
urllib.request.urlretrieve(model_path, os.path.join(model_dir, model_path.split('/')[-1]))
print(f"Downloaded {model_path.split('repo/')[-1]} to saved_models/")

model_path = 'http://shape2prog.csail.mit.edu/repo/vgg13_vanilla/ckpt_epoch_240.pth'
model_dir = 'saved_models/vgg13_vanilla'
os.makedirs(model_dir, exist_ok=True)
urllib.request.urlretrieve(model_path, os.path.join(model_dir, model_path.split('/')[-1]))
print(f"Downloaded {model_path.split('repo/')[-1]} to saved_models/")

model_path = 'http://shape2prog.csail.mit.edu/repo/ResNet50_vanilla/ckpt_epoch_240.pth'
model_dir = 'saved_models/ResNet50_vanilla'
os.makedirs(model_dir, exist_ok=True)
urllib.request.urlretrieve(model_path, os.path.join(model_dir, model_path.split('/')[-1]))
print(f"Downloaded {model_path.split('repo/')[-1]} to saved_models/")
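The script repeats the same three-step block (build the checkpoint URL, create the model directory, download) once per teacher checkpoint. An equivalent loop form, using the same URLs and output layout, shown only to make the repeated pattern explicit:

import os
import urllib.request

MODELS = ['wrn_40_2_vanilla', 'resnet56_vanilla', 'resnet110_vanilla',
          'resnet32x4_vanilla', 'vgg13_vanilla', 'ResNet50_vanilla']

os.makedirs('saved_models', exist_ok=True)
for name in MODELS:
    model_path = 'http://shape2prog.csail.mit.edu/repo/%s/ckpt_epoch_240.pth' % name
    model_dir = os.path.join('saved_models', name)
    os.makedirs(model_dir, exist_ok=True)
    urllib.request.urlretrieve(model_path, os.path.join(model_dir, model_path.split('/')[-1]))
    print(f"Downloaded {model_path.split('repo/')[-1]} to saved_models/")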
2.828125
3
aliyunsdkcore/__init__.py
gikoluo/aliyun-python-sdk-core
0
12480
<gh_stars>0
__author__ = '<NAME>'
__version__ = '2.3.3'
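Downstream code can read these module-level attributes at runtime to check which core SDK version is installed, for example:

import aliyunsdkcore

print(aliyunsdkcore.__version__)  # '2.3.3' for this checkout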
1.0625
1
leaderboard/scenarios/background_activity.py
casper-auto/leaderboard
68
12481
<filename>leaderboard/scenarios/background_activity.py #!/usr/bin/env python # # This work is licensed under the terms of the MIT license. # For a copy, see <https://opensource.org/licenses/MIT>. """ Scenario spawning elements to make the town dynamic and interesting """ import math from collections import OrderedDict import py_trees import numpy as np import carla from srunner.scenariomanager.carla_data_provider import CarlaDataProvider from srunner.scenariomanager.scenarioatomics.atomic_behaviors import AtomicBehavior from srunner.scenarios.basic_scenario import BasicScenario DEBUG_COLORS = { 'road': carla.Color(0, 0, 255), # Blue 'opposite': carla.Color(255, 0, 0), # Red 'junction': carla.Color(0, 0, 0), # Black 'entry': carla.Color(255, 255, 0), # Yellow 'exit': carla.Color(0, 255, 255), # Teal 'connect': carla.Color(0, 255, 0), # Green } DEBUG_TYPE = { 'small': [0.8, 0.1], 'medium': [0.5, 0.15], 'large': [0.2, 0.2], } def draw_string(world, location, string='', debug_type='road', persistent=False): """Utility function to draw debugging strings""" v_shift, _ = DEBUG_TYPE.get('small') l_shift = carla.Location(z=v_shift) color = DEBUG_COLORS.get(debug_type, 'road') life_time = 0.07 if not persistent else 100000 world.debug.draw_string(location + l_shift, string, False, color, life_time) def draw_point(world, location, point_type='small', debug_type='road', persistent=False): """Utility function to draw debugging points""" v_shift, size = DEBUG_TYPE.get(point_type, 'small') l_shift = carla.Location(z=v_shift) color = DEBUG_COLORS.get(debug_type, 'road') life_time = 0.07 if not persistent else 100000 world.debug.draw_point(location + l_shift, size, color, life_time) def get_same_dir_lanes(waypoint): """Gets all the lanes with the same direction of the road of a wp""" same_dir_wps = [waypoint] # Check roads on the right right_wp = waypoint while True: possible_right_wp = right_wp.get_right_lane() if possible_right_wp is None or possible_right_wp.lane_type != carla.LaneType.Driving: break right_wp = possible_right_wp same_dir_wps.append(right_wp) # Check roads on the left left_wp = waypoint while True: possible_left_wp = left_wp.get_left_lane() if possible_left_wp is None or possible_left_wp.lane_type != carla.LaneType.Driving: break if possible_left_wp.lane_id * left_wp.lane_id < 0: break left_wp = possible_left_wp same_dir_wps.append(left_wp) return same_dir_wps def get_opposite_dir_lanes(waypoint): """Gets all the lanes with opposite direction of the road of a wp""" other_dir_wps = [] other_dir_wp = None # Get the first lane of the opposite direction left_wp = waypoint while True: possible_left_wp = left_wp.get_left_lane() if possible_left_wp is None: break if possible_left_wp.lane_id * left_wp.lane_id < 0: other_dir_wp = possible_left_wp break left_wp = possible_left_wp if not other_dir_wp: return other_dir_wps # Check roads on the right right_wp = other_dir_wp while True: if right_wp.lane_type == carla.LaneType.Driving: other_dir_wps.append(right_wp) possible_right_wp = right_wp.get_right_lane() if possible_right_wp is None: break right_wp = possible_right_wp return other_dir_wps def get_lane_key(waypoint): """Returns a key corresponding to the waypoint lane. Equivalent to a 'Lane' object and used to compare waypoint lanes""" return '' if waypoint is None else get_road_key(waypoint) + '*' + str(waypoint.lane_id) def get_road_key(waypoint): """Returns a key corresponding to the waypoint road. 
Equivalent to a 'Road' object and used to compare waypoint roads""" return '' if waypoint is None else str(waypoint.road_id) class Source(object): """ Source object to store its position and its responsible actors """ def __init__(self, wp, actors, entry_lane_wp=''): self.wp = wp self.actors = actors # For road sources self.mapped_key = get_lane_key(wp) # For junction sources self.entry_lane_wp = entry_lane_wp self.previous_lane_keys = [] # Source lane and connecting lanes of the previous junction class Junction(object): """ Junction object. Stores its topology as well as its state, when active """ def __init__(self, junction, junction_id, route_entry_index=None, route_exit_index=None): # Topology self.junctions = [junction] self.id = junction_id self.route_entry_index = route_entry_index self.route_exit_index = route_exit_index self.exit_road_length = 0 self.route_entry_keys = [] self.route_exit_keys = [] self.opposite_entry_keys = [] self.opposite_exit_keys = [] self.entry_wps = [] self.exit_wps = [] self.entry_directions = {'ref': [], 'opposite': [], 'left': [], 'right': []} self.exit_directions = {'ref': [], 'opposite': [], 'left': [], 'right': []} # State self.entry_sources = [] self.exit_sources = [] self.exit_dict = OrderedDict() self.actor_dict = OrderedDict() self.scenario_info = { 'direction': None, 'remove_entries': False, 'remove_middle': False, 'remove_exits': False, } def contains(self, other_junction): """Checks whether or not a carla.Junction is part of the class""" other_id = other_junction.id for junction in self.junctions: if other_id == junction.id: return True return False class BackgroundActivity(BasicScenario): """ Implementation of a scenario to spawn a set of background actors, and to remove traffic jams in background traffic This is a single ego vehicle scenario """ def __init__(self, world, ego_vehicle, config, route, night_mode=False, debug_mode=False, timeout=0): """ Setup all relevant parameters and create scenario """ self._map = CarlaDataProvider.get_map() self.ego_vehicle = ego_vehicle self.route = route self.config = config self._night_mode = night_mode self.debug = debug_mode self.timeout = timeout # Timeout of scenario in seconds super(BackgroundActivity, self).__init__("BackgroundActivity", [ego_vehicle], config, world, debug_mode, terminate_on_failure=True, criteria_enable=True) def _create_behavior(self): """ Basic behavior do nothing, i.e. Idle """ # Check if a vehicle is further than X, destroy it if necessary and respawn it return BackgroundBehavior(self.ego_vehicle, self.route, self._night_mode) def _create_test_criteria(self): """ A list of all test criteria will be created that is later used in parallel behavior tree. 
""" pass def __del__(self): """ Remove all actors upon deletion """ pass class BackgroundBehavior(AtomicBehavior): """ Handles the background activity """ def __init__(self, ego_actor, route, night_mode=False, debug=False, name="BackgroundBehavior"): """ Setup class members """ super(BackgroundBehavior, self).__init__(name) self.debug = debug self._map = CarlaDataProvider.get_map() self._world = CarlaDataProvider.get_world() timestep = self._world.get_snapshot().timestamp.delta_seconds self._tm = CarlaDataProvider.get_client().get_trafficmanager( CarlaDataProvider.get_traffic_manager_port()) self._tm.global_percentage_speed_difference(0.0) self._night_mode = night_mode # Global variables self._ego_actor = ego_actor self._ego_state = 'road' self._route_index = 0 self._get_route_data(route) self._spawn_vertical_shift = 0.2 self._reuse_dist = 10 # When spawning actors, might reuse actors closer to this distance self._spawn_free_radius = 20 # Sources closer to the ego will not spawn actors self._fake_junction_ids = [] self._fake_lane_pair_keys = [] # Road variables self._road_actors = [] self._road_back_actors = {} # Dictionary mapping the actors behind the ego to their lane self._road_ego_key = None self._road_extra_front_actors = 0 self._road_sources = [] self._road_checker_index = 0 self._road_ego_key = "" self._road_front_vehicles = 3 # Amount of vehicles in front of the ego self._road_back_vehicles = 3 # Amount of vehicles behind the ego self._road_vehicle_dist = 8 # Distance road vehicles leave betweeen each other[m] self._road_spawn_dist = 11 # Initial distance between spawned road vehicles [m] self._road_new_sources_dist = 20 # Distance of the source to the start of the new lanes self._radius_increase_ratio = 1.8 # Meters the radius increases per m/s of the ego self._extra_radius = 0.0 # Extra distance to avoid the road behavior from blocking self._extra_radius_increase_ratio = 0.5 * timestep # Distance the radius increases per tick (0.5 m/s) self._max_extra_radius = 10 # Max extra distance self._base_min_radius = 0 self._base_max_radius = 0 self._min_radius = 0 self._max_radius = 0 self._junction_detection_dist = 0 self._get_road_radius() # Junction variables self._junctions = [] self._active_junctions = [] self._junction_sources_dist = 40 # Distance from the entry sources to the junction [m] self._junction_vehicle_dist = 8 # Distance junction vehicles leave betweeen each other[m] self._junction_spawn_dist = 10 # Initial distance between spawned junction vehicles [m] self._junction_sources_max_actors = 5 # Maximum vehicles alive at the same time per source # Opposite lane variables self._opposite_actors = [] self._opposite_sources = [] self._opposite_route_index = 0 self._opposite_removal_dist = 30 # Distance at which actors are destroyed self._opposite_sources_dist = 60 # Distance from the ego to the opposite sources [m] self._opposite_vehicle_dist = 10 # Distance opposite vehicles leave betweeen each other[m] self._opposite_spawn_dist = 20 # Initial distance between spawned opposite vehicles [m] self._opposite_sources_max_actors = 8 # Maximum vehicles alive at the same time per source # Scenario 2 variables self._is_scenario_2_active = False self._scenario_2_actors = [] self._activate_break_scenario = False self._break_duration = 7 # Duration of the scenario self._next_scenario_time = float('inf') # Scenario 4 variables self._is_scenario_4_active = False self._scenario_4_actors = [] self._ego_exitted_junction = False self._crossing_dist = None # Distance between the crossing object 
and the junction exit self._start_ego_wp = None # Junction scenario variables self.scenario_info = { 'direction': None, 'remove_entries': False, 'remove_middle': False, 'remove_exits': False, } # Same as the Junction.scenario_info, but this stores the data in case no junctions are active def _get_route_data(self, route): """Extract the information from the route""" self._route = [] # Transform the route into a list of waypoints self._accum_dist = [] # Save the total traveled distance for each waypoint prev_trans = None for trans, _ in route: self._route.append(self._map.get_waypoint(trans.location)) if prev_trans: dist = trans.location.distance(prev_trans.location) self._accum_dist.append(dist + self._accum_dist[-1]) else: self._accum_dist.append(0) prev_trans = trans self._route_length = len(route) self._route_index = 0 self._route_buffer = 3 def _get_road_radius(self): """ Computes the min and max radius of the road behaviorm which will determine the speed of the vehicles. Vehicles closer than the min radius maintain full speed, while those further than max radius are stopped. Between the two, the velocity decreases linearly""" self._base_min_radius = (self._road_front_vehicles + self._road_extra_front_actors) * self._road_spawn_dist self._base_max_radius = (self._road_front_vehicles + self._road_extra_front_actors + 1) * self._road_spawn_dist self._min_radius = self._base_min_radius self._max_radius = self._base_max_radius def initialise(self): """Creates the background activity actors. Pressuposes that the ego is at a road""" self._create_junction_dict() ego_wp = self._route[0] self._road_ego_key = get_lane_key(ego_wp) same_dir_wps = get_same_dir_lanes(ego_wp) self._initialise_road_behavior(same_dir_wps, ego_wp) self._initialise_opposite_sources() self._initialise_road_checker() def update(self): new_status = py_trees.common.Status.RUNNING prev_ego_index = self._route_index # Check if the TM destroyed an actor if self._route_index > 0: self._check_background_actors() # Get ego's odometry. 
For robustness, the closest route point will be used location = CarlaDataProvider.get_location(self._ego_actor) ego_wp = self._update_ego_route_location(location) ego_transform = ego_wp.transform if self.debug: string = 'EGO_' + self._ego_state[0].upper() draw_string(self._world, location, string, self._ego_state, False) # Parameters and scenarios self._update_parameters() self._manage_break_scenario() # Update ego state if self._ego_state == 'junction': self._monitor_ego_junction_exit(ego_wp) self._monitor_nearby_junctions() # Update_actors if self._ego_state == 'junction': self._monitor_ego_junction_exit(ego_wp) self._update_junction_actors() self._update_junction_sources() else: self._update_road_actors(prev_ego_index, self._route_index) self._move_road_checker(prev_ego_index, self._route_index) self._move_opposite_sources(prev_ego_index, self._route_index) self._update_opposite_sources() # Update non junction sources self._update_opposite_actors(ego_transform) self._update_road_sources(ego_transform.location) self._monitor_scenario_4_end(ego_transform.location) return new_status def terminate(self, new_status): """Destroy all actors""" all_actors = self._get_actors() for actor in list(all_actors): self._destroy_actor(actor) super(BackgroundBehavior, self).terminate(new_status) def _get_actors(self): """Returns a list of all actors part of the background activity""" actors = self._road_actors + self._opposite_actors for junction in self._active_junctions: actors.extend(list(junction.actor_dict)) return actors def _check_background_actors(self): """Checks if the Traffic Manager has removed a backgroudn actor""" background_actors = self._get_actors() alive_ids = [actor.id for actor in self._world.get_actors().filter('vehicle*')] for actor in background_actors: if actor.id not in alive_ids: self._remove_actor_info(actor) ################################ ## Junction cache ## ################################ def _create_junction_dict(self): """Extracts the junctions the ego vehicle will pass through.""" data = self._get_junctions_data() fake_data, filtered_data = self._filter_fake_junctions(data) self._get_fake_lane_pairs(fake_data) route_data = self._join_complex_junctions(filtered_data) self._add_junctions_topology(route_data) self._junctions = route_data def _get_junctions_data(self): """Gets all the junctions the ego passes through""" junction_data = [] junction_num = 0 start_index = 0 # Ignore the junction the ego spawns at for i in range(0, self._route_length - 1): if not self._is_junction(self._route[i]): start_index = i break for i in range(start_index, self._route_length - 1): next_wp = self._route[i+1] prev_junction = junction_data[-1] if len(junction_data) > 0 else None # Searching for the junction exit if prev_junction and prev_junction.route_exit_index is None: if not self._is_junction(next_wp) or next_wp.get_junction().id != junction_id: prev_junction.route_exit_index = i+1 # Searching for a junction elif self._is_junction(next_wp): junction_id = next_wp.get_junction().id if prev_junction: start_dist = self._accum_dist[i] prev_end_dist = self._accum_dist[prev_junction.route_exit_index] prev_junction.exit_road_length = start_dist - prev_end_dist # Same junction as the prev one and closer than 2 meters if prev_junction and prev_junction.junctions[-1].id == junction_id: start_dist = self._accum_dist[i] prev_end_dist = self._accum_dist[prev_junction.route_exit_index] distance = start_dist - prev_end_dist if distance < 2: prev_junction.junctions.append(next_wp.get_junction()) 
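# Note: consecutive junctions on the route that are less than 2 m apart are
# merged into the previous Junction entry (the new carla.Junction is appended
# to it) and route_exit_index is cleared so the exit of the merged junction is
# searched for again on later iterations.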
prev_junction.route_exit_index = None continue junction_data.append(Junction(next_wp.get_junction(), junction_num, i)) junction_num += 1 if len(junction_data) > 0: road_end_dist = self._accum_dist[self._route_length - 1] if junction_data[-1].route_exit_index: route_start_dist = self._accum_dist[junction_data[-1].route_exit_index] else: route_start_dist = self._accum_dist[self._route_length - 1] junction_data[-1].exit_road_length = road_end_dist - route_start_dist return junction_data def _filter_fake_junctions(self, data): """ Filters fake junctions. As a general note, a fake junction is that where no road lane divide in two. However, this might fail for some CARLA maps, so check junctions which have all lanes straight too """ fake_data = [] filtered_data = [] threshold = math.radians(15) for junction_data in data: used_entry_lanes = [] used_exit_lanes = [] for junction in junction_data.junctions: for entry_wp, exit_wp in junction.get_waypoints(carla.LaneType.Driving): entry_wp = self._get_junction_entry_wp(entry_wp) if not entry_wp: continue if get_lane_key(entry_wp) not in used_entry_lanes: used_entry_lanes.append(get_lane_key(entry_wp)) exit_wp = self._get_junction_exit_wp(exit_wp) if not exit_wp: continue if get_lane_key(exit_wp) not in used_exit_lanes: used_exit_lanes.append(get_lane_key(exit_wp)) if not used_entry_lanes and not used_exit_lanes: fake_data.append(junction_data) continue found_turn = False for entry_wp, exit_wp in junction_data.junctions[0].get_waypoints(carla.LaneType.Driving): entry_heading = entry_wp.transform.get_forward_vector() exit_heading = exit_wp.transform.get_forward_vector() dot = entry_heading.x * exit_heading.x + entry_heading.y * exit_heading.y if dot < math.cos(threshold): found_turn = True break if not found_turn: fake_data.append(junction_data) else: filtered_data.append(junction_data) return fake_data, filtered_data def _get_complex_junctions(self): """ Function to hardcode the topology of some complex junctions. This is done for the roundabouts, as the current API doesn't offer that info as well as others such as the gas station at Town04. 
If there are micro lanes between connected junctions, add them to the fake_lane_keys, connecting them when their topology is calculated """ complex_junctions = [] fake_lane_keys = [] if 'Town03' in self._map.name: # Roundabout, take it all as one complex_junctions.append([ self._map.get_waypoint_xodr(1100, -5, 16.6).get_junction(), self._map.get_waypoint_xodr(1624, -5, 25.3).get_junction(), self._map.get_waypoint_xodr(1655, -5, 8.3).get_junction(), self._map.get_waypoint_xodr(1772, 3, 16.2).get_junction(), self._map.get_waypoint_xodr(1206, -5, 5.9).get_junction()]) fake_lane_keys.extend([ ['37*-4', '36*-4'], ['36*-4', '37*-4'], ['37*-5', '36*-5'], ['36*-5', '37*-5'], ['38*-4', '12*-4'], ['12*-4', '38*-4'], ['38*-5', '12*-5'], ['12*-5', '38*-5']]) # Gas station complex_junctions.append([ self._map.get_waypoint_xodr(1031, -1, 11.3).get_junction(), self._map.get_waypoint_xodr(100, -1, 18.8).get_junction(), self._map.get_waypoint_xodr(1959, -1, 22.7).get_junction()]) fake_lane_keys.extend([ ['32*-2', '33*-2'], ['33*-2', '32*-2'], ['32*-1', '33*-1'], ['33*-1', '32*-1'], ['32*4', '33*4'], ['33*4', '32*4'], ['32*5', '33*5'], ['33*5', '32*5']]) elif 'Town04' in self._map.name: # Gas station complex_junctions.append([ self._map.get_waypoint_xodr(518, -1, 8.1).get_junction(), self._map.get_waypoint_xodr(886, 1, 10.11).get_junction(), self._map.get_waypoint_xodr(467, 1, 25.8).get_junction()]) self._fake_lane_pair_keys.extend(fake_lane_keys) return complex_junctions def _join_complex_junctions(self, filtered_data): """ Joins complex junctions into one. This makes it such that all the junctions, as well as their connecting lanes, are treated as the same junction """ route_data = [] prev_index = -1 # If entering a complex, add all its junctions to the list for junction_data in filtered_data: junction = junction_data.junctions[0] prev_junction = route_data[-1] if len(route_data) > 0 else None complex_junctions = self._get_complex_junctions() # Get the complex index current_index = -1 for i, complex_junctions in enumerate(complex_junctions): complex_ids = [j.id for j in complex_junctions] if junction.id in complex_ids: current_index = i break if current_index == -1: # Outside a complex, add it route_data.append(junction_data) elif current_index == prev_index: # Same complex as the previous junction prev_junction.route_exit_index = junction_data.route_exit_index else: # New complex, add it junction_ids = [j.id for j in junction_data.junctions] for complex_junction in complex_junctions: if complex_junction.id not in junction_ids: junction_data.junctions.append(complex_junction) route_data.append(junction_data) prev_index = current_index return route_data def _get_fake_lane_pairs(self, fake_data): """Gets a list of entry-exit lanes of the fake junctions""" for fake_junctions_data in fake_data: for junction in fake_junctions_data.junctions: for entry_wp, exit_wp in junction.get_waypoints(carla.LaneType.Driving): while self._is_junction(entry_wp): entry_wps = entry_wp.previous(0.5) if len(entry_wps) == 0: break # Stop when there's no prev entry_wp = entry_wps[0] if self._is_junction(entry_wp): continue # Triggered by the loops break while self._is_junction(exit_wp): exit_wps = exit_wp.next(0.5) if len(exit_wps) == 0: break # Stop when there's no prev exit_wp = exit_wps[0] if self._is_junction(exit_wp): continue # Triggered by the loops break self._fake_junction_ids.append(junction.id) self._fake_lane_pair_keys.append([get_lane_key(entry_wp), get_lane_key(exit_wp)]) def _get_junction_entry_wp(self, entry_wp): 
"""For a junction waypoint, returns a waypoint outside of it that entrys into its lane""" # Exit the junction while self._is_junction(entry_wp): entry_wps = entry_wp.previous(0.2) if len(entry_wps) == 0: return None # Stop when there's no prev entry_wp = entry_wps[0] return entry_wp def _get_junction_exit_wp(self, exit_wp): """For a junction waypoint, returns a waypoint outside of it from which the lane exits the junction""" while self._is_junction(exit_wp): exit_wps = exit_wp.next(0.2) if len(exit_wps) == 0: return None # Stop when there's no prev exit_wp = exit_wps[0] return exit_wp def _get_closest_junction_waypoint(self, waypoint, junction_wps): """ Matches a given wp to another one inside the list. This is first done by checking its key, and if this fails, the closest wp is chosen """ # Check the lane keys junction_keys = [get_lane_key(waypoint_) for waypoint_ in junction_wps] if get_lane_key(waypoint) in junction_keys: return waypoint # Get the closest one closest_dist = float('inf') closest_junction_wp = None route_location = waypoint.transform.location for junction_wp in junction_wps: distance = junction_wp.transform.location.distance(route_location) if distance < closest_dist: closest_dist = distance closest_junction_wp = junction_wp return closest_junction_wp def _is_route_wp_behind_junction_wp(self, route_wp, junction_wp): """Checks if an actor is behind the ego. Uses the route transform""" route_location = route_wp.transform.location junction_transform = junction_wp.transform junction_heading = junction_transform.get_forward_vector() wps_vec = route_location - junction_transform.location if junction_heading.x * wps_vec.x + junction_heading.y * wps_vec.y < - 0.09: # 85º return True return False def _add_junctions_topology(self, route_data): """Gets the entering and exiting lanes of a multijunction""" for junction_data in route_data: used_entry_lanes = [] used_exit_lanes = [] entry_lane_wps = [] exit_lane_wps = [] if self.debug: print(' --------------------- ') for junction in junction_data.junctions: for entry_wp, exit_wp in junction.get_waypoints(carla.LaneType.Driving): entry_wp = self._get_junction_entry_wp(entry_wp) if not entry_wp: continue if get_lane_key(entry_wp) not in used_entry_lanes: used_entry_lanes.append(get_lane_key(entry_wp)) entry_lane_wps.append(entry_wp) if self.debug: draw_point(self._world, entry_wp.transform.location, 'small', 'entry', True) exit_wp = self._get_junction_exit_wp(exit_wp) if not exit_wp: continue if get_lane_key(exit_wp) not in used_exit_lanes: used_exit_lanes.append(get_lane_key(exit_wp)) exit_lane_wps.append(exit_wp) if self.debug: draw_point(self._world, exit_wp.transform.location, 'small', 'exit', True) # Check for connecting lanes. This is pretty much for the roundabouts, but some weird geometries # make it possible for single junctions to have the same road entering and exiting. 
Two cases, # Lanes that exit one junction and enter another (or viceversa) exit_lane_keys = [get_lane_key(wp) for wp in exit_lane_wps] entry_lane_keys = [get_lane_key(wp) for wp in entry_lane_wps] for wp in list(entry_lane_wps): if get_lane_key(wp) in exit_lane_keys: entry_lane_wps.remove(wp) if self.debug: draw_point(self._world, wp.transform.location, 'small', 'connect', True) for wp in list(exit_lane_wps): if get_lane_key(wp) in entry_lane_keys: exit_lane_wps.remove(wp) if self.debug: draw_point(self._world, wp.transform.location, 'small', 'connect', True) # Lanes with a fake junction in the middle (maps junction exit to fake junction entry and viceversa) for entry_key, exit_key in self._fake_lane_pair_keys: entry_wp = None for wp in entry_lane_wps: if get_lane_key(wp) == exit_key: # A junction exit is a fake junction entry entry_wp = wp break exit_wp = None for wp in exit_lane_wps: if get_lane_key(wp) == entry_key: # A junction entry is a fake junction exit exit_wp = wp break if entry_wp and exit_wp: entry_lane_wps.remove(entry_wp) exit_lane_wps.remove(exit_wp) if self.debug: draw_point(self._world, entry_wp.transform.location, 'small', 'connect', True) draw_point(self._world, exit_wp.transform.location, 'small', 'connect', True) junction_data.entry_wps = entry_lane_wps junction_data.exit_wps = exit_lane_wps # Filter the entries and exits that correspond to the route route_entry_wp = self._route[junction_data.route_entry_index] # Junction entry for wp in get_same_dir_lanes(route_entry_wp): junction_wp = self._get_closest_junction_waypoint(wp, entry_lane_wps) junction_data.route_entry_keys.append(get_lane_key(junction_wp)) for wp in get_opposite_dir_lanes(route_entry_wp): junction_wp = self._get_closest_junction_waypoint(wp, exit_lane_wps) junction_data.opposite_exit_keys.append(get_lane_key(junction_wp)) # Junction exit if junction_data.route_exit_index: # Can be None if route ends at a junction route_exit_wp = self._route[junction_data.route_exit_index] for wp in get_same_dir_lanes(route_exit_wp): junction_wp = self._get_closest_junction_waypoint(wp, exit_lane_wps) junction_data.route_exit_keys.append(get_lane_key(junction_wp)) for wp in get_opposite_dir_lanes(route_exit_wp): junction_wp = self._get_closest_junction_waypoint(wp, entry_lane_wps) junction_data.opposite_entry_keys.append(get_lane_key(junction_wp)) # Add the entry directions of each lane with respect to the route. 
Used for scenarios 7 to 9 route_entry_yaw = route_entry_wp.transform.rotation.yaw for wp in entry_lane_wps: diff = (wp.transform.rotation.yaw - route_entry_yaw) % 360 if diff > 330.0: direction = 'ref' elif diff > 225.0: direction = 'right' elif diff > 135.0: direction = 'opposite' elif diff > 30.0: direction = 'left' else: direction = 'ref' junction_data.entry_directions[direction].append(get_lane_key(wp)) # Supposing scenario vehicles go straight, these correspond to the exit lanes of the entry directions for wp in exit_lane_wps: diff = (wp.transform.rotation.yaw - route_entry_yaw) % 360 if diff > 330.0: direction = 'ref' elif diff > 225.0: direction = 'right' elif diff > 135.0: direction = 'opposite' elif diff > 30.0: direction = 'left' else: direction = 'ref' junction_data.exit_directions[direction].append(get_lane_key(wp)) if self.debug: exit_lane = self._route[junction_data.route_exit_index] if junction_data.route_exit_index else None print('> R Entry Lane: {}'.format(get_lane_key(self._route[junction_data.route_entry_index]))) print('> R Exit Lane: {}'.format(get_lane_key(exit_lane))) entry_print = '> J Entry Lanes: ' for entry_wp in entry_lane_wps: key = get_lane_key(entry_wp) entry_print += key + ' ' * (6 - len(key)) print(entry_print) exit_print = '> J Exit Lanes: ' for exit_wp in exit_lane_wps: key = get_lane_key(exit_wp) exit_print += key + ' ' * (6 - len(key)) print(exit_print) route_entry = '> R-J Entry Lanes: ' for entry_key in junction_data.route_entry_keys: route_entry += entry_key + ' ' * (6 - len(entry_key)) print(route_entry) route_exit = '> R-J Route Exit Lanes: ' for exit_key in junction_data.route_exit_keys: route_exit += exit_key + ' ' * (6 - len(exit_key)) print(route_exit) route_oppo_entry = '> R-J Oppo Entry Lanes: ' for oppo_entry_key in junction_data.opposite_entry_keys: route_oppo_entry += oppo_entry_key + ' ' * (6 - len(oppo_entry_key)) print(route_oppo_entry) route_oppo_exit = '> R-J Oppo Exit Lanes: ' for oppo_exit_key in junction_data.opposite_exit_keys: route_oppo_exit += oppo_exit_key + ' ' * (6 - len(oppo_exit_key)) print(route_oppo_exit) ################################ ## Waypoint related functions ## ################################ def _is_junction(self, waypoint): if not waypoint.is_junction or waypoint.junction_id in self._fake_junction_ids: return False return True ################################ ## Mode functions ## ################################ def _add_actor_dict_element(self, actor_dict, actor, exit_lane_key='', at_oppo_entry_lane=False): """Adds a new actor to the actor dictionary""" actor_dict[actor] = { 'state': 'junction_entry' if not exit_lane_key else 'junction_exit', 'exit_lane_key': exit_lane_key, 'at_oppo_entry_lane': at_oppo_entry_lane } def _switch_to_junction_mode(self, junction): """Prepares the junction mode, changing the state of the actors""" self._ego_state = 'junction' for actor in list(self._road_actors): self._add_actor_dict_element(junction.actor_dict, actor) self._road_actors.remove(actor) if not self._is_scenario_2_active: self._tm.vehicle_percentage_speed_difference(actor, 0) self._road_back_actors.clear() self._road_extra_front_actors = 0 self._opposite_sources.clear() def _initialise_junction_scenario(self, direction, remove_entries, remove_exits, remove_middle): """ Removes all vehicles in a particular 'direction' as well as all actors inside the junction. 
Additionally, activates some flags to ensure the junction is empty at all times """ if self._active_junctions: scenario_junction = self._active_junctions[0] scenario_junction.scenario_info = { 'direction': direction, 'remove_entries': remove_entries, 'remove_middle': remove_middle, 'remove_exits': remove_exits, } entry_direction_keys = scenario_junction.entry_directions[direction] actor_dict = scenario_junction.actor_dict if remove_entries: for entry_source in scenario_junction.entry_sources: if get_lane_key(entry_source.entry_lane_wp) in entry_direction_keys: # Source is affected actors = entry_source.actors for actor in list(actors): if actor_dict[actor]['state'] == 'junction_entry': # Actor is at the entry lane self._destroy_actor(actor) if remove_exits: for exit_dir in scenario_junction.exit_directions[direction]: for actor in list(scenario_junction.exit_dict[exit_dir]['actors']): self._destroy_actor(actor) if remove_middle: actor_dict = scenario_junction.actor_dict for actor in list(actor_dict): if actor_dict[actor]['state'] == 'junction_middle': self._destroy_actor(actor) def _handle_junction_scenario_end(self, junction): """Ends the junction scenario interaction. This is pretty much useless as the junction scenario ends at the same time as the active junction, but in the future it might not""" junction.scenario_info = { 'direction': None, 'remove_entries': False, 'remove_middle': False, 'remove_exits': False, } def _monitor_scenario_4_end(self, ego_location): """Monitors the ego distance to the junction to know if the scenario 4 has ended""" if self._ego_exitted_junction: ref_location = self._start_ego_wp.transform.location if ego_location.distance(ref_location) > self._crossing_dist: for actor in self._scenario_4_actors: self._tm.vehicle_percentage_speed_difference(actor, 0) self._is_scenario_4_active = False self._scenario_4_actors.clear() self._ego_exitted_junction = False self._crossing_dist = None def _handle_scenario_4_interaction(self, junction, ego_wp): """ Handles the interation between the scenario 4 of the Leaderboard and the background activity. 
This removes all vehicles near the bycicle path, and stops the others so that they don't interfere """ if not self._is_scenario_4_active: return self._ego_exitted_junction = True self._start_ego_wp = ego_wp min_crossing_space = 2 # Actor exitting the junction exit_dict = junction.exit_dict for exit_key in exit_dict: if exit_key not in junction.route_exit_keys: continue actors = exit_dict[exit_key]['actors'] exit_lane_wp = exit_dict[exit_key]['ref_wp'] exit_lane_location = exit_lane_wp.transform.location for actor in list(actors): actor_location = CarlaDataProvider.get_location(actor) if not actor_location: self._destroy_actor(actor) continue dist_to_scenario = exit_lane_location.distance(actor_location) - self._crossing_dist actor_length = actor.bounding_box.extent.x if abs(dist_to_scenario) < actor_length + min_crossing_space: self._destroy_actor(actor) continue if dist_to_scenario > 0: continue # Don't stop the actors that have already passed the scenario if get_lane_key(ego_wp) == get_lane_key(exit_lane_wp): self._destroy_actor(actor) continue # Actor at the ego lane and between the ego and scenario self._scenario_4_actors.append(actor) # Actor entering the junction for entry_source in junction.entry_sources: entry_lane_wp = entry_source.entry_lane_wp if get_lane_key(entry_lane_wp) in junction.opposite_entry_keys: # Source is affected actors = entry_source.actors entry_lane_location = entry_lane_wp.transform.location for actor in list(actors): actor_location = CarlaDataProvider.get_location(actor) if not actor_location: self._destroy_actor(actor) continue crossing_space = abs(entry_lane_location.distance(actor_location) - self._crossing_dist) actor_length = actor.bounding_box.extent.x if crossing_space < actor_length + min_crossing_space: self._destroy_actor(actor) continue # Actors blocking the path of the crossing obstacle self._scenario_4_actors.append(actor) # Actors entering the next junction if len(self._active_junctions) > 1: next_junction = self._active_junctions[1] actors_dict = next_junction.actor_dict for actor in list(actors_dict): if actors_dict[actor]['state'] != 'junction_entry': continue actor_location = CarlaDataProvider.get_location(actor) if not actor_location: self._destroy_actor(actor) continue dist_to_scenario = exit_lane_location.distance(actor_location) - self._crossing_dist actor_length = actor.bounding_box.extent.x if abs(dist_to_scenario) < actor_length + min_crossing_space: self._destroy_actor(actor) continue if dist_to_scenario > 0: continue # Don't stop the actors that have already passed the scenario actor_wp = self._map.get_waypoint(actor_location) if get_lane_key(ego_wp) == get_lane_key(actor_wp): self._destroy_actor(actor) continue # Actor at the ego lane and between the ego and scenario self._scenario_4_actors.append(actor) # Immediately freeze the actors for actor in self._scenario_4_actors: try: actor.set_target_velocity(carla.Vector3D(0, 0, 0)) self._tm.vehicle_percentage_speed_difference(actor, 100) except RuntimeError: pass # Just in case the actor is not alive def _end_junction_behavior(self, ego_wp, junction): """ Destroys unneeded actors (those behind the ego), moves the rest to other data structures and cleans up the variables. 
If no other junctions are active, starts road mode """ actor_dict = junction.actor_dict route_exit_keys = junction.route_exit_keys self._active_junctions.pop(0) for actor in list(actor_dict): location = CarlaDataProvider.get_location(actor) if not location or self._is_location_behind_ego(location): self._destroy_actor(actor) continue self._tm.vehicle_percentage_speed_difference(actor, 0) if actor_dict[actor]['at_oppo_entry_lane']: self._opposite_actors.append(actor) self._tm.ignore_lights_percentage(actor, 100) self._tm.ignore_signs_percentage(actor, 100) continue if not self._active_junctions and actor_dict[actor]['exit_lane_key'] in route_exit_keys: self._road_actors.append(actor) continue self._destroy_actor(actor) self._handle_scenario_4_interaction(junction, ego_wp) self._handle_junction_scenario_end(junction) self._switch_junction_road_sources(junction) if not self._active_junctions: self._ego_state = 'road' self._initialise_opposite_sources() self._initialise_road_checker() self._road_ego_key = self._get_ego_route_lane_key(ego_wp) for source in junction.exit_sources: self._road_back_actors[source.mapped_key] = [] def _switch_junction_road_sources(self, junction): """ Removes the sources part of the previous road and gets the ones of the exitted junction. """ self._road_sources.clear() new_sources = junction.exit_sources self._road_sources.extend(new_sources) def _search_for_next_junction(self): """Check if closeby to a junction. The closest one will always be the first""" if not self._junctions: return None ego_accum_dist = self._accum_dist[self._route_index] junction_accum_dist = self._accum_dist[self._junctions[0].route_entry_index] if junction_accum_dist - ego_accum_dist < self._junction_detection_dist: # Junctions closeby return self._junctions.pop(0) return None def _initialise_connecting_lanes(self, junction): """ Moves the actors currently at the exit lane of the last junction to entry actors of the newly created junction """ if len(self._active_junctions) > 0: prev_junction = self._active_junctions[-1] route_exit_keys = prev_junction.route_exit_keys exit_dict = prev_junction.exit_dict for exit_key in route_exit_keys: exit_actors = exit_dict[exit_key]['actors'] for actor in list(exit_actors): self._remove_actor_info(actor) self._add_actor_dict_element(junction.actor_dict, actor) self._tm.vehicle_percentage_speed_difference(actor, 0) def _monitor_nearby_junctions(self): """ Monitors when the ego approaches a junction, preparing the junction mode when it happens. 
This can be triggered even if there is another junction behavior happening """ junction = self._search_for_next_junction() if not junction: return if self._ego_state == 'road': self._switch_to_junction_mode(junction) self._initialise_junction_sources(junction) self._initialise_junction_exits(junction) self._initialise_connecting_lanes(junction) self._active_junctions.append(junction) def _monitor_ego_junction_exit(self, ego_wp): """ Monitors when the ego exits the junctions, preparing the road mode when that happens """ current_junction = self._active_junctions[0] exit_index = current_junction.route_exit_index if exit_index and self._route_index >= exit_index: self._end_junction_behavior(ego_wp, current_junction) def _add_incoming_actors(self, junction, source): """Checks nearby actors that will pass through the source, adding them to it""" source_location = source.wp.transform.location if not source.previous_lane_keys: source.previous_lane_keys = [get_lane_key(prev_wp) for prev_wp in source.wp.previous(self._reuse_dist)] source.previous_lane_keys.append(get_lane_key(source.wp)) for actor in self._get_actors(): if actor in source.actors: continue # Don't use actors already part of the source actor_location = CarlaDataProvider.get_location(actor) if actor_location is None: continue # No idea where the actor is, ignore it if source_location.distance(actor_location) > self._reuse_dist: continue # Don't use actors far away actor_wp = self._map.get_waypoint(actor_location) if get_lane_key(actor_wp) not in source.previous_lane_keys: continue # Don't use actors that won't pass through the source self._tm.vehicle_percentage_speed_difference(actor, 0) self._remove_actor_info(actor) source.actors.append(actor) at_oppo_entry_lane = get_lane_key(source.entry_lane_wp) in junction.opposite_entry_keys self._add_actor_dict_element(junction.actor_dict, actor, at_oppo_entry_lane=at_oppo_entry_lane) return actor def _update_road_sources(self, ego_location): """ Manages the sources that spawn actors behind the ego. Sources are destroyed after their actors are spawned """ for source in list(self._road_sources): if self.debug: draw_point(self._world, source.wp.transform.location, 'small', self._ego_state, False) draw_string(self._world, source.wp.transform.location, str(len(source.actors)), self._ego_state, False) if len(source.actors) >= self._road_back_vehicles: self._road_sources.remove(source) continue if len(source.actors) == 0: location = ego_location else: location = CarlaDataProvider.get_location(source.actors[-1]) if not location: continue distance = location.distance(source.wp.transform.location) # Spawn a new actor if the last one is far enough if distance > self._road_spawn_dist: actor = self._spawn_source_actor(source, ego_dist=self._road_vehicle_dist) if actor is None: continue self._tm.distance_to_leading_vehicle(actor, self._road_vehicle_dist) source.actors.append(actor) if self._ego_state == 'road': self._road_actors.append(actor) if source.mapped_key in self._road_back_actors: self._road_back_actors[source.mapped_key].append(actor) elif self._ego_state == 'junction': self._add_actor_dict_element(self._active_junctions[0].actor_dict, actor) ################################ ## Behavior related functions ## ################################ def _initialise_road_behavior(self, road_wps, ego_wp): """Initialises the road behavior, consisting on several vehicle in front of the ego, and several on the back. 
The ones on the back are spawned only outside junctions, and if not enough are spawned, sources are created that will do so later on""" spawn_wps = [] # Vehicles in front for wp in road_wps: next_wp = wp for _ in range(self._road_front_vehicles): next_wps = next_wp.next(self._road_spawn_dist) if len(next_wps) != 1 or self._is_junction(next_wps[0]): break # Stop when there's no next or found a junction next_wp = next_wps[0] spawn_wps.append(next_wp) for actor in self._spawn_actors(spawn_wps): self._tm.distance_to_leading_vehicle(actor, self._road_vehicle_dist) self._road_actors.append(actor) # Vehicles on the side for wp in road_wps: self._road_back_actors[get_lane_key(wp)] = [] if wp.lane_id == ego_wp.lane_id: continue actor = self._spawn_actors([wp])[0] self._tm.distance_to_leading_vehicle(actor, self._road_vehicle_dist) self._road_actors.append(actor) self._road_back_actors[get_lane_key(wp)].append(actor) # Vehicles behind for wp in road_wps: spawn_wps = [] prev_wp = wp for _ in range(self._road_back_vehicles): prev_wps = prev_wp.previous(self._road_spawn_dist) if len(prev_wps) != 1 or self._is_junction(prev_wps[0]): break # Stop when there's no next or found a junction prev_wp = prev_wps[0] spawn_wps.append(prev_wp) actors = self._spawn_actors(spawn_wps) for actor in actors: self._tm.distance_to_leading_vehicle(actor, self._road_vehicle_dist) self._road_actors.append(actor) self._road_back_actors[get_lane_key(wp)].append(actor) # If not spawned enough, create actor soruces behind the ego if len(actors) < self._road_back_vehicles: self._road_sources.append(Source(prev_wp, actors)) def _initialise_opposite_sources(self): """ Gets the waypoints where the actor sources that spawn actors in the opposite direction will be located. These are at a fixed distance from the ego, but never entering junctions """ self._opposite_route_index = None if not self._junctions: next_junction_index = self._route_length - 1 else: next_junction_index = self._junctions[0].route_entry_index ego_accum_dist = self._accum_dist[self._route_index] for i in range(self._route_index, next_junction_index): if self._accum_dist[i] - ego_accum_dist > self._opposite_sources_dist: self._opposite_route_index = i break if not self._opposite_route_index: # Junction is closer than the opposite source distance self._opposite_route_index = next_junction_index oppo_wp = self._route[self._opposite_route_index] for wp in get_opposite_dir_lanes(oppo_wp): self._opposite_sources.append(Source(wp, [])) def _initialise_road_checker(self): """ Gets the waypoints in front of the ego to continuously check if the road changes """ self._road_checker_index = None if not self._junctions: upper_limit = self._route_length - 1 else: upper_limit = self._junctions[0].route_entry_index ego_accum_dist = self._accum_dist[self._route_index] for i in range(self._route_index, upper_limit): if self._accum_dist[i] - ego_accum_dist > self._max_radius: self._road_checker_index = i break if not self._road_checker_index: self._road_checker_index = upper_limit def _initialise_junction_sources(self, junction): """ Initializes the actor sources to ensure the junction is always populated. 
They are placed at certain distance from the junction, but are stopped if another junction is found, to ensure the spawned actors always move towards the activated one """ remove_entries = junction.scenario_info['remove_entries'] direction = junction.scenario_info['direction'] entry_lanes = [] if not direction else junction.entry_directions[direction] for wp in junction.entry_wps: entry_lane_key = get_lane_key(wp) if entry_lane_key in junction.route_entry_keys: continue # Ignore the road from which the route enters if remove_entries and entry_lane_key in entry_lanes: continue # Ignore entries that are part of active junction scenarios moved_dist = 0 prev_wp = wp while moved_dist < self._junction_sources_dist: prev_wps = prev_wp.previous(5) if len(prev_wps) == 0 or self._is_junction(prev_wps[0]): break prev_wp = prev_wps[0] moved_dist += 5 junction.entry_sources.append(Source(prev_wp, [], entry_lane_wp=wp)) def _initialise_junction_exits(self, junction): """ Computes and stores the max capacity of the exit. Prepares the behavior of the next road by creating actors at the route exit, and the sources that'll create actors behind the ego """ exit_wps = junction.exit_wps route_exit_keys = junction.route_exit_keys remove_exits = junction.scenario_info['remove_exits'] direction = junction.scenario_info['direction'] exit_lanes = [] if not direction else junction.exit_directions[direction] for wp in exit_wps: max_actors = 0 max_distance = 0 exiting_wps = [] next_wp = wp for i in range(max(self._road_front_vehicles, 1)): # Get the moving distance (first jump is higher to allow space for another vehicle) if i == 0: move_dist = 2 * self._junction_spawn_dist else: move_dist = self._junction_spawn_dist # And move such distance next_wps = next_wp.next(move_dist) if len(next_wps) == 0: break # Stop when there's no next next_wp = next_wps[0] if max_actors > 0 and self._is_junction(next_wp): break # Stop when a junction is found max_actors += 1 max_distance += move_dist exiting_wps.insert(0, next_wp) junction.exit_dict[get_lane_key(wp)] = { 'actors': [], 'max_actors': max_actors, 'ref_wp': wp, 'max_distance': max_distance } exit_lane_key = get_lane_key(wp) if remove_exits and exit_lane_key in exit_lanes: continue # The direction is prohibited as a junction scenario is active if exit_lane_key in route_exit_keys: junction.exit_sources.append(Source(wp, [])) actors = self._spawn_actors(exiting_wps) for actor in actors: self._tm.distance_to_leading_vehicle(actor, self._junction_vehicle_dist) self._add_actor_dict_element(junction.actor_dict, actor, exit_lane_key=exit_lane_key) junction.exit_dict[exit_lane_key]['actors'] = actors def _update_junction_sources(self): """Checks the actor sources to see if new actors have to be created""" for junction in self._active_junctions: remove_entries = junction.scenario_info['remove_entries'] direction = junction.scenario_info['direction'] entry_lanes = [] if not direction else junction.entry_directions[direction] actor_dict = junction.actor_dict for source in junction.entry_sources: if self.debug: draw_point(self._world, source.wp.transform.location, 'small', 'junction', False) draw_string(self._world, source.wp.transform.location, str(len(source.actors)), 'junction', False) entry_lane_key = get_lane_key(source.entry_lane_wp) at_oppo_entry_lane = entry_lane_key in junction.opposite_entry_keys # The direction is prohibited as a junction scenario is active if remove_entries and entry_lane_key in entry_lanes: continue self._add_incoming_actors(junction, source) # Cap the 
amount of alive actors if len(source.actors) >= self._junction_sources_max_actors: continue # Calculate distance to the last created actor if len(source.actors) == 0: distance = self._junction_spawn_dist + 1 else: actor_location = CarlaDataProvider.get_location(source.actors[-1]) if not actor_location: continue distance = actor_location.distance(source.wp.transform.location) # Spawn a new actor if the last one is far enough if distance > self._junction_spawn_dist: actor = self._spawn_source_actor(source) if not actor: continue self._tm.distance_to_leading_vehicle(actor, self._junction_vehicle_dist) self._add_actor_dict_element(actor_dict, actor, at_oppo_entry_lane=at_oppo_entry_lane) source.actors.append(actor) def _found_a_road_change(self, old_index, new_index, ignore_false_junctions=True): """Checks if the new route waypoint is part of a new road (excluding fake junctions)""" if new_index == old_index: return False new_wp = self._route[new_index] old_wp = self._route[old_index] if get_road_key(new_wp) == get_road_key(old_wp): return False if ignore_false_junctions: new_wp_junction = new_wp.get_junction() if new_wp_junction and new_wp_junction.id in self._fake_junction_ids: return False return True def _move_road_checker(self, prev_index, current_index): """ Continually check the road in front to see if it has changed its topology. If so and the number of lanes have reduced, remove the actor of the lane that merges into others """ if self.debug: checker_wp = self._route[self._road_checker_index] draw_point(self._world, checker_wp.transform.location, 'small', 'road', False) if prev_index == current_index: return # Get the new route tracking wp checker_index = None last_index = self._junctions[0].route_entry_index if self._junctions else self._route_length - 1 current_accum_dist = self._accum_dist[current_index] for i in range(self._road_checker_index, last_index): accum_dist = self._accum_dist[i] if accum_dist - current_accum_dist >= self._max_radius: checker_index = i break if not checker_index: checker_index = last_index if self._found_a_road_change(self._road_checker_index, checker_index): new_wps = get_same_dir_lanes(self._route[checker_index]) old_wps = get_same_dir_lanes(self._route[self._road_checker_index]) if len(new_wps) >= len(old_wps): pass else: new_accum_dist = self._accum_dist[checker_index] prev_accum_dist = self._accum_dist[self._road_checker_index] route_move_dist = new_accum_dist - prev_accum_dist unmapped_lane_keys = [] for old_wp in list(old_wps): location = old_wp.transform.location mapped_wp = None for new_wp in new_wps: if location.distance(new_wp.transform.location) < 1.1 * route_move_dist: mapped_wp = new_wp break if not mapped_wp: unmapped_lane_keys.append(get_lane_key(old_wp)) for actor in list(self._road_actors): location = CarlaDataProvider.get_location(actor) if not location: continue wp = self._map.get_waypoint(location) if get_lane_key(wp) in unmapped_lane_keys: self._destroy_actor(actor) self._road_checker_index = checker_index def _move_opposite_sources(self, prev_index, current_index): """ Moves the sources of the opposite direction back. 
Additionally, tracks a point a certain distance in front of the ego to see if the road topology has to be recalculated """ if self.debug: for source in self._opposite_sources: draw_point(self._world, source.wp.transform.location, 'small', 'opposite', False) draw_string(self._world, source.wp.transform.location, str(len(source.actors)), 'opposite', False) route_wp = self._route[self._opposite_route_index] draw_point(self._world, route_wp.transform.location, 'small', 'opposite', False) if prev_index == current_index: return # Get the new route tracking wp oppo_route_index = None last_index = self._junctions[0].route_entry_index if self._junctions else self._route_length - 1 current_accum_dist = self._accum_dist[current_index] for i in range(self._opposite_route_index, last_index): accum_dist = self._accum_dist[i] if accum_dist - current_accum_dist >= self._opposite_sources_dist: oppo_route_index = i break if not oppo_route_index: oppo_route_index = last_index if self._found_a_road_change(self._opposite_route_index, oppo_route_index): # Recheck the left lanes as the topology might have changed new_opposite_sources = [] new_opposite_wps = get_opposite_dir_lanes(self._route[oppo_route_index]) # Map the old sources to the new wps, and add new ones / remove uneeded ones new_accum_dist = self._accum_dist[oppo_route_index] prev_accum_dist = self._accum_dist[self._opposite_route_index] route_move_dist = new_accum_dist - prev_accum_dist for wp in new_opposite_wps: location = wp.transform.location new_source = None for source in self._opposite_sources: if location.distance(source.wp.transform.location) < 1.1 * route_move_dist: new_source = source break if new_source: new_source.wp = wp new_opposite_sources.append(source) self._opposite_sources.remove(source) else: new_opposite_sources.append(Source(wp, [])) self._opposite_sources = new_opposite_sources else: prev_accum_dist = self._accum_dist[prev_index] current_accum_dist = self._accum_dist[current_index] move_dist = current_accum_dist - prev_accum_dist if move_dist <= 0: return for source in self._opposite_sources: wp = source.wp if not self._is_junction(wp): prev_wps = wp.previous(move_dist) if len(prev_wps) == 0: continue prev_wp = prev_wps[0] source.wp = prev_wp self._opposite_route_index = oppo_route_index def _update_opposite_sources(self): """Checks the opposite actor sources to see if new actors have to be created""" for source in self._opposite_sources: # Cap the amount of alive actors if len(source.actors) >= self._opposite_sources_max_actors: continue # Calculate distance to the last created actor if len(source.actors) == 0: distance = self._opposite_spawn_dist + 1 else: actor_location = CarlaDataProvider.get_location(source.actors[-1]) if not actor_location: continue distance = source.wp.transform.location.distance(actor_location) # Spawn a new actor if the last one is far enough if distance > self._opposite_spawn_dist: actor = self._spawn_source_actor(source) if actor is None: continue self._tm.distance_to_leading_vehicle(actor, self._opposite_vehicle_dist) self._opposite_actors.append(actor) source.actors.append(actor) def _update_parameters(self): """Changes the parameters depending on the blackboard variables and / or the speed of the ego""" road_behavior_data = py_trees.blackboard.Blackboard().get("BA_RoadBehavior") if road_behavior_data: num_front_vehicles, num_back_vehicles, vehicle_dist, spawn_dist = road_behavior_data if num_front_vehicles: self._road_front_vehicles = num_front_vehicles if num_back_vehicles: 
self._road_back_vehicles = num_back_vehicles if vehicle_dist: self._road_vehicle_dist = vehicle_dist if spawn_dist: self._road_spawn_dist = spawn_dist self._get_road_radius() py_trees.blackboard.Blackboard().set("BA_RoadBehavior", None, True) opposite_behavior_data = py_trees.blackboard.Blackboard().get("BA_OppositeBehavior") if opposite_behavior_data: source_dist, vehicle_dist, spawn_dist, max_actors = road_behavior_data if source_dist: if source_dist < self._junction_sources_dist: print("WARNING: Opposite sources distance is lower than the junction ones. Ignoring it") else: self._opposite_sources_dist = source_dist if vehicle_dist: self._opposite_vehicle_dist = vehicle_dist if spawn_dist: self._opposite_spawn_dist = spawn_dist if max_actors: self._opposite_sources_max_actors = max_actors py_trees.blackboard.Blackboard().set("BA_OppositeBehavior", None, True) junction_behavior_data = py_trees.blackboard.Blackboard().get("BA_JunctionBehavior") if junction_behavior_data: source_dist, vehicle_dist, spawn_dist, max_actors = road_behavior_data if source_dist: if source_dist > self._opposite_sources_dist: print("WARNING: Junction sources distance is higher than the opposite ones. Ignoring it") else: self._junction_sources_dist = source_dist if vehicle_dist: self._junction_vehicle_dist = vehicle_dist if spawn_dist: self._junction_spawn_dist = spawn_dist if max_actors: self._junction_sources_max_actors = max_actors py_trees.blackboard.Blackboard().set("BA_JunctionBehavior", None, True) break_duration = py_trees.blackboard.Blackboard().get("BA_Scenario2") if break_duration: if self._is_scenario_2_active: print("WARNING: A break scenario was requested but another one is already being triggered.") else: self._activate_break_scenario = True self._break_duration = break_duration py_trees.blackboard.Blackboard().set("BA_Scenario2", None, True) crossing_dist = py_trees.blackboard.Blackboard().get("BA_Scenario4") if crossing_dist: self._is_scenario_4_active = True self._crossing_dist = crossing_dist py_trees.blackboard.Blackboard().set("BA_Scenario4", None, True) direction = py_trees.blackboard.Blackboard().get("BA_Scenario7") if direction: self._initialise_junction_scenario(direction, True, True, True) py_trees.blackboard.Blackboard().set("BA_Scenario7", None, True) direction = py_trees.blackboard.Blackboard().get("BA_Scenario8") if direction: self._initialise_junction_scenario(direction, True, True, True) py_trees.blackboard.Blackboard().set("BA_Scenario8", None, True) direction = py_trees.blackboard.Blackboard().get("BA_Scenario9") if direction: self._initialise_junction_scenario(direction, True, False, True) py_trees.blackboard.Blackboard().set("BA_Scenario9", None, True) direction = py_trees.blackboard.Blackboard().get("BA_Scenario10") if direction: self._initialise_junction_scenario(direction, False, False, False) py_trees.blackboard.Blackboard().set("BA_Scenario10", None, True) self._compute_parameters() def _compute_parameters(self): """Computes the parameters that are dependent on the speed of the ego. """ ego_speed = CarlaDataProvider.get_velocity(self._ego_actor) # As the vehicles don't move if the agent doesn't, some agents might get blocked forever. # Partially avoid this by adding an extra distance to the radius when the vehicle is stopped # in the middle of the road and unaffected by any object such as traffic lights or stops. 
if ego_speed == 0 \ and not self._is_scenario_2_active \ and not self._ego_actor.is_at_traffic_light() \ and len(self._active_junctions) <= 0: self._extra_radius = min(self._extra_radius + self._extra_radius_increase_ratio, self._max_extra_radius) # At all cases, reduce it if the agent is moving if ego_speed > 0 and self._extra_radius > 0: self._extra_radius = max(self._extra_radius - self._extra_radius_increase_ratio, 0) self._min_radius = self._base_min_radius + self._radius_increase_ratio * ego_speed + self._extra_radius self._max_radius = self._base_max_radius + self._radius_increase_ratio * ego_speed + self._extra_radius self._junction_detection_dist = self._max_radius def _manage_break_scenario(self): """ Manages the break scenario, where all road vehicles in front of the ego suddenly stop, wait for a bit, and start moving again. This will never trigger unless done so from outside. """ if self._is_scenario_2_active: self._next_scenario_time -= self._world.get_snapshot().timestamp.delta_seconds if self._next_scenario_time <= 0: for actor in self._scenario_2_actors: self._tm.vehicle_percentage_speed_difference(actor, 0) lights = actor.get_light_state() lights &= ~carla.VehicleLightState.Brake actor.set_light_state(carla.VehicleLightState(lights)) self._scenario_2_actors = [] self._is_scenario_2_active = False elif self._activate_break_scenario: for actor in self._road_actors: location = CarlaDataProvider.get_location(actor) if location and not self._is_location_behind_ego(location): self._scenario_2_actors.append(actor) self._tm.vehicle_percentage_speed_difference(actor, 100) lights = actor.get_light_state() lights |= carla.VehicleLightState.Brake actor.set_light_state(carla.VehicleLightState(lights)) self._is_scenario_2_active = True self._activate_break_scenario = False self._next_scenario_time = self._break_duration ############################# ## Actor functions ## ############################# def _spawn_actors(self, spawn_wps): """Spawns several actors in batch""" spawn_transforms = [] for wp in spawn_wps: spawn_transforms.append( carla.Transform(wp.transform.location + carla.Location(z=self._spawn_vertical_shift), wp.transform.rotation) ) actors = CarlaDataProvider.request_new_batch_actors( 'vehicle.*', len(spawn_transforms), spawn_transforms, True, False, 'background', safe_blueprint=True, tick=False) if not actors: return actors for actor in actors: self._tm.auto_lane_change(actor, False) if self._night_mode: for actor in actors: actor.set_light_state(carla.VehicleLightState( carla.VehicleLightState.Position | carla.VehicleLightState.LowBeam)) return actors def _spawn_source_actor(self, source, ego_dist=0): """Given a source, spawns an actor at that source""" ego_location = CarlaDataProvider.get_location(self._ego_actor) source_transform = source.wp.transform if ego_location.distance(source_transform.location) < ego_dist: return None new_transform = carla.Transform( source_transform.location + carla.Location(z=self._spawn_vertical_shift), source_transform.rotation ) actor = CarlaDataProvider.request_new_actor( 'vehicle.*', new_transform, rolename='background', autopilot=True, random_location=False, safe_blueprint=True, tick=False) if not actor: return actor self._tm.auto_lane_change(actor, False) if self._night_mode: actor.set_light_state(carla.VehicleLightState( carla.VehicleLightState.Position | carla.VehicleLightState.LowBeam)) return actor def _is_location_behind_ego(self, location): """Checks if an actor is behind the ego. 
Uses the route transform""" ego_transform = self._route[self._route_index].transform ego_heading = ego_transform.get_forward_vector() ego_actor_vec = location - ego_transform.location if ego_heading.x * ego_actor_vec.x + ego_heading.y * ego_actor_vec.y < - 0.17: # 100º return True return False def _get_ego_route_lane_key(self, route_wp): """ Gets the route lane key of the ego. This corresponds to the same lane if the ego is driving normally, but if is is going in opposite direction, the route's leftmost one is chosen instead """ location = CarlaDataProvider.get_location(self._ego_actor) ego_true_wp = self._map.get_waypoint(location) if get_road_key(ego_true_wp) != get_road_key(route_wp): # Just return the default value as two different roads are being compared. # This might happen for when moving to a new road and should be fixed next frame return get_lane_key(route_wp) yaw_diff = (ego_true_wp.transform.rotation.yaw - route_wp.transform.rotation.yaw) % 360 if yaw_diff < 90 or yaw_diff > 270: return get_lane_key(ego_true_wp) else: # Get the first lane of the opposite direction leftmost_wp = route_wp while True: possible_left_wp = leftmost_wp.get_left_lane() if possible_left_wp is None or possible_left_wp.lane_id * leftmost_wp.lane_id < 0: break leftmost_wp = possible_left_wp return get_lane_key(leftmost_wp) def _update_road_actors(self, prev_ego_index, current_ego_index): """ Dynamically controls the actor speed in front of the ego. Not applied to those behind it so that they can catch up it """ route_wp = self._route[current_ego_index] scenario_actors = self._scenario_4_actors + self._scenario_2_actors for actor in self._road_actors: location = CarlaDataProvider.get_location(actor) if not location: continue if self.debug: back_actor = False for lane in self._road_back_actors: if actor in self._road_back_actors[lane]: back_actor = True if back_actor: draw_string(self._world, location, 'R(B)', 'road', False) else: draw_string(self._world, location, 'R(F)', 'road', False) if actor in scenario_actors: continue if self._is_location_behind_ego(location): continue distance = location.distance(route_wp.transform.location) speed_red = (distance - self._min_radius) / (self._max_radius - self._min_radius) * 100 speed_red = np.clip(speed_red, 0, 100) self._tm.vehicle_percentage_speed_difference(actor, speed_red) # Check how the vehicles behind are self._check_back_vehicles(prev_ego_index, current_ego_index) def _check_back_vehicles(self, prev_route_index, current_route_index): """ Checks if any of the vehicles that should be behind the ego are in front, updating the road radius. 
This is done by monitoring the closest lane key to the ego that is part of the route, and needs some remaping when the ego enters a new road """ route_wp = self._route[current_route_index] prev_route_wp = self._route[prev_route_index] check_dist = 1.1 * route_wp.transform.location.distance(prev_route_wp.transform.location) if prev_route_index != current_route_index: road_change = self._found_a_road_change(prev_route_index, current_route_index, ignore_false_junctions=False) if not self._is_junction(prev_route_wp) and road_change: # Get all the wps of the new road if not route_wp.is_junction: new_wps = get_same_dir_lanes(route_wp) else: # Entering a false junction new_wps = [] for enter_wp, _ in route_wp.get_junction().get_waypoints(carla.LaneType.Driving): if get_road_key(enter_wp) == get_road_key(route_wp): new_wps.append(enter_wp) # Get all the wps of the old road if not prev_route_wp.is_junction: old_wps = get_same_dir_lanes(prev_route_wp) else: # Exitting a false junction old_wps = [] for _, exit_wp in prev_route_wp.get_junction().get_waypoints(carla.LaneType.Driving): if get_road_key(exit_wp) == get_road_key(prev_route_wp): old_wps.append(exit_wp) # Map the new lanes to the old ones mapped_keys = {} unmapped_wps = new_wps for old_wp in list(old_wps): location = old_wp.transform.location mapped_wp = None for new_wp in unmapped_wps: if location.distance(new_wp.transform.location) < check_dist: mapped_wp = new_wp break if mapped_wp: unmapped_wps.remove(mapped_wp) mapped_keys[get_lane_key(old_wp)] = get_lane_key(mapped_wp) # Remake the road back actors dictionary new_road_back_actors = {} for lane_key in self._road_back_actors: if lane_key not in mapped_keys: continue # A lane ended at that road new_lane_key = mapped_keys[lane_key] new_road_back_actors[new_lane_key] = self._road_back_actors[lane_key] # For the active sources, change the mapped key to the new road keys for source in self._road_sources: if source.mapped_key in mapped_keys: source.mapped_key = mapped_keys[source.mapped_key] self._road_back_actors = new_road_back_actors # New lanes, add new sources for unmapped_wp in unmapped_wps: source_wps = unmapped_wp.next(self._road_new_sources_dist) if len(source_wps) != 1: continue new_source = Source(source_wps[0], []) self._road_sources.append(new_source) self._road_back_actors[new_source.mapped_key] = [] if not self._road_ego_key in mapped_keys: # Return the default. This might happen when the route lane ends and should be fixed next frame self._road_ego_key = get_lane_key(route_wp) else: self._road_ego_key = mapped_keys[self._road_ego_key] else: self._road_ego_key = self._get_ego_route_lane_key(route_wp) # Get the amount of vehicles in front of the ego if not self._road_ego_key in self._road_back_actors: return self._road_extra_front_actors = 0 for actor in self._road_back_actors[self._road_ego_key]: if not self._is_location_behind_ego(actor.get_location()): self._road_extra_front_actors += 1 self._get_road_radius() self._compute_parameters() def _update_junction_actors(self): """ Handles an actor depending on their previous state. Actors entering the junction have its exit monitored through their waypoint. When they exit, they are either moved to a connecting junction, or added to the exit dictionary. 
Actors that exited the junction will stop after a certain distance """ if len(self._active_junctions) == 0: return max_index = len(self._active_junctions) - 1 for i, junction in enumerate(self._active_junctions): if self.debug: route_keys = junction.route_entry_keys + junction.route_exit_keys route_oppo_keys = junction.opposite_entry_keys + junction.opposite_exit_keys for wp in junction.entry_wps + junction.exit_wps: if get_lane_key(wp) in route_keys: draw_point(self._world, wp.transform.location, 'medium', 'road', False) elif get_lane_key(wp) in route_oppo_keys: draw_point(self._world, wp.transform.location, 'medium', 'opposite', False) else: draw_point(self._world, wp.transform.location, 'medium', 'junction', False) actor_dict = junction.actor_dict exit_dict = junction.exit_dict remove_middle = junction.scenario_info['remove_middle'] for actor in list(actor_dict): if actor not in actor_dict: continue # Actor was removed during the loop location = CarlaDataProvider.get_location(actor) if not location: continue state, exit_lane_key, _ = actor_dict[actor].values() if self.debug: string = 'J' + str(i+1) + "_" + state[9:11] draw_string(self._world, location, string, self._ego_state, False) # Monitor its entry if state == 'junction_entry': actor_wp = self._map.get_waypoint(location) if self._is_junction(actor_wp) and junction.contains(actor_wp.get_junction()): if remove_middle: self._destroy_actor(actor) # Don't clutter the junction if a junction scenario is active continue actor_dict[actor]['state'] = 'junction_middle' # Monitor its exit and destroy an actor if needed elif state == 'junction_middle': actor_wp = self._map.get_waypoint(location) actor_lane_key = get_lane_key(actor_wp) if not self._is_junction(actor_wp) and actor_lane_key in exit_dict: if i < max_index and actor_lane_key in junction.route_exit_keys: # Exited through a connecting lane in the route direction. self._remove_actor_info(actor) other_junction = self._active_junctions[i+1] self._add_actor_dict_element(other_junction.actor_dict, actor) elif i > 0 and actor_lane_key in junction.opposite_exit_keys: # Exited through a connecting lane in the opposite direction. # THIS SHOULD NEVER HAPPEN, an entry source should have already added it. other_junction = self._active_junctions[i-1] if actor not in other_junction.actor_dict: self._remove_actor_info(actor) self._add_actor_dict_element(other_junction.actor_dict, actor, at_oppo_entry_lane=True) else: # Check the lane capacity exit_dict[actor_lane_key]['ref_wp'] = actor_wp actor_dict[actor]['state'] = 'junction_exit' actor_dict[actor]['exit_lane_key'] = actor_lane_key actors = exit_dict[actor_lane_key]['actors'] if len(actors) > 0 and len(actors) >= exit_dict[actor_lane_key]['max_actors']: self._destroy_actor(actors[0]) # This is always the front most vehicle actors.append(actor) # Deactivate them when far from the junction elif state == 'junction_exit': distance = location.distance(exit_dict[exit_lane_key]['ref_wp'].transform.location) if distance > exit_dict[exit_lane_key]['max_distance']: self._tm.vehicle_percentage_speed_difference(actor, 100) actor_dict[actor]['state'] = 'junction_inactive' # Wait for something to happen elif state == 'junction_inactive': pass def _update_opposite_actors(self, ref_transform): """ Updates the opposite actors. 
This involves tracking their position, removing if too far behind the ego """ max_dist = max(self._opposite_removal_dist, self._opposite_spawn_dist) for actor in list(self._opposite_actors): location = CarlaDataProvider.get_location(actor) if not location: continue if self.debug: draw_string(self._world, location, 'O', 'opposite', False) distance = location.distance(ref_transform.location) if distance > max_dist and self._is_location_behind_ego(location): self._destroy_actor(actor) def _remove_actor_info(self, actor): """Removes all the references of the actor""" if actor in self._road_actors: self._road_actors.remove(actor) if actor in self._opposite_actors: self._opposite_actors.remove(actor) if actor in self._scenario_2_actors: self._scenario_2_actors.remove(actor) if actor in self._scenario_4_actors: self._scenario_4_actors.remove(actor) for road_source in self._road_sources: if actor in road_source.actors: road_source.actors.remove(actor) break for lane in self._road_back_actors: if actor in self._road_back_actors[lane]: self._road_back_actors[lane].remove(actor) break for opposite_source in self._opposite_sources: if actor in opposite_source.actors: opposite_source.actors.remove(actor) break for junction in self._active_junctions: junction.actor_dict.pop(actor, None) for exit_source in junction.exit_sources: if actor in exit_source.actors: exit_source.actors.remove(actor) break for entry_source in junction.entry_sources: if actor in entry_source.actors: entry_source.actors.remove(actor) break for exit_keys in junction.exit_dict: exit_actors = junction.exit_dict[exit_keys]['actors'] if actor in exit_actors: exit_actors.remove(actor) break def _destroy_actor(self, actor): """Destroy the actor and all its references""" self._remove_actor_info(actor) try: actor.destroy() except RuntimeError: pass def _update_ego_route_location(self, location): """Returns the closest route location to the ego""" for index in range(self._route_index, min(self._route_index + self._route_buffer, self._route_length)): route_wp = self._route[index] route_wp_dir = route_wp.transform.get_forward_vector() # Waypoint's forward vector veh_wp_dir = location - route_wp.transform.location # vector waypoint - vehicle dot_ve_wp = veh_wp_dir.x * route_wp_dir.x + veh_wp_dir.y * route_wp_dir.y + veh_wp_dir.z * route_wp_dir.z if dot_ve_wp > 0: self._route_index = index return self._route[self._route_index]
2.734375
3
Python/Interfacing_C_C++_Fortran/F2py/comp_pi_f2py.py
Gjacquenot/training-material
115
12482
#!/usr/bin/env python

from argparse import ArgumentParser
import sys

from comp_pi import compute_pi


def main():
    arg_parser = ArgumentParser(description='compute pi using Fortran '
                                            'function')
    # parse n as an int so the compiled Fortran routine receives a number,
    # not the raw command-line string
    arg_parser.add_argument('n', type=int, default=1000, nargs='?',
                            help='number of random points')
    options = arg_parser.parse_args()
    print(compute_pi(options.n))
    return 0


if __name__ == '__main__':
    status = main()
    sys.exit(status)
3.078125
3
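The driver above imports compute_pi from an f2py-compiled Fortran module that is not part of this record. A rough pure-Python reference for what that routine presumably computes (the Monte Carlo scheme and the name compute_pi_py are assumptions, not the actual Fortran implementation):

import random

def compute_pi_py(n):
    # Estimate pi from the fraction of n uniform points in the unit square
    # that fall inside the quarter circle of radius 1.
    hits = 0
    for _ in range(n):
        x, y = random.random(), random.random()
        if x * x + y * y <= 1.0:
            hits += 1
    return 4.0 * hits / n

print(compute_pi_py(100000))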
test/lazy/test_cat_lazy_tensor.py
Mehdishishehbor/gpytorch
0
12483
#!/usr/bin/env python3 import unittest import torch from Lgpytorch.lazy import CatLazyTensor, NonLazyTensor from Lgpytorch.test.lazy_tensor_test_case import LazyTensorTestCase class TestCatLazyTensor(LazyTensorTestCase, unittest.TestCase): seed = 1 def create_lazy_tensor(self): root = torch.randn(6, 7) self.psd_mat = root.matmul(root.t()) slice1_mat = self.psd_mat[:2, :].requires_grad_() slice2_mat = self.psd_mat[2:4, :].requires_grad_() slice3_mat = self.psd_mat[4:6, :].requires_grad_() slice1 = NonLazyTensor(slice1_mat) slice2 = NonLazyTensor(slice2_mat) slice3 = NonLazyTensor(slice3_mat) return CatLazyTensor(slice1, slice2, slice3, dim=-2) def evaluate_lazy_tensor(self, lazy_tensor): return self.psd_mat.detach().clone().requires_grad_() class TestCatLazyTensorColumn(LazyTensorTestCase, unittest.TestCase): seed = 1 def create_lazy_tensor(self): root = torch.randn(6, 7) self.psd_mat = root.matmul(root.t()) slice1_mat = self.psd_mat[:, :2].requires_grad_() slice2_mat = self.psd_mat[:, 2:4].requires_grad_() slice3_mat = self.psd_mat[:, 4:6].requires_grad_() slice1 = NonLazyTensor(slice1_mat) slice2 = NonLazyTensor(slice2_mat) slice3 = NonLazyTensor(slice3_mat) return CatLazyTensor(slice1, slice2, slice3, dim=-1) def evaluate_lazy_tensor(self, lazy_tensor): return self.psd_mat.detach().clone().requires_grad_() class TestCatLazyTensorBatch(LazyTensorTestCase, unittest.TestCase): seed = 0 def create_lazy_tensor(self): root = torch.randn(3, 6, 7) self.psd_mat = root.matmul(root.transpose(-2, -1)) slice1_mat = self.psd_mat[..., :2, :].requires_grad_() slice2_mat = self.psd_mat[..., 2:4, :].requires_grad_() slice3_mat = self.psd_mat[..., 4:6, :].requires_grad_() slice1 = NonLazyTensor(slice1_mat) slice2 = NonLazyTensor(slice2_mat) slice3 = NonLazyTensor(slice3_mat) return CatLazyTensor(slice1, slice2, slice3, dim=-2) def evaluate_lazy_tensor(self, lazy_tensor): return self.psd_mat.detach().clone().requires_grad_() class TestCatLazyTensorMultiBatch(LazyTensorTestCase, unittest.TestCase): seed = 0 # Because these LTs are large, we'll skil the big tests skip_slq_tests = True def create_lazy_tensor(self): root = torch.randn(4, 3, 6, 7) self.psd_mat = root.matmul(root.transpose(-2, -1)) slice1_mat = self.psd_mat[..., :2, :].requires_grad_() slice2_mat = self.psd_mat[..., 2:4, :].requires_grad_() slice3_mat = self.psd_mat[..., 4:6, :].requires_grad_() slice1 = NonLazyTensor(slice1_mat) slice2 = NonLazyTensor(slice2_mat) slice3 = NonLazyTensor(slice3_mat) return CatLazyTensor(slice1, slice2, slice3, dim=-2) def evaluate_lazy_tensor(self, lazy_tensor): return self.psd_mat.detach().clone().requires_grad_() class TestCatLazyTensorBatchCat(LazyTensorTestCase, unittest.TestCase): seed = 0 # Because these LTs are large, we'll skil the big tests skip_slq_tests = True def create_lazy_tensor(self): root = torch.randn(5, 3, 6, 7) self.psd_mat = root.matmul(root.transpose(-2, -1)) slice1_mat = self.psd_mat[:2, ...].requires_grad_() slice2_mat = self.psd_mat[2:3, ...].requires_grad_() slice3_mat = self.psd_mat[3:, ...].requires_grad_() slice1 = NonLazyTensor(slice1_mat) slice2 = NonLazyTensor(slice2_mat) slice3 = NonLazyTensor(slice3_mat) return CatLazyTensor(slice1, slice2, slice3, dim=0) def evaluate_lazy_tensor(self, lazy_tensor): return self.psd_mat.detach().clone().requires_grad_() if __name__ == "__main__": unittest.main()
2.328125
2
dbSchema.py
zikasak/ReadOnlyBot
1
12484
<filename>dbSchema.py
import datetime

from sqlalchemy import Column, Integer, Boolean, ForeignKey, String, DateTime, UniqueConstraint, ForeignKeyConstraint
from sqlalchemy.orm import relationship

from dbConfig import Base, engine


class GroupStatus(Base):
    __tablename__ = "groupstatus"
    id = Column(Integer, primary_key=True)
    status = Column(Boolean, default=False)
    wel_message = Column(String)
    new_users_blocked = Column(Boolean, default=False)
    time_to_mute = Column(Integer, default=30)
    messages = relationship("GroupMessage", cascade="save-update, merge, delete, delete-orphan")
    banned_users = relationship("BannedUser", cascade="save-update, merge, delete, delete-orphan")
    mutted_users = relationship("MutedUser", backref="chat", cascade="save-update, merge, delete, delete-orphan")
    blocked_phrases = relationship("BlockedPhrases", backref="chat",
                                   cascade="save-update, merge, delete, delete-orphan")

    def add_muted(self, user_id, message_id):
        m = MutedUser()
        m.chat_id = self.id
        m.user_id = user_id
        m.welcome_msg_id = message_id
        m.mute_date = datetime.datetime.utcnow()
        if m not in self.mutted_users:
            self.mutted_users.append(m)


class GroupMessage(Base):
    __tablename__ = "groupmessage"
    chat_id = Column(Integer, ForeignKey("groupstatus.id"), primary_key=True)
    message = Column(String)
    command = Column(String, primary_key=True)
    description = Column(String, default="")
    UniqueConstraint('chat_id', 'command')

    def __repr__(self):
        return "{!r} - {!r}".format(self.command, self.description)


class MutedUser(Base):
    __tablename__ = "muted"
    chat_id = Column(Integer, ForeignKey("groupstatus.id"), primary_key=True)
    user_id = Column(Integer, primary_key=True)
    mute_date = Column(DateTime(timezone=True), nullable=False, default=datetime.datetime.utcnow())
    welcome_msg_id = Column(Integer, nullable=False)
    time_messages = relationship("TimeExceededMessage", cascade="save-update, merge, delete, delete-orphan",
                                 primaryjoin="and_(MutedUser.chat_id==TimeExceededMessage.chat_id, "
                                             "MutedUser.user_id==TimeExceededMessage.user_id)")

    def __eq__(self, obj: object) -> bool:
        if type(obj) != MutedUser:
            return super().__eq__(obj)
        return (self.chat_id == obj.chat_id) and (self.user_id == obj.user_id)


class TimeExceededMessage(Base):
    __tablename__ = "mutedMessages"
    id = Column(Integer, primary_key=True)
    chat_id = Column(Integer)
    user_id = Column(Integer)
    welcome_msg_id = Column(Integer, ForeignKey("muted.welcome_msg_id"))
    msg_id = Column(Integer, nullable=False)
    __table_args__ = (ForeignKeyConstraint([chat_id, user_id], [MutedUser.chat_id, MutedUser.user_id]), {})


class BannedUser(Base):
    __tablename__ = "bannedusers"
    chat_id = Column(Integer, ForeignKey("groupstatus.id"), primary_key=True)
    user_id = Column(Integer, primary_key=True)
    username = Column(String)
    reason = Column(String)


class BlockedPhrases(Base):
    __tablename__ = "blockedPhrases"
    id = Column(Integer, primary_key=True)
    chat_id = Column(Integer, ForeignKey("groupstatus.id"))
    blockedPhrase = Column(String, nullable=False)


Base.metadata.create_all(engine)
2.59375
3
dqn/dqn_noisy_networks/model.py
AgentMaker/Paddle-RLBooks
127
12485
<filename>dqn/dqn_noisy_networks/model.py
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from paddle.nn.initializer import Assign
import math


class NoisyLinear(nn.Linear):
    def __init__(self, in_features, out_features, sigma_zero=0.4, bias=True):
        super(NoisyLinear, self).__init__(in_features, out_features)
        sigma_init = sigma_zero / math.sqrt(in_features)
        sigma_weight = self.create_parameter(
            shape=[in_features, out_features],
            default_initializer=Assign(
                paddle.full((in_features, out_features), sigma_init)
            )
        )
        self.add_parameter("sigma_weight", sigma_weight)
        self.register_buffer("epsilon_input", paddle.zeros((1, in_features)))
        self.register_buffer("epsilon_output", paddle.zeros((out_features, 1)))
        if bias:
            sigma_bias = self.create_parameter(
                shape=[out_features],
                default_initializer=Assign(
                    paddle.full([out_features], sigma_init)
                )
            )
            self.add_parameter("sigma_bias", sigma_bias)

    def _scale_noise(self, shape):
        x = paddle.randn(shape)
        return x.sign().multiply(x.abs().sqrt())

    def forward(self, inputs):
        with paddle.no_grad():
            eps_in = self._scale_noise(self.epsilon_input.shape)
            eps_out = self._scale_noise(self.epsilon_output.shape)
            noise_v = paddle.multiply(eps_in, eps_out).detach()
        return F.linear(inputs, self.weight + self.sigma_weight * noise_v.t(),
                        self.bias + self.sigma_bias * eps_out.squeeze().t())


class Model(nn.Layer):
    def __init__(self, num_inputs, num_actions):
        super(Model, self).__init__()
        self.conv1 = nn.Conv2D(num_inputs, 32, 3, stride=3)
        self.conv2 = nn.Conv2D(32, 32, 3, stride=3)
        self.conv3 = nn.Conv2D(32, 64, 3, stride=1)
        self.flatten = nn.Flatten()
        self.linear = NoisyLinear(64 * 3 * 2, 256)
        self.fc = NoisyLinear(256, num_actions)

    def forward(self, x):
        x = F.relu(self.conv1(x))
        x = F.relu(self.conv2(x))
        x = F.relu(self.conv3(x))
        x = self.flatten(x)
        x = self.linear(x)
        return self.fc(x)
2.46875
2
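A minimal smoke test for the Model above. The 45x36 input resolution is an assumption chosen so the conv stack (kernel 3 / stride 3, 3 / 3, 3 / 1) leaves a 3x2 feature map, matching the 64 * 3 * 2 inputs expected by the first NoisyLinear layer; the import path is hypothetical.

import paddle
from model import Model  # assuming the file above is importable as model.py

# 4 stacked frames, 6 discrete actions, batch of one observation
model = Model(num_inputs=4, num_actions=6)
obs = paddle.randn([1, 4, 45, 36])
q_values = model(obs)
print(q_values.shape)  # expected: [1, 6]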
players/__init__.py
lejbron/arkenstone
0
12486
<filename>players/__init__.py<gh_stars>0
default_app_config = 'players.apps.PlayersConfig'
1.242188
1
forte/utils/utils_io.py
swapnull7/forte
2
12487
<filename>forte/utils/utils_io.py
# Copyright 2019 The Forte Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Utility functions related to input/output.
"""
import os
import sys

__all__ = [
    "maybe_create_dir",
    "ensure_dir",
    "get_resource"
]


def maybe_create_dir(dirname: str) -> bool:
    r"""Creates directory if it does not exist.

    Args:
        dirname (str): Path to the directory.

    Returns:
        bool: Whether a new directory is created.
    """
    if not os.path.isdir(dirname):
        os.makedirs(dirname)
        return True
    return False


def ensure_dir(filename: str):
    r"""Creates the parent directory of ``filename`` if it does not exist.

    Args:
        filename (str): Path to the file whose parent directory should exist.
    """
    d = os.path.dirname(filename)
    if d:
        maybe_create_dir(d)


def get_resource(path_name, is_file=True):
    r"""Returns the first match of ``path_name`` found on the Python path."""
    for dirname in sys.path:
        candidate = os.path.join(dirname, path_name)
        if is_file:
            if os.path.isfile(candidate):
                return candidate
        else:
            if os.path.exists(candidate):
                return candidate
    raise FileNotFoundError("Can't find file %s in python path." % path_name)
2.09375
2
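A short usage sketch for the helpers above; the paths are placeholders for illustration only.

from forte.utils.utils_io import ensure_dir, maybe_create_dir

ensure_dir("output/run_01/metrics.json")    # creates output/run_01/ if it is missing
created = maybe_create_dir("output/cache")  # True only if the directory was just made
print(created)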
src/AFN.py
mbampi/LinguagensRegulares
0
12488
import re from AFD import AFD class AFN: def __init__(self, nome=None, estados=[], simbolos=[], estado_inicial=None, estados_finais=[], funcoes_programa={}): self.nome = nome self.estados = estados self.simbolos = simbolos self.estado_inicial = estado_inicial self.estados_finais = estados_finais self.funcoes_programa = funcoes_programa def __str__(self): output = f'\nnome={self.nome}' output += f'\nestados={self.estados}' output += f'\nsimbolos={self.simbolos}' output += f'\nestado_inicial={self.estado_inicial}' output += f'\nestados_finais={self.estados_finais}' output += f'\nfuncoes_programa=' output += str([str(fp) + ' -> ' + str(e) for fp, e in self.funcoes_programa.items()]) return output @staticmethod def afn_de_arquivo(caminho_arquivo): ''' Le um arquivo dado pelo caminho especificado e retorna um automato finito deterministico''' with open(caminho_arquivo) as file: first_line = file.readline().split("=", 1) nome = first_line[0] # retira parenteses str_definicao = first_line[1][1:-1] # troca '{}' por '()' str_definicao = str_definicao.replace( '{', '(').replace('}', ')') # regex para achar elementos entre ',' ou conjuntos de elementos entre '{}' regex_exp = "[^,()]*(?:\([^)]*\))*[^,]*" definicao = re.findall(regex_exp, str_definicao) # tira os '()' e tira espacos em branco definicao = [i.strip().replace('(', '').replace(')', '') for i in definicao if i] # separa string pelas ',' definicao = [i.split(',') for i in definicao] estados = definicao[0] simbolos = definicao[1] estado_inicial = definicao[2][0] estados_finais = definicao[3] # descarta linha 'Prog' file.readline() funcoes_programa = {} for line in file.readlines(): estado = re.search('^\((.*),', line)[0][1: -1] simbolo = re.search(',(.*)\)=', line)[0][1: -2] estado_resultante = re.search('=(.*)$', line)[0][1:] if funcoes_programa.get((estado, simbolo)): funcoes_programa[(estado, simbolo)].append( estado_resultante) else: funcoes_programa[(estado, simbolo)] = [estado_resultante] return AFN(nome, estados, simbolos, estado_inicial, estados_finais, funcoes_programa) @staticmethod def _saidas_novo_estado(estado, simbolo, funcoes_programa): estados = estado.split('+') saidas = [] for e in estados: estado_resultante = funcoes_programa.get((e, simbolo)) if estado_resultante: saidas.extend(estado_resultante) if saidas == []: return 'QM' return '+'.join(sorted(list(set(saidas)))) @staticmethod def _define_estados_finais(estados, estados_finais): finais = [] for estado in estados: for ef in estados_finais: if ef in estado: finais.append(estado) return finais def para_AFD(self): q = [] t = {} q.append(self.estado_inicial) estado_morto = 'QM' for simbolo in self.simbolos: estado_resultante = self.funcoes_programa.get( (self.estado_inicial, simbolo)) if estado_resultante: t[(self.estado_inicial, simbolo)] = '+'.join(estado_resultante) else: t[(self.estado_inicial, simbolo)] = estado_morto while(set(q) != set(t.values())): for er in list(t.values()): if er not in q: q.append(er) for simbolo in self.simbolos: if '+' in er: t[(er, simbolo)] = AFN._saidas_novo_estado( er, simbolo, self.funcoes_programa) else: estado_resultante = self.funcoes_programa.get( (er, simbolo)) if estado_resultante: t[(er, simbolo)] = '+'.join(estado_resultante) else: t[(er, simbolo)] = estado_morto estados_finais = AFN._define_estados_finais(q, self.estados_finais) return AFD(nome=self.nome, estados=q, simbolos=self.simbolos, estado_inicial=self.estado_inicial, estados_finais=estados_finais, funcoes_programa=t)
3
3
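A minimal usage sketch for the AFN class above: read a nondeterministic automaton from a definition file and convert it to a deterministic one. The file name is a placeholder and must follow the "Name=({states},{symbols},initial,{finals})" plus "Prog" layout that afn_de_arquivo parses.

from AFN import AFN

afn = AFN.afn_de_arquivo('automato_exemplo.txt')  # hypothetical input file
afd = afn.para_AFD()
print(afd)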
cs_tools/tools/_searchable-dependencies/app.py
thoughtspot/cs_tools
1
12489
from typing import List, Dict import pathlib import shutil import enum from typer import Option as O_ import typer from cs_tools.helpers.cli_ux import console, frontend, CSToolsGroup, CSToolsCommand from cs_tools.util.datetime import to_datetime from cs_tools.tools.common import run_tql_command, run_tql_script, tsload from cs_tools.util.algo import chunks from cs_tools.settings import TSConfig from cs_tools.const import FMT_TSLOAD_DATETIME from cs_tools.thoughtspot import ThoughtSpot from cs_tools.tools import common from .util import FileQueue HERE = pathlib.Path(__file__).parent class SystemType(str, enum.Enum): """ Reversible mapping of system to friendly names. """ ONE_TO_ONE_LOGICAL = 'system table' USER_DEFINED = 'imported data' WORKSHEET = 'worksheet' AGGR_WORKSHEET = 'view' PINBOARD_ANSWER_BOOK = 'pinboard' QUESTION_ANSWER_BOOK = 'saved answer' MATERIALIZED_VIEW = 'materialized view' CALENDAR_TABLE = 'custom calendar' FORMULA = 'formula' @classmethod def to_friendly(cls, value) -> str: return getattr(cls, value).value @classmethod def to_system(cls, value) -> str: return getattr(cls, value).name class ParentType(str, enum.Enum): """ Limits the type of objects passed on via CLI. """ SYSTEM_TABLE = 'system table' IMPORTED_DATA = 'imported data' WORKSHEET = 'worksheet' VIEW = 'view' def _format_metadata_objects(queue, metadata: List[Dict]): """ Standardize data in an expected format. This is a simple transformation layer, we are fitting our data to be record-based and in the format that's expected for an eventual tsload command. """ for parent in metadata: queue.put({ 'guid_': parent['id'], 'name': parent['name'], 'description': parent.get('description'), 'author_guid': parent['author'], 'author_name': parent['authorName'], 'author_display_name': parent['authorDisplayName'], 'created': to_datetime(parent['created'], unit='ms').strftime(FMT_TSLOAD_DATETIME), 'modified': to_datetime(parent['modified'], unit='ms').strftime(FMT_TSLOAD_DATETIME), # 'modified_by': parent['modifiedBy'] # user.guid 'type': SystemType.to_friendly(parent['type']) if parent.get('type') else 'column', 'context': parent.get('owner') }) def _format_dependency(queue, parent_guid, dependencies: Dict[str, Dict]): """ Standardize data in an expected format. This is a simple transformation layer, we are fitting our data to be record-based and in the format that's expected for an eventual tsload command. 
""" for dependency in dependencies: queue.put({ 'guid_': dependency['id'], 'parent_guid': parent_guid, 'name': dependency['name'], 'description': dependency.get('description'), 'author_guid': dependency['author'], 'author_name': dependency['authorName'], 'author_display_name': dependency['authorDisplayName'], 'created': to_datetime(dependency['created'], unit='ms').strftime(FMT_TSLOAD_DATETIME), 'modified': to_datetime(dependency['modified'], unit='ms').strftime(FMT_TSLOAD_DATETIME), # 'modified_by': dependency['modifiedBy'] # user.guid 'type': SystemType.to_friendly(dependency['type']) }) def _get_dependents(api: ThoughtSpot, queue, parent: str, metadata: List[Dict]): for chunk in chunks(metadata, n=50): r = api._dependency.list_dependents( id=[_['id'] for _ in chunk], type='LOGICAL_COLUMN' if parent in ('formula', 'column') else 'LOGICAL_TABLE', batchsize=-1, timeout=None if parent == 'column' else -1 ) for parent_guid, dependent_data in r.json().items(): for dependency_type, dependencies in dependent_data.items(): for dependency in dependencies: dependency['type'] = dependency.get('type', dependency_type) queue.put({ 'guid_': dependency['id'], 'parent_guid': parent_guid, 'name': dependency['name'], 'description': dependency.get('description'), 'author_guid': dependency['author'], 'author_name': dependency['authorName'], 'author_display_name': dependency['authorDisplayName'], 'created': to_datetime(dependency['created'], unit='ms').strftime(FMT_TSLOAD_DATETIME), 'modified': to_datetime(dependency['modified'], unit='ms').strftime(FMT_TSLOAD_DATETIME), # 'modified_by': dependency['modifiedBy'] # user.guid 'type': SystemType.to_friendly(dependency['type']) }) def _get_recordset_metadata(api: ThoughtSpot) -> Dict[str, List]: _seen = {} metadata = { 'system table': [], 'imported data': [], 'worksheet': [], 'view': [], 'formula': [], 'column': [], 'other': [] } active_users = common.batched( api._metadata.list, type='USER', batchsize=5000, transformer=lambda r: r.json()['headers'] ) r = [ *common.batched( api._metadata.list, type='LOGICAL_TABLE', batchsize=5000, transformer=lambda r: r.json()['headers'] ), *common.batched( api._metadata.list, type='LOGICAL_COLUMN', batchsize=5000, # NOTE: "True" = includes Custom Calendars & Materialized Views... # auto_created=False, transformer=lambda r: r.json()['headers'] ) ] for item in r: try: friendly = SystemType.to_friendly(item['type']) except KeyError: friendly = 'column' except AttributeError: friendly = 'other' author = next((u for u in active_users if u['id'] == item['author']), None) or {} parent = _seen.get(item['owner']) or {} item = { **item, 'friendly': friendly, 'owner': parent.get('name'), 'authorName': author.get('name') or item.get('authorName'), 'authorDisplayName': author.get('displayName') or item.get('authorDisplayName'), } _seen[item['id']] = item metadata[friendly].append(item) return metadata app = typer.Typer( help=""" Make Dependencies searchable in your platform. [b][yellow]USE AT YOUR OWN RISK![/b] This tool uses private API calls which could change on any version update and break the tool.[/] Dependencies can be collected for various types of metadata. For example, many tables are used within a worksheet, while many worksheets will have answers and pinboards built on top of them. 
\b Metadata Object Metadata Dependent - guid - guid - name - parent guid - description - name - author guid - description - author name - author guid - author display name - author name - created - author display name - modified - created - object type - modified - context - object type \f Also available, but not developed for.. Tag / Stickers -> TAG Embrace Connections -> DATA_SOURCE """, cls=CSToolsGroup, options_metavar='[--version, --help]' ) @app.command(cls=CSToolsCommand) @frontend def spotapp( export: pathlib.Path = O_(None, help='directory to save the spot app to', file_okay=False, resolve_path=True), # maintained for backwards compatability backwards_compat: pathlib.Path = O_(None, '--save_path', help='backwards-compat if specified, directory to save data to', hidden=True), **frontend_kw ): """ Exports the SpotApp associated with this tool. """ shutil.copy(HERE / 'static' / 'spotapps.zip', export) console.print(f'moved the SpotApp to {export}') @app.command(cls=CSToolsCommand) @frontend def gather( export: pathlib.Path = O_(None, help='directory to save the spot app to', file_okay=False, resolve_path=True), parent: ParentType=O_(None, help='type of object to find dependents for'), include_columns: bool=O_(False, '--include-columns', help='whether or not to find column dependents', show_default=False), # maintained for backwards compatability backwards_compat: pathlib.Path = O_(None, '--save_path', help='backwards-compat if specified, directory to save data to', hidden=True), **frontend_kw ): """ Gather and optionally, insert data into Falcon. By default, data is automatically gathered and inserted into the platform. If --export argument is used, data will not be inserted and will instead be dumped to the location specified. """ cfg = TSConfig.from_cli_args(**frontend_kw, interactive=True) export = export or backwards_compat dir_ = cfg.temp_dir if export is None else export dir_.parent.mkdir(exist_ok=True) static = HERE / 'static' parent_types = [e.value for e in ParentType] if parent is None else [parent] if include_columns: parent_types.extend(['formula', 'column']) with ThoughtSpot(cfg) as ts: with console.status('getting top level metadata'): metadata = _get_recordset_metadata(ts.api) parent_q = FileQueue(dir_ / 'introspect_metadata_object.csv') children_q = FileQueue(dir_ / 'introspect_metadata_dependent.csv') with parent_q as pq, children_q as cq: for parent in parent_types: with console.status(f'getting dependents of metadata: {parent}'): _format_metadata_objects(pq, metadata[parent]) _get_dependents(ts.api, cq, parent, metadata[parent]) if export is not None: return try: with console.status('creating tables with remote TQL'): run_tql_command(ts, command='CREATE DATABASE cs_tools;') run_tql_script(ts, fp=static / 'create_tables.tql', raise_errors=True) except common.TableAlreadyExists: with console.status('altering tables with remote TQL'): run_tql_script(ts, fp=static / 'alter_tables.tql') with console.status('loading data to Falcon with remote tsload'): for stem in ('introspect_metadata_object', 'introspect_metadata_dependent'): path = dir_ / f'{stem}.csv' cycle_id = tsload( ts, fp=path, target_database='cs_tools', target_table=stem, has_header_row=True ) path.unlink() r = ts.api.ts_dataservice.load_status(cycle_id).json() m = ts.api.ts_dataservice._parse_tsload_status(r) console.print(m)
2.09375
2
bots/philBots.py
phyxl/GameOfPureStrategy
0
12490
#!/usr/bin/python import math import random from utils.log import log from bots.simpleBots import BasicBot def get_Chosen(num_cards, desired_score): chosen = list(range(1,num_cards+1)) last_removed = 0 while sum(chosen) > desired_score: #remove a random element last_removed = random.randint(0,len(chosen)-1) add_back = chosen[last_removed] chosen.remove(add_back) chosen.append(add_back) chosen.sort return chosen class shiftBot(BasicBot): def __init__(self, player_num, num_players, num_cards, num_games): #this bot is pretty dumb, and just plays bottom up self.shift_hand = list(range(1, num_cards+1)) self.num_cards = num_cards self.player_num = player_num #I can use this to cheat I think by asking the other bots what they are planning on playing self.num_players = num_players self.start_index = 1 def end_game(self, result): #increment index self.start_index += 1 if(self.start_index >= self.num_cards): self.start_index = 0 def take_turn(self, game_state, verbose = False): num_cards_remaining = len(game_state.current_prizes) index = (self.start_index + self.num_cards - num_cards_remaining) % self.num_cards return self.shift_hand[index] class PhillipAdaptoBot(BasicBot): def __init__(self, player_num, num_players, num_cards, num_games): #Bot is initialized once at the beginning of the competition, and persists between games. self.player_num = player_num #I can use this to cheat I think by asking the other bots what they are planning on playing self.num_players = num_players #normally 2, but ideally, you should allow your bot to gracefully handle more self.num_cards = num_cards self.num_games = 50 self.current_record = 0 self.game_count = 0 self.state = 0 #I'll use this to cycle through strategies attempting to hard counter my opponent self.implemented_strategies = 8 #can only cycle through strategies that I know self.wobble = 0 #some secret sauce self.staying_power = 2 self.desired_score = math.ceil((num_cards + 1) * num_cards / 4) self.chosen = get_Chosen(self.num_cards, self.desired_score) return def end_game(self, result): #Called by GameArena upon game end. Result is the number of the winning bot previous game, -1 if tie #Likely want to reset any tracking variables that persist between rounds here. self.game_count += 1 self.chosen = get_Chosen(self.num_cards, self.desired_score) if result != self.player_num or self.wobble == 1: #It think that means I lost, and am not hard countering self.state += 1 if self.state >= self.implemented_strategies: self.state = 0 #You're probably sunk at this point #if self.current_record > self.staying_power: #self.wobble = 1 self.current_record = 0 else: self.current_record += 1 # a little ugly, but who cares #this means I won, and should not change strategy #want to detect a winning streak return def take_turn(self, game_state, verbose = False): #a completed bot should wrap all log statments in verbosity checks, so we don't get a flooded console if running 1000 iterations if verbose: log(self,"This is a verbose print statment!") #the goal is to beat the opponent by one when possible (most effecient) num_cards_remaining = len(game_state.current_prizes) my_score = game_state.current_scores[self.player_num] my_current_hand = game_state.current_hands[self.player_num] if self.state == 0:#default case should be obvious bot play = game_state.prize_this_round elif self.state == 1: #bidding fairly didn't win the first round, could be playing a random bot or literally anything... 
if len(my_current_hand) > 1: play = self.num_cards - len(my_current_hand) + 2 else: play = min(my_current_hand) elif self.state == 2: play = max(my_current_hand) elif self.state == 3: if game_state.prize_this_round < self.num_cards: play = game_state.prize_this_round + 1 else: play = 1 elif self.state == 4: if game_state.prize_this_round < self.num_cards - 1: play = game_state.prize_this_round + 2 else: play = min(my_current_hand) elif self.state == 5: if game_state.prize_this_round > self.num_cards: play = game_state.prize_this_round - 1 else: play = max(my_current_hand) elif self.state == 6: if game_state.prize_this_round > self.num_cards + 1: play = game_state.prize_this_round - 2 else: play = max(my_current_hand) elif self.state == 7: if game_state.prize_this_round in self.chosen: play = my_current_hand[-(len(self.chosen) - self.chosen.index(game_state.prize_this_round)):][0] #play = max(my_current_hand) self.chosen.remove(game_state.prize_this_round) else: play = min(my_current_hand) return play # return a card to play class PhillipBotUpBot(BasicBot): def take_turn(self, game, verbose = False): """ Called by GameArena when it's time to take your turn. You are passed a "game" object with this info to work with: card = (int) value 1 thru num_cards variables available to your bot: self.player_num = your player number self.num_players = normally 2, but ideally, you should allow your bot to gracefully handle more self.num_cards = normally 13, but ideally, you should allow your bot to gracefully handle any amount game_state.current_won_cards[player_num][cards] = list of cards each player has won so far game_state.current_scores[player_num] = current score of each each player game_state.current_hands[player][cards] = list of cards currently in each player's hand game_state.current_prizes[cards] = list of prizes remaining game_state.prize_this_round (int) = current prize showing for this round """ num_cards_remaining = len(game.current_prizes) my_score = game.current_scores[self.player_num] my_current_hand = game.current_hands[self.player_num] if (my_score > 0) or (game.prize_this_round == 12): play = max(my_current_hand) else: play = min(my_current_hand) #base strategy, need to add tweaks later return play
3.484375
3
tests/ws/TestWebsocketRegisterAgent.py
sinri/nehushtan
0
12491
import uuid
from typing import Dict, List

from nehushtan.ws.NehushtanWebsocketConnectionEntity import NehushtanWebsocketConnectionEntity


class TestWebsocketRegisterAgent:
    def __init__(self):
        self.__map: Dict[str, NehushtanWebsocketConnectionEntity] = {}
        self.agent_identity = str(uuid.uuid4())

    def register(self, websocket):
        entity = NehushtanWebsocketConnectionEntity(websocket)
        self.__map[entity.get_key()] = entity
        print(f"TestWebsocketRegisterAgent[{self.agent_identity}] registered [{entity.get_key()}]")
        return entity

    def unregister(self, key: str):
        if self.__map.get(key):
            del self.__map[key]
            print(f"TestWebsocketRegisterAgent[{self.agent_identity}] unregistered [{key}]")

    def read(self, key: str):
        print(f"TestWebsocketRegisterAgent[{self.agent_identity}] reading [{key}]")
        return self.__map.get(key)

    def list_for_server(self, local_key: str) -> List[NehushtanWebsocketConnectionEntity]:
        print(f"TestWebsocketRegisterAgent[{self.agent_identity}] listing for [{local_key}]")
        enities = []
        for k, v in self.__map.items():
            if v.get_local_key() == local_key:
                enities.append(v)
        return enities
2.578125
3
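A hedged sketch of how the register agent above might be driven from a websocket connection handler; the import path is hypothetical and the connection object is whatever the surrounding server passes in, since the NehushtanWebsocketConnectionEntity API is not shown in this record.

from TestWebsocketRegisterAgent import TestWebsocketRegisterAgent  # hypothetical import path

agent = TestWebsocketRegisterAgent()

async def handler(websocket):
    # register the incoming connection, look it up by key, drop it on exit
    entity = agent.register(websocket)
    try:
        assert agent.read(entity.get_key()) is entity
    finally:
        agent.unregister(entity.get_key())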
decorator_pattern/starbuzz/condiment.py
garyeechung/design-pattern-practice
2
12492
from .interface import Beverage, CondimentDecorator


class Mocha(CondimentDecorator):
    def __init__(self, beverage: Beverage):
        super().__init__(beverage)
        self._cost = 0.2


class Whip(CondimentDecorator):
    def __init__(self, beverage: Beverage):
        super().__init__(beverage)
        self._cost = 0.1


class Soy(CondimentDecorator):
    def __init__(self, beverage: Beverage):
        super().__init__(beverage)
        self._cost = 0.15
3.125
3
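A hedged usage sketch of the decorator pattern these condiments implement: wrapping a concrete beverage stacks the surcharges. Espresso and the cost() behavior are assumptions, since interface.py and the concrete beverages are not part of this record.

from starbuzz.condiment import Mocha, Whip
from starbuzz.beverage import Espresso  # hypothetical concrete Beverage

# Each decorator wraps the previous beverage; cost() presumably adds
# the wrapper's _cost to the wrapped beverage's cost.
drink = Whip(Mocha(Espresso()))
print(drink.cost())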
src/geo_testing/test_scripts/psgs_big.py
hpgl/hpgl
70
12493
#
#
#   Copyright 2009 HPGL Team
#
#   This file is part of HPGL (High Perfomance Geostatistics Library).
#
#   HPGL is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, version 2 of the License.
#
#   HPGL is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
#   You should have received a copy of the GNU General Public License along with HPGL. If not, see http://www.gnu.org/licenses/.
#

from geo import *
from sys import *
import os
import time

if not os.path.exists("results/"):
    os.mkdir("results/")
if not os.path.exists("results/medium/"):
    os.mkdir("results/medium/")

#grid = SugarboxGrid(166, 141, 225)
#prop = load_cont_property("test_data/BIG_HARD_DATA.INC", -99)

grid = SugarboxGrid(166, 141, 20)
prop = load_cont_property("test_data/BIG_SOFT_DATA_CON_160_141_20.INC", -99)

sgs_params = {
    "prop": prop,
    "grid": grid,
    "seed": 3439275,
    "kriging_type": "sk",
    "radiuses": (20, 20, 20),
    "max_neighbours": 12,
    "covariance_type": covariance.exponential,
    "ranges": (10, 10, 10),
    "sill": 0.4
}

for x in xrange(1):
    time1 = time.time()
    psgs_result = sgs_simulation(workers_count=x+2, use_new_psgs=True, **sgs_params)
    time2 = time.time()
    print "Workers: %s" % (x+2)
    print "Time: %s" % (time2 - time1)

write_property(psgs_result, "results/medium/PSGS_workers_1.inc", "PSIS_MEDIUM_workers_1", -99)
1.65625
2
app/src/constants.py
hubacekjirka/dailyPhotoTwitterBot
1
12494
<filename>app/src/constants.py<gh_stars>1-10
friendly_camera_mapping = {
    "GM1913": "Oneplus 7 Pro",
    "FC3170": "Mavic Air 2",
    # An analogue scanner in FilmNeverDie
    "SP500": "Canon AE-1 Program"
}
1.234375
1
refined/refinement_types.py
espetro/refined
4
12495
<filename>refined/refinement_types.py
from typing_extensions import Annotated, TypeGuard
from typing import TypeVar, List, Set, Dict

from refined.predicates import (
    PositivePredicate, NegativePredicate,
    ValidIntPredicate, ValidFloatPredicate,
    EmptyPredicate, NonEmptyPredicate,
    TrimmedPredicate,
    IPv4Predicate, IPv6Predicate,
    XmlPredicate, CsvPredicate
)

__all__ = [
    # numeric types
    'Positive', 'Negative',
    # string types
    'TrimmedString', 'ValidIntString', 'ValidFloatString',
    'XmlString', 'CsvString', 'IPv4String', 'IPv6String',
    # generic collection types
    'Empty', 'NonEmpty',
    # concrete collection types
    'NonEmptyString', 'NonEmptyList', 'NonEmptySet', 'NonEmptyDict',
]

_T1 = TypeVar("_T1")
_T2 = TypeVar("_T2")

Positive = Annotated[_T1, PositivePredicate[_T1]]
Negative = Annotated[_T1, NegativePredicate[_T1]]

TrimmedString = Annotated[str, TrimmedPredicate[str]]
ValidIntString = Annotated[str, ValidIntPredicate[str]]
ValidFloatString = Annotated[str, ValidFloatPredicate[str]]
XmlString = Annotated[str, XmlPredicate[str]]
CsvString = Annotated[str, CsvPredicate[str]]
IPv4String = Annotated[str, IPv4Predicate[str]]
IPv6String = Annotated[str, IPv6Predicate[str]]

Empty = Annotated[_T1, EmptyPredicate[_T1]]
NonEmpty = Annotated[_T1, NonEmptyPredicate[_T1]]

NonEmptyString = Annotated[str, NonEmptyPredicate[str]]
NonEmptyList = Annotated[List[_T1], NonEmptyPredicate[List[_T1]]]
NonEmptySet = Annotated[Set[_T1], NonEmptyPredicate[Set[_T1]]]
NonEmptyDict = Annotated[Dict[_T1, _T2], NonEmptyPredicate[Dict[_T1, _T2]]]
1.992188
2
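A hedged usage sketch of the refinement aliases above as ordinary annotations. On their own these Annotated hints do no runtime checking; presumably the library's decorator or validator (not shown in this record) reads the attached predicates.

from refined.refinement_types import IPv4String, NonEmptyList

def broadcast(message: str, hosts: NonEmptyList[IPv4String]) -> None:
    # hosts is annotated as a non-empty list of IPv4 strings
    for host in hosts:
        print(f"sending to {host}: {message}")

broadcast("ping", ["192.168.0.1", "10.0.0.7"])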
setup.py
evamvid/SpotPRIS2
0
12496
<reponame>evamvid/SpotPRIS2
from setuptools import setup

with open("README.md", "r") as f:
    long_description = f.read()

setup(name="SpotPRIS2",
      version='0.3.1',
      author="<NAME>",
      author_email="<EMAIL>",
      url="https://github.com/freundTech/SpotPRIS2",
      description="MPRIS2 interface for Spotify Connect",
      long_description=long_description,
      packages=['spotpris2'],
      package_dir={'spotpris2': "spotpris2"},
      package_data={'spotpris2': ['mpris/*.xml', 'html/*.html']},
      install_requires=[
          "PyGObject",
          "pydbus",
          "spotipy>=2.8",
          "appdirs",
      ],
      entry_points={
          'console_scripts': ["spotpris2=spotpris2.__main__:main"]
      },
      classifiers=[
          "Development Status :: 3 - Alpha",
          "Environment :: No Input/Output (Daemon)",
          "Intended Audience :: End Users/Desktop",
          "License :: OSI Approved :: MIT License",
          "Operating System :: POSIX :: Linux",
          "Programming Language :: Python :: 3 :: Only",
          "Topic :: Multimedia :: Sound/Audio",
      ],
      python_requires='>=3.6',
      )
1.53125
2
partycipe/migrations/0001_initial.py
spexxsoldier51/PartyCipe
0
12497
# Generated by Django 4.0.3 on 2022-04-02 17:32

from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='cocktail',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('last_updated', models.DateTimeField(auto_now=True)),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('id_api', models.PositiveIntegerField()),
            ],
        ),
        migrations.CreateModel(
            name='party',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('paypal', models.URLField()),
                ('name', models.CharField(max_length=50)),
                ('resume', models.CharField(max_length=500)),
                ('place', models.CharField(max_length=150)),
                ('datehour', models.DateTimeField()),
                ('last_updated', models.DateTimeField(auto_now=True)),
                ('price', models.FloatField()),
                ('drink', models.ManyToManyField(to='partycipe.cocktail')),
                ('organisate', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='participate',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('last_updated', models.DateTimeField(auto_now=True)),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('etat', models.BooleanField()),
                ('party', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='partycipe.party')),
                ('utilisateur', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
1.742188
2
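A rough sketch of how the models created by the migration above might be used through the ORM; the models module path, the lowercase class names, and all field values are assumptions based only on the migration, not code from the repository.

# Hypothetical ORM usage; assumes partycipe.models defines party/cocktail
# with the fields listed in the migration above.
from django.contrib.auth import get_user_model
from django.utils import timezone
from partycipe.models import party, cocktail  # assumed module path and class names

host = get_user_model().objects.first()
evening = party.objects.create(
    name="Housewarming",
    resume="Drinks on the roof",
    place="Paris",
    datehour=timezone.now(),
    price=5.0,
    paypal="https://paypal.me/example",
    organisate=host,
)
evening.drink.add(cocktail.objects.create(id_api=11007))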
raster_statistic.py
Summer0328/DeeplabforRS
3
12498
<gh_stars>1-10
#!/usr/bin/env python
# Filename: raster_statistic
"""
introduction: conduct statistics based on vectors, similar to https://github.com/perrygeo/python-rasterstats,
# but allow image tiles (multi-raster).

authors: <NAME>
email:<EMAIL>
add time: 02 March, 2021
"""

import os,sys
import vector_gpd
from shapely.geometry import mapping # transform to GeoJSON format
import raster_io
import basic_src.io_function as io_function
import basic_src.map_projection as map_projection
import basic_src.basic as basic
import numpy as np

from multiprocessing import Pool

def array_stats(in_array, stats, nodata, range=None):
    data_1d = in_array.flatten()
    data_1d = data_1d[ data_1d != nodata]
    data_1d = data_1d[~np.isnan(data_1d)]  # remove nan value
    if range is not None:
        lower = range[0]
        upper = range[1]
        if lower is None:
            data_1d = data_1d[data_1d <= upper]
        elif upper is None:
            data_1d = data_1d[data_1d >= lower]
        else:
            data_1d = data_1d[np.logical_and( data_1d >= lower, data_1d <= upper ) ]

    # https://numpy.org/doc/stable/reference/routines.statistics.html
    out_value_dict = {}
    for item in stats:
        if item == 'mean':
            value = np.mean(data_1d)
        elif item == 'max':
            value = np.max(data_1d)
        elif item == 'min':
            value = np.min(data_1d)
        elif item == 'median':
            value = np.median(data_1d)
        elif item == 'count':
            value = data_1d.size
        elif item =='std':
            value = np.std(data_1d)
        else:
            raise ValueError('unsupported stats: %s'%item)
        out_value_dict[item] = value

    return out_value_dict

def zonal_stats_one_polygon(idx, polygon, image_tiles, img_tile_polygons, stats, nodata=None, range=None, band=1, all_touched=True):
    overlap_index = vector_gpd.get_poly_index_within_extent(img_tile_polygons, polygon, min_overlap_area=0.01)
    image_list = [image_tiles[item] for item in overlap_index]

    if len(image_list) == 1:
        out_image, out_tran, nodata = raster_io.read_raster_in_polygons_mask(image_list[0], polygon, nodata=nodata,
                                                                             all_touched=all_touched, bands=band)
    elif len(image_list) > 1:
        # for the case it overlaps more than one raster, need to produce a mosaic
        tmp_saved_files = []
        for k_img, image_path in enumerate(image_list):
            # print(image_path)
            tmp_save_path = os.path.splitext(os.path.basename(image_path))[0] + '_subset_poly%d'%idx +'.tif'
            _, _, nodata = raster_io.read_raster_in_polygons_mask(image_path, polygon, all_touched=all_touched, nodata=nodata,
                                                                  bands=band, save_path=tmp_save_path)
            tmp_saved_files.append(tmp_save_path)

        # mosaic files in tmp_saved_files
        save_path = 'raster_for_poly%d.tif'%idx
        mosaic_args_list = ['gdal_merge.py', '-o', save_path, '-n', str(nodata), '-a_nodata', str(nodata)]
        mosaic_args_list.extend(tmp_saved_files)
        if basic.exec_command_args_list_one_file(mosaic_args_list, save_path) is False:
            raise IOError('error, obtain a mosaic (%s) failed'%save_path)

        # read the raster
        out_image, out_nodata = raster_io.read_raster_one_band_np(save_path, band=band)

        # remove temporary rasters
        tmp_saved_files.append(save_path)
        for item in tmp_saved_files:
            io_function.delete_file_or_dir(item)
    else:
        basic.outputlogMessage('warning, cannot find raster for %d (start=0) polygon'%idx)
        return None

    # do calculation
    return array_stats(out_image, stats, nodata, range=range)

def zonal_stats_multiRasters(in_shp, raster_file_or_files, nodata=None, band=1, stats=None, prefix='',
                             range=None, all_touched=True, process_num=1):
    '''
    zonal statistic based on vectors, along multiple rasters (image tiles)
    Args:
        in_shp: input vector file
        raster_file_or_files: a raster file or multiple rasters
        nodata:
        band: band
        stats: like [mean, std, max, min]
        range: interested values [min, max], None means infinity
        all_touched:
        process_num: process number for calculation

    Returns:

    '''
    io_function.is_file_exist(in_shp)
    if stats is None:
        basic.outputlogMessage('warning, No input stats, set to ["mean"]')
        stats = ['mean']

    if isinstance(raster_file_or_files, str):
        io_function.is_file_exist(raster_file_or_files)
        image_tiles = [raster_file_or_files]
    elif isinstance(raster_file_or_files, list):
        image_tiles = raster_file_or_files
    else:
        raise ValueError('unsupported type for %s'%str(raster_file_or_files))

    # check projection (assume we have the same projection), check them outside this function

    # get image box
    img_tile_boxes = [raster_io.get_image_bound_box(tile) for tile in image_tiles]
    img_tile_polygons = [vector_gpd.convert_image_bound_to_shapely_polygon(box) for box in img_tile_boxes]

    polygons = vector_gpd.read_polygons_gpd(in_shp)
    if len(polygons) < 1:
        basic.outputlogMessage('No polygons in %s'%in_shp)
        return False
    # polygons_json = [mapping(item) for item in polygons]  # no need when using a new version of rasterio

    # process polygons one by one with the corresponding image tiles (parallel and save memory)
    # also to avoid error: daemonic processes are not allowed to have children
    if process_num == 1:
        stats_res_list = []
        for idx, polygon in enumerate(polygons):
            out_stats = zonal_stats_one_polygon(idx, polygon, image_tiles, img_tile_polygons, stats, nodata=nodata,
                                                range=range, band=band, all_touched=all_touched)
            stats_res_list.append(out_stats)
    elif process_num > 1:
        threadpool = Pool(process_num)
        para_list = [(idx, polygon, image_tiles, img_tile_polygons, stats, nodata, range, band, all_touched)
                     for idx, polygon in enumerate(polygons)]
        stats_res_list = threadpool.starmap(zonal_stats_one_polygon, para_list)
    else:
        raise ValueError('Wrong process number: %s '%str(process_num))

    # save to shapefile
    add_attributes = {}
    new_key_list = [prefix + '_' + key for key in stats_res_list[0].keys()]
    for new_key in new_key_list:
        add_attributes[new_key] = []
    for stats_result in stats_res_list:
        for key in stats_result.keys():
            add_attributes[prefix + '_' + key].append(stats_result[key])

    vector_gpd.add_attributes_to_shp(in_shp, add_attributes)

    pass

def test_zonal_stats_multiRasters():
    shp = os.path.expanduser('~/Data/Arctic/canada_arctic/Willow_River/Willow_River_Thaw_Slumps.shp')
    # save_shp = os.path.basename(io_function.get_name_by_adding_tail(shp,'raster_stats'))

    # a single DEM
    # dem_file_dir = os.path.expanduser('~/Data/Arctic/canada_arctic/DEM/WR_dem_ArcticDEM_mosaic')
    # dem_path = os.path.join(dem_file_dir,'WR_extent_2m_v3.0_ArcticTileDEM_sub_1_prj.tif')

    # dem patches
    dem_file_dir = os.path.expanduser('~/Data/Arctic/canada_arctic/DEM/WR_dem_ArcticDEM_mosaic/dem_patches')
    dem_list = io_function.get_file_list_by_ext('.tif', dem_file_dir, bsub_folder=False)
    save_shp = os.path.basename(io_function.get_name_by_adding_tail(shp, 'multi_raster_stats'))
    io_function.copy_shape_file(shp, save_shp)

    zonal_stats_multiRasters(save_shp, dem_list, nodata=None, band=1, stats=None, prefix='dem',
                             range=None, all_touched=True, process_num=4)

def main():
    test_zonal_stats_multiRasters()
    pass

if __name__=='__main__':
    basic.setlogfile('raster_statistic.log')
    main()
2.4375
2
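A quick, self-contained check of array_stats from raster_statistic.py above; the toy array, nodata value, and range are made up for illustration, and importing the module assumes its other dependencies (vector_gpd, raster_io, basic_src) are available.

# Toy example for array_stats: nodata (-99) and NaN cells are dropped first,
# then values outside the requested range [0, 5] are filtered out,
# so the statistics are computed over [1.0, 2.0, 4.0].
import numpy as np
from raster_statistic import array_stats  # assumes the module's dependencies are installed

block = np.array([[1.0, 2.0, -99.0],
                  [4.0, np.nan, 8.0]])
print(array_stats(block, ['mean', 'count', 'std'], nodata=-99, range=(0, 5)))
# e.g. {'mean': 2.333..., 'count': 3, 'std': 1.247...}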
ariadne/old/defutils.py
microns-ariadne/ariadne-pipeline-test-harness
2
12499
# Defutils.py -- Contains parsing functions for definition files.
# Produces an organized list of tokens in the file.

def parse(filename):
    f=open(filename, "r")
    contents=f.read()
    f.close()

    # Tokenize the file:
    #contents=contents.replace('\t', '\n')
    lines=contents.splitlines()
    outList=[]

    for l in lines:
        # Lines ending with ':' start a new section; other non-empty lines
        # belong to the most recent section. endswith() also keeps blank
        # lines from raising an IndexError.
        if l.endswith(':'):
            outList.append([l.rstrip(':')])
        elif l!="":
            outList[len(outList)-1].append(l)

    return outList

def search(tokList, key):
    for tok in tokList:
        if tok[0]==key:
            return tok
    return []

def write(tokList, filename):
    f=open(filename, "w")

    for tok in tokList:
        f.write(tok[0]+":\n")
        for i in range(1, len(tok), 1):
            f.write(tok[i]+"\n")

    f.close()

class InvalidTypeException(Exception):
    typestr=""
    def __init__(self, value="File cannot be read properly."):
        self.typestr=value
    def __str__(self):
        return "InvalidTypeException: "+self.typestr

class DefFormatException(Exception):
    typestr=""
    def __init__(self, value="Definition format error."):
        self.typestr=value
    def __str__(self):
        return "DefFormatException: "+self.typestr
3.140625
3
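A hypothetical round trip showing the plain-text format parse() above expects (section headers ending with ':' followed by that section's values); the file names and section contents are made up for illustration.

# Write a made-up definition file, parse it, look up a section, and write it back.
import defutils

with open("example.def", "w") as f:
    f.write("dataset:\n/data/train\n/data/test\nlabels:\nsynapse\nmembrane\n")

tokens = defutils.parse("example.def")
# [['dataset', '/data/train', '/data/test'], ['labels', 'synapse', 'membrane']]
print(defutils.search(tokens, "labels"))   # ['labels', 'synapse', 'membrane']
defutils.write(tokens, "roundtrip.def")    # reproduces the same layout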