Dataset schema (one row per source file; ⌀ marks nullable columns):

| column | dtype | values |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 5 to 2.06M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 3 to 248 |
| max_stars_repo_name | string | length 5 to 125 |
| max_stars_repo_head_hexsha | string | length 40 to 78 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 ⌀ | 1 to 191k |
| max_stars_repo_stars_event_min_datetime | string ⌀ | length 24 |
| max_stars_repo_stars_event_max_datetime | string ⌀ | length 24 |
| max_issues_repo_path | string | length 3 to 248 |
| max_issues_repo_name | string | length 5 to 125 |
| max_issues_repo_head_hexsha | string | length 40 to 78 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 ⌀ | 1 to 67k |
| max_issues_repo_issues_event_min_datetime | string ⌀ | length 24 |
| max_issues_repo_issues_event_max_datetime | string ⌀ | length 24 |
| max_forks_repo_path | string | length 3 to 248 |
| max_forks_repo_name | string | length 5 to 125 |
| max_forks_repo_head_hexsha | string | length 40 to 78 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 ⌀ | 1 to 105k |
| max_forks_repo_forks_event_min_datetime | string ⌀ | length 24 |
| max_forks_repo_forks_event_max_datetime | string ⌀ | length 24 |
| content | string | length 5 to 2.06M |
| avg_line_length | float64 | 1 to 1.02M |
| max_line_length | int64 | 3 to 1.03M |
| alphanum_fraction | float64 | 0 to 1 |
| count_classes | int64 | 0 to 1.6M |
| score_classes | float64 | 0 to 1 |
| count_generators | int64 | 0 to 651k |
| score_generators | float64 | 0 to 1 |
| count_decorators | int64 | 0 to 990k |
| score_decorators | float64 | 0 to 1 |
| count_async_functions | int64 | 0 to 235k |
| score_async_functions | float64 | 0 to 1 |
| count_documentation | int64 | 0 to 1.04M |
| score_documentation | float64 | 0 to 1 |
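The columns above appear to follow the Hugging Face `datasets` viewer layout, so rows with this schema can be streamed and filtered on the per-file statistics. The snippet below is a minimal sketch under that assumption; the dataset id `user/python-files` is a placeholder rather than the real dataset name, and the thresholds are purely illustrative.

```python
# Minimal sketch: stream rows that match the schema above and keep
# well-documented files. "user/python-files" is a placeholder dataset id.
from datasets import load_dataset

rows = load_dataset("user/python-files", split="train", streaming=True)

for row in rows:
    if row["score_documentation"] > 0.3 and row["max_line_length"] < 120:
        print(row["max_stars_repo_name"], row["max_stars_repo_path"], row["size"])
        break
```

Streaming avoids materializing the full `content` column, which the schema allows to reach 2.06M characters per row.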
hexsha: b1d089298e5f4bb67268690bc90d7e531a39929b | size: 7,710 | ext: py | lang: Python
repo path: aleph/model/document.py | repo: gazeti/aleph | head: f6714c4be038471cfdc6408bfe88dc9e2ed28452 | licenses: ["MIT"] (the path, repo, head, and licenses values are identical across the max_stars/max_issues/max_forks columns, here and in every row below)
stars: 1 (2017-07-28T12:54:09.000Z to 2017-07-28T12:54:09.000Z) | issues: 7 (2017-08-16T12:49:23.000Z to 2018-02-16T10:22:11.000Z) | forks: 6 (2017-07-26T12:29:53.000Z to 2017-08-18T09:35:50.000Z)
content:
import logging
from datetime import datetime, timedelta
from normality import ascii_text
from sqlalchemy import func
from sqlalchemy.dialects.postgresql import JSONB
from sqlalchemy.orm.attributes import flag_modified
from aleph.core import db
from aleph.model.metadata import Metadata
from aleph.model.validate import validate
from aleph.model.collection import Collection
from aleph.model.reference import Reference
from aleph.model.common import DatedModel
from aleph.model.document_record import DocumentRecord
from aleph.model.document_tag import DocumentTag
from aleph.text import index_form
log = logging.getLogger(__name__)
class Document(db.Model, DatedModel, Metadata):
_schema = 'document.json#'
SCHEMA = 'Document'
TYPE_TEXT = 'text'
TYPE_TABULAR = 'tabular'
TYPE_OTHER = 'other'
STATUS_PENDING = 'pending'
STATUS_SUCCESS = 'success'
STATUS_FAIL = 'fail'
id = db.Column(db.BigInteger, primary_key=True)
content_hash = db.Column(db.Unicode(65), nullable=True, index=True)
foreign_id = db.Column(db.Unicode, unique=False, nullable=True)
type = db.Column(db.Unicode(10), nullable=False, index=True)
status = db.Column(db.Unicode(10), nullable=True, index=True)
meta = db.Column(JSONB, default={})
crawler = db.Column(db.Unicode(), index=True)
crawler_run = db.Column(db.Unicode())
error_type = db.Column(db.Unicode(), nullable=True)
error_message = db.Column(db.Unicode(), nullable=True)
parent_id = db.Column(db.BigInteger, db.ForeignKey('document.id'), nullable=True) # noqa
children = db.relationship('Document', backref=db.backref('parent', uselist=False, remote_side=[id])) # noqa
collection_id = db.Column(db.Integer, db.ForeignKey('collection.id'), nullable=False, index=True) # noqa
collection = db.relationship(Collection, backref=db.backref('documents', lazy='dynamic')) # noqa
def __init__(self, **kw):
self.meta = {}
super(Document, self).__init__(**kw)
def update(self, data):
validate(data, self._schema)
self.title = data.get('title')
self.summary = data.get('summary')
self.languages = data.get('languages')
self.countries = data.get('countries')
db.session.add(self)
def update_meta(self):
flag_modified(self, 'meta')
def delete_records(self):
pq = db.session.query(DocumentRecord)
pq = pq.filter(DocumentRecord.document_id == self.id)
# pq.delete(synchronize_session='fetch')
pq.delete()
db.session.flush()
def delete_tags(self):
pq = db.session.query(DocumentTag)
pq = pq.filter(DocumentTag.document_id == self.id)
# pq.delete(synchronize_session='fetch')
pq.delete()
db.session.flush()
def delete_references(self, origin=None):
pq = db.session.query(Reference)
pq = pq.filter(Reference.document_id == self.id)
if origin is not None:
pq = pq.filter(Reference.origin == origin)
# pq.delete(synchronize_session='fetch')
pq.delete()
db.session.flush()
def delete(self, deleted_at=None):
self.delete_references()
self.delete_records()
db.session.delete(self)
def insert_records(self, sheet, iterable, chunk_size=1000):
chunk = []
for index, data in enumerate(iterable):
chunk.append({
'document_id': self.id,
'index': index,
'sheet': sheet,
'data': data
})
if len(chunk) >= chunk_size:
db.session.bulk_insert_mappings(DocumentRecord, chunk)
chunk = []
if len(chunk):
db.session.bulk_insert_mappings(DocumentRecord, chunk)
def text_parts(self):
pq = db.session.query(DocumentRecord)
pq = pq.filter(DocumentRecord.document_id == self.id)
for record in pq.yield_per(1000):
for text in record.text_parts():
yield text
@classmethod
def crawler_last_run(cls, crawler_id):
q = db.session.query(func.max(cls.updated_at))
q = q.filter(cls.crawler == crawler_id)
return q.scalar()
@classmethod
def is_crawler_active(cls, crawler_id):
# TODO: add a function to see if a particular crawl is still running
# this should be defined as having "pending" documents.
last_run_time = cls.crawler_last_run(crawler_id)
if last_run_time is None:
return False
return last_run_time > (datetime.utcnow() - timedelta(hours=1))
@classmethod
def crawler_stats(cls, crawler_id):
# Check if the crawler was active very recently, if so, don't
# allow the user to execute a new run right now.
stats = {
'updated': cls.crawler_last_run(crawler_id),
'running': cls.is_crawler_active(crawler_id)
}
q = db.session.query(cls.status, func.count(cls.id))
q = q.filter(cls.crawler == crawler_id)
q = q.group_by(cls.status)
for (status, count) in q.all():
stats[status] = count
return stats
@classmethod
def by_keys(cls, parent_id=None, collection=None, foreign_id=None,
content_hash=None):
"""Try and find a document by various criteria."""
q = cls.all()
if collection is not None:
q = q.filter(Document.collection_id == collection.id)
if parent_id is not None:
q = q.filter(Document.parent_id == parent_id)
if foreign_id is not None:
q = q.filter(Document.foreign_id == foreign_id)
elif content_hash is not None:
q = q.filter(Document.content_hash == content_hash)
else:
raise ValueError("No unique criterion for document.")
document = q.first()
if document is None:
document = cls()
document.type = cls.TYPE_OTHER
document.collection_id = collection.id
document.collection = collection
document.parent_id = parent_id
document.foreign_id = foreign_id
document.content_hash = content_hash
document.status = document.STATUS_PENDING
db.session.add(document)
return document
def to_dict(self):
data = self.to_meta_dict()
try:
from flask import request # noqa
data['public'] = request.authz.collection_public(self.collection_id) # noqa
except:
data['public'] = None
data.update({
'id': self.id,
'type': self.type,
'status': self.status,
'parent_id': self.parent_id,
'foreign_id': self.foreign_id,
'content_hash': self.content_hash,
'crawler': self.crawler,
'crawler_run': self.crawler_run,
'error_type': self.error_type,
'error_message': self.error_message,
'collection_id': self.collection_id,
'created_at': self.created_at,
'updated_at': self.updated_at
})
return data
def to_index_dict(self):
data = self.to_dict()
data['text'] = index_form(self.text_parts())
data['schema'] = self.SCHEMA
data['schemata'] = [self.SCHEMA]
data['name_sort'] = ascii_text(data.get('title'))
data['title_latin'] = ascii_text(data.get('title'))
data['summary_latin'] = ascii_text(data.get('summary'))
data.pop('tables')
return data
def __repr__(self):
return '<Document(%r,%r,%r)>' % (self.id, self.type, self.title)
avg_line_length: 35.366972 | max_line_length: 113 | alphanum_fraction: 0.623476 | count_classes: 7,073 | score_classes: 0.91738 | count_generators: 243 | score_generators: 0.031518 | count_decorators: 2,283 | score_decorators: 0.296109 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 981 | score_documentation: 0.127237

hexsha: b1d1d0561cf49238a4a8252a8f7aae4e72eed16d | size: 1,613 | ext: py | lang: Python
repo path: pyInet/__main__.py | repo: LcfherShell/pynet | head: 80284d7147d7b8d69c631fd6fe1236bb73e50b1b | licenses: ["MIT"]
stars: null | issues: null | forks: null
content:
try:
from main import ClassA, ClassB
except ImportError:
try:
from pyInet import ClassA, ClassB
except ImportError:
from pyInet.main import ClassA, ClassB
if __name__ == "__main__":
child = ClassA #Public Class
network = ClassB #Private Class
print("Call function using public class")
for i in range(3):
for ipv4 in child.IPv4(i):
print("IPv4:", ipv4)
for ipv6 in child.IPv6(i):
print("IPv6:", ipv6)
print("MacAddresss:", child.MacAddresss(),"\n")
i = 0
print("\nCall function using private class")
for i in range(3):
for ipv4 in network.IPv4(i):
print("IPv4:", ipv4)
for ipv6 in network.IPv6(i):
print("IPv6:", ipv6)
print("MacAddresss:", network.MacAddresss(),"\n")
ipv4 = "192.230.212.159"
ipv6 = "f18d:5980:50d1:cf2d:b204:dc2:ad87:6a58"
print("Check Version and Class Ip addresses")
print("IP version:", child.Validate_IP(ipv4))
print("IPv4 Class:", child.IPv4_Class(ipv4))
print("\nIP version:", child.Validate_IP(ipv6))
print("IPv6 Class:", child.IPv6_Class(ipv6))
print("\nManipulate IPv4 :")
for x in range(1, 33):
child.IPv4_Calculator("{}/{}".format(ipv4, x))
print(child.saving.output)
print("\nManipulate IPv6 :")
for y in range(0, 33):
ipv6range = "{}/{}".format(ipv6, y)
child.IPv6_Calculator(ipv6range)
print(child.saving.output)
avg_line_length: 33.604167 | max_line_length: 62 | alphanum_fraction: 0.543707 | count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 379 | score_documentation: 0.234966

hexsha: b1d42f3d03c0a3e27b14e56be2c1099412ba4e94 | size: 3,854 | ext: py | lang: Python
repo path: dope/filespec/__init__.py | repo: gwappa/pydope | head: c8beb315177a850e9d275902a6303e68a319c123 | licenses: ["MIT"]
stars: null | issues: 5 (2020-05-13T13:09:45.000Z to 2020-05-14T14:18:43.000Z) | forks: null
content:
#
# MIT License
#
# Copyright (c) 2020 Keisuke Sehara
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import collections as _collections
from ..core import SelectionStatus as _SelectionStatus
class FileSpec(_collections.namedtuple("_FileSpec",
("suffix", "trial", "run", "channel")), _SelectionStatus):
DIGITS = 5
def __new__(cls, suffix=None, trial=None, run=None, channel=None):
return super(cls, FileSpec).__new__(cls, suffix=suffix, trial=trial, run=run, channel=channel)
@classmethod
def empty(cls):
return FileSpec(suffix=None, trial=None, run=None, channel=None)
@property
def status(self):
return self.compute_status(None)
def compute_status(self, context=None):
# TODO
if context is None:
unspecified = (((self.trial is None) and (self.run is None)),
self.channel is None,
self.suffix is None)
if all(unspecified):
return self.UNSPECIFIED
elif any(callable(fld) for fld in self):
return self.DYNAMIC
elif any(unspecified):
return self.MULTIPLE
else:
return self.SINGLE
else:
raise NotImplementedError("FileSpec.compute_status()")
def compute_path(self, context):
"""context: Predicate"""
return context.compute_domain_path() / self.format_name(context)
def format_name(self, context, digits=None):
"""context: Predicate"""
runtxt = self.format_run(digits=digits)
chtxt = self.format_channel(context)
sxtxt = self.format_suffix()
return f"{context.subject}_{context.session.name}_{context.domain}{runtxt}{chtxt}{sxtxt}"
def format_run(self, digits=None):
if digits is None:
digits = self.DIGITS
if self.trial is None:
if self.run is None:
return ""
else:
return "_" + str(self.run).zfill(digits)
else:
return "_" + str(self.trial).zfill(digits)
def format_channel(self, context):
if self.channel is None:
return ""
elif isinstance(self.channel, str):
return f"_{self.channel}"
elif hasattr(self.channel, "__iter__"):  # 'iterable' was undefined here; accept any non-string iterable of channel names
return "_" + "-".join(self.channel)
else:
raise ValueError(f"cannot compute channel from: {self.channel}")
def format_suffix(self):
return self.suffix if self.suffix is not None else ""
def with_values(self, **kwargs):
spec = dict(**kwargs)
for fld in self._fields:
if fld not in spec.keys():
spec[fld] = getattr(self, fld)
return self.__class__(**spec)
def cleared(self):
return self.__class__()
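For readability, here is a small usage sketch of the FileSpec tuple defined above. It assumes the pydope package is installed and importable as `dope`; the suffix, run number, and channel name are illustrative values, not part of the original file.

```python
# A minimal usage sketch, assuming the package above is importable as `dope`.
from dope.filespec import FileSpec

spec = FileSpec(suffix=".csv", run=3)           # trial and channel stay unspecified
print(spec.format_run())                        # "_00003" (run number zero-padded to DIGITS=5)
print(spec.with_values(channel="ecg").channel)  # "ecg"; the other fields are carried over
```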
avg_line_length: 37.417476 | max_line_length: 102 | alphanum_fraction: 0.639855 | count_classes: 2,649 | score_classes: 0.687338 | count_generators: 0 | score_generators: 0 | count_decorators: 177 | score_decorators: 0.045926 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 1,375 | score_documentation: 0.356772

hexsha: b1d4408048c2582c035650ab9faddb5edccff6fd | size: 2,498 | ext: py | lang: Python
repo path: city_scrapers_core/extensions/status.py | repo: jtotoole/city-scrapers-core | head: 0c091d91bf8883c6f361a19fbb055abc3b306835 | licenses: ["MIT"]
stars: null | issues: null | forks: null
content:
from datetime import datetime
import pytz
from scrapy import signals
RUNNING = "running"
FAILING = "failing"
STATUS_COLOR_MAP = {RUNNING: "#44cc11", FAILING: "#cb2431"}
STATUS_ICON = """
<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" width="144" height="20">
<linearGradient id="b" x2="0" y2="100%">
<stop offset="0" stop-color="#bbb" stop-opacity=".1"/>
<stop offset="1" stop-opacity=".1"/>
</linearGradient>
<clipPath id="a">
<rect width="144" height="20" rx="3" fill="#fff"/>
</clipPath>
<g clip-path="url(#a)">
<path fill="#555" d="M0 0h67v20H0z"/>
<path fill="{color}" d="M67 0h77v20H67z"/>
<path fill="url(#b)" d="M0 0h144v20H0z"/>
</g>
<g fill="#fff" text-anchor="middle" font-family="DejaVu Sans,Verdana,Geneva,sans-serif" font-size="110">
<text x="345" y="150" fill="#010101" fill-opacity=".3" transform="scale(.1)">{status}</text>
<text x="345" y="140" transform="scale(.1)">{status}</text>
<text x="1045" y="150" fill="#010101" fill-opacity=".3" transform="scale(.1)">{date}</text>
<text x="1045" y="140" transform="scale(.1)">{date}</text>
</g>
</svg>
""" # noqa
class StatusExtension:
"""
Scrapy extension for maintaining an SVG badge for each scraper's status.
TODO: Track how many items are scraped on each run.
"""
def __init__(self, crawler):
self.crawler = crawler
self.has_error = False
@classmethod
def from_crawler(cls, crawler):
ext = cls(crawler)
crawler.signals.connect(ext.spider_closed, signal=signals.spider_closed)
crawler.signals.connect(ext.spider_error, signal=signals.spider_error)
return ext
def spider_closed(self):
if self.has_error:
return
svg = self.create_status_svg(self.crawler.spider, RUNNING)
self.update_status_svg(self.crawler.spider, svg)
def spider_error(self):
self.has_error = True
svg = self.create_status_svg(self.crawler.spider, FAILING)
self.update_status_svg(self.crawler.spider, svg)
def create_status_svg(self, spider, status):
tz = pytz.timezone(spider.timezone)
return STATUS_ICON.format(
color=STATUS_COLOR_MAP[status],
status=status,
date=tz.localize(datetime.now()).strftime("%Y-%m-%d"),
)
def update_status_svg(self, spider, svg):
raise NotImplementedError
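Since `update_status_svg` above is left unimplemented, a concrete deployment would subclass `StatusExtension` and decide where the badge goes. The sketch below is one possible subclass, assuming the module above is importable at the path shown; the `output` directory is a placeholder.

```python
# A minimal sketch of a concrete subclass; the "output" directory is illustrative.
from pathlib import Path

from city_scrapers_core.extensions.status import StatusExtension


class LocalStatusExtension(StatusExtension):
    """Write the generated SVG badge to a local file instead of a remote store."""

    def update_status_svg(self, spider, svg):
        out_path = Path("output") / f"{spider.name}.svg"  # one badge per spider
        out_path.parent.mkdir(parents=True, exist_ok=True)
        out_path.write_text(svg)
```

Like any Scrapy extension, a subclass such as this would be enabled through the project's EXTENSIONS setting.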
avg_line_length: 34.694444 | max_line_length: 108 | alphanum_fraction: 0.6249 | count_classes: 1,269 | score_classes: 0.508006 | count_generators: 0 | score_generators: 0 | count_decorators: 254 | score_decorators: 0.101681 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 1,229 | score_documentation: 0.491994

hexsha: b1d542377c13c57ca40f0aad4217a57a0a2f3e27 | size: 5,438 | ext: py | lang: Python
repo path: tests/test_filters.py | repo: maniospas/pygrank | head: a92f6bb6d13553dd960f2e6bda4c041a8027a9d1 | licenses: ["Apache-2.0"]
stars: 19 (2019-10-07T14:42:40.000Z to 2022-03-24T15:02:02.000Z) | issues: 13 (2021-08-25T12:54:37.000Z to 2022-03-05T03:31:34.000Z) | forks: 4 (2019-09-25T09:54:51.000Z to 2020-12-09T00:11:21.000Z)
content:
import networkx as nx
import pygrank as pg
import pytest
from .test_core import supported_backends
def test_zero_personalization():
assert pg.sum(pg.PageRank()(next(pg.load_datasets_graph(["graph9"])), {}).np) == 0
def test_abstract_filter_types():
graph = next(pg.load_datasets_graph(["graph5"]))
with pytest.raises(Exception):
pg.GraphFilter().rank(graph)
with pytest.raises(Exception):
pg.RecursiveGraphFilter().rank(graph)
with pytest.raises(Exception):
pg.ClosedFormGraphFilter().rank(graph)
with pytest.raises(Exception):
pg.Tuner().rank(graph)
def test_filter_invalid_parameters():
graph = next(pg.load_datasets_graph(["graph5"]))
with pytest.raises(Exception):
pg.HeatKernel(normalization="unknown").rank(graph)
with pytest.raises(Exception):
pg.HeatKernel(coefficient_type="unknown").rank(graph)
def test_convergence_string_conversion():
# TODO: make convergence trackable from wrapping objects
graph = next(pg.load_datasets_graph(["graph5"]))
ranker = pg.PageRank()
ranker(graph)
assert str(ranker.convergence.iteration)+" iterations" in str(ranker.convergence)
def test_pagerank_vs_networkx():
graph = next(pg.load_datasets_graph(["graph9"]))
for _ in supported_backends():
ranker = pg.Normalize("sum", pg.PageRank(normalization='col', tol=1.E-9))
test_result = ranker(graph)
test_result2 = nx.pagerank(graph, tol=1.E-9)
# TODO: assert that 2.5*epsilon is indeed a valid limit
assert pg.Mabs(test_result)(test_result2) < 2.5*pg.epsilon()
def test_prevent_node_lists_as_graphs():
graph = next(pg.load_datasets_graph(["graph5"]))
with pytest.raises(Exception):
pg.PageRank().rank(list(graph))
def test_non_convergence():
graph = next(pg.load_datasets_graph(["graph9"]))
with pytest.raises(Exception):
pg.PageRank(max_iters=5).rank(graph)
def test_custom_runs():
graph = next(pg.load_datasets_graph(["graph9"]))
for _ in supported_backends():
ranks1 = pg.Normalize(pg.PageRank(0.85, tol=pg.epsilon(), max_iters=1000, use_quotient=False)).rank(graph, {"A": 1})
ranks2 = pg.Normalize(pg.GenericGraphFilter([0.85**i*len(graph) for i in range(80)], tol=pg.epsilon())).rank(graph, {"A": 1})
assert pg.Mabs(ranks1)(ranks2) < 1.E-6
def test_completion():
graph = next(pg.load_datasets_graph(["graph9"]))
for _ in supported_backends():
pg.PageRank().rank(graph)
pg.HeatKernel().rank(graph)
pg.AbsorbingWalks().rank(graph)
pg.HeatKernel().rank(graph)
assert True
def test_quotient():
graph = next(pg.load_datasets_graph(["graph9"]))
for _ in supported_backends():
test_result = pg.PageRank(normalization='symmetric', tol=max(1.E-9, pg.epsilon()), use_quotient=True).rank(graph)
norm_result = pg.PageRank(normalization='symmetric', tol=max(1.E-9, pg.epsilon()), use_quotient=pg.Normalize("sum")).rank(graph)
assert pg.Mabs(test_result)(norm_result) < pg.epsilon()
def test_automatic_graph_casting():
graph = next(pg.load_datasets_graph(["graph9"]))
for _ in supported_backends():
signal = pg.to_signal(graph, {"A": 1})
test_result1 = pg.PageRank(normalization='col').rank(signal, signal)
test_result2 = pg.PageRank(normalization='col').rank(personalization=signal)
assert pg.Mabs(test_result1)(test_result2) < pg.epsilon()
with pytest.raises(Exception):
pg.PageRank(normalization='col').rank(personalization={"A": 1})
with pytest.raises(Exception):
pg.PageRank(normalization='col').rank(graph.copy(), signal)
def test_absorbing_vs_pagerank():
graph = next(pg.load_datasets_graph(["graph9"]))
personalization = {"A": 1, "B": 1}
for _ in supported_backends():
pagerank_result = pg.PageRank(normalization='col').rank(graph, personalization)
absorbing_result = pg.AbsorbingWalks(0.85, normalization='col', max_iters=1000).rank(graph, personalization)
assert pg.Mabs(pagerank_result)(absorbing_result) < pg.epsilon()
def test_kernel_locality():
graph = next(pg.load_datasets_graph(["graph9"]))
personalization = {"A": 1, "B": 1}
for _ in supported_backends():
for kernel_algorithm in [pg.HeatKernel, pg.BiasedKernel]:
pagerank_result = pg.Normalize("sum", pg.PageRank(max_iters=1000)).rank(graph, personalization)
kernel_result = pg.Normalize("sum", kernel_algorithm(max_iters=1000)).rank(graph, personalization)
assert pagerank_result['A'] < kernel_result['A']
assert pagerank_result['I'] > kernel_result['I']
def test_optimization_dict():
from timeit import default_timer as time
graph = next(pg.load_datasets_graph(["bigraph"]))
personalization = {str(i): 1 for i in range(200)}
preprocessor = pg.preprocessor(assume_immutability=True)
preprocessor(graph)
tic = time()
for _ in range(10):
pg.ParameterTuner(preprocessor=preprocessor, tol=1.E-9).rank(graph, personalization)
unoptimized = time()-tic
optimization = dict()
tic = time()
for _ in range(10):
pg.ParameterTuner(optimization_dict=optimization, preprocessor=preprocessor, tol=1.E-9).rank(graph, personalization)
optimized = time() - tic
assert len(optimization) == 20
assert unoptimized > optimized
avg_line_length: 39.693431 | max_line_length: 136 | alphanum_fraction: 0.685914 | count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 368 | score_documentation: 0.067672

hexsha: b1d70b532712bd846f1a70f021a50fadff6b4449 | size: 9,816 | ext: py | lang: Python
repo path: src/preprocess.py | repo: vkola-lab/ajpa2021 | head: 67a76ae184b4c9c40c9bc104c8d87ffa5ea69d91 | licenses: ["MIT"]
stars: null | issues: null | forks: null
content:
import os
import PIL
from PIL import Image, ImageStat
PIL.Image.MAX_IMAGE_PIXELS = 10000000000
import numpy as np
import cv2
import openslide
import time
PATCH_SIZE = 224
STRIDE = 224
DOWN_SIZE = 508
def preprocess_mask(path_ori, path_mask, width_resize=2560, height_resize=1920):
start_time = time.time()
all_files = os.listdir(path_ori)
for file_ in all_files:
ori = openslide.OpenSlide(os.path.join(path_ori, file_))
name = file_.split('.')[0]
if os.path.exists(os.path.join(path_mask, '{}-annotations.png'.format(name))):
annt = Image.open(os.path.join(path_mask, '{}-annotations.png'.format(name)))
width, height = annt.size
width_r = width/width_resize
height_r = height/height_resize
annt_resize = annt.resize((width_resize,height_resize))
annt_resize_img = np.array(annt_resize)
os.makedirs(os.path.join('data/masks/'), exist_ok=True)
cv2.imwrite('data/masks/{}_annt.jpg'.format(name), annt_resize_img)
print('{} finished'.format(name))
print("--- %s seconds ---" % (time.time() - start_time))
def preprocess_global(path_ori, width_resize=2560, height_resize=1920):
start_time = time.time()
all_files = os.listdir(path_ori)
for file_ in all_files:
ori = openslide.OpenSlide(os.path.join(path_ori, file_))
name = file_.split('.')[0]
width, height = ori.dimensions
x_resize_mins = []
x_resize_maxs = []
y_resize_mins = []
y_resize_maxs = []
if os.path.exists('data/masks/{}_annt.jpg'.format(name)):
annt_resized = cv2.imread('data/masks/{}_annt.jpg'.format(name), 0)
height_annt, width_annt = annt_resized.shape
ret, score = cv2.threshold(annt_resized, 128, 1, 0)
nLabels, labels, stats, centroids = cv2.connectedComponentsWithStats(score.astype(np.uint8), connectivity=4)
for k in range(1,nLabels):
size = stats[k, cv2.CC_STAT_AREA]
if size < 10000: continue
segmap = np.zeros(annt_resized.shape, dtype=np.uint8)
segmap[labels==k] = 255
np_contours = np.roll(np.array(np.where(segmap!=0)),1,axis=0).transpose().reshape(-1,2)
x_resize_min, x_resize_max = min(np_contours[:,1]), max(np_contours[:,1])
y_resize_min, y_resize_max = min(np_contours[:,0]), max(np_contours[:,0])
x_resize_mins.append(x_resize_min)
x_resize_maxs.append(x_resize_max)
y_resize_mins.append(y_resize_min)
y_resize_maxs.append(y_resize_max)
width_r = width/width_resize
height_r = height/height_resize
if len(x_resize_maxs) != 0:
x_resize_min, x_resize_max, y_resize_min, y_resize_max = min(x_resize_mins), max(x_resize_maxs), min(y_resize_mins), max(y_resize_maxs)
else:
for k in range(1,nLabels):
size = stats[k, cv2.CC_STAT_AREA]
if size < 150: continue
segmap = np.zeros(annt_resized.shape, dtype=np.uint8)
segmap[labels==k] = 255
np_contours = np.roll(np.array(np.where(segmap!=0)),1,axis=0).transpose().reshape(-1,2)
x_resize_min, x_resize_max = min(np_contours[:,1]), max(np_contours[:,1])
y_resize_min, y_resize_max = min(np_contours[:,0]), max(np_contours[:,0])
x_resize_mins.append(x_resize_min)
x_resize_maxs.append(x_resize_max)
y_resize_mins.append(y_resize_min)
y_resize_maxs.append(y_resize_max)
x_resize_min, x_resize_max, y_resize_min, y_resize_max = min(x_resize_mins), max(x_resize_maxs), min(y_resize_mins), max(y_resize_maxs)
x_min, x_max = int(x_resize_min * height_r), int(x_resize_max * height_r)
y_min, y_max = int(y_resize_min * width_r), int(y_resize_max * width_r)
crop = ori.read_region((y_min,x_min), 0, (y_max-y_min,x_max-x_min))
crop = crop.resize((DOWN_SIZE,DOWN_SIZE))
crop_np = np.array(crop)
crop_np = cv2.cvtColor(crop_np, cv2.COLOR_RGBA2RGB)
os.makedirs(os.path.join('data/globals/'), exist_ok=True)
cv2.imwrite('data/globals/{}.png'.format(name), crop_np)
print('{} finished'.format(name))
print("--- %s seconds ---" % (time.time() - start_time))
def preprocess_patch(path_ori, width_resize=2560, height_resize=1920):
start_time = time.time()
all_files = os.listdir(path_ori)
os.makedirs(os.path.join('data/locals/'), exist_ok=True)
for file_ in all_files:
ori = openslide.OpenSlide(os.path.join(path_ori, file_))
name = file_.split('.')[0]
width, height = ori.dimensions
x_resize_mins = []
x_resize_maxs = []
y_resize_mins = []
y_resize_maxs = []
if os.path.exists('data/masks/{}_annt.jpg'.format(name)):
os.makedirs(os.path.join('data/locals/' + name), exist_ok=True)
annt_resized = cv2.imread('data/masks/{}_annt.jpg'.format(name), 0)
height_annt, width_annt = annt_resized.shape
ret, score = cv2.threshold(annt_resized, 128, 1, 0)
nLabels, labels, stats, centroids = cv2.connectedComponentsWithStats(score.astype(np.uint8), connectivity=4)
for k in range(1,nLabels):
size = stats[k, cv2.CC_STAT_AREA]
if size < 10000: continue
segmap = np.zeros(annt_resized.shape, dtype=np.uint8)
segmap[labels==k] = 255
np_contours = np.roll(np.array(np.where(segmap!=0)),1,axis=0).transpose().reshape(-1,2)
x_resize_min, x_resize_max = min(np_contours[:,1]), max(np_contours[:,1])
y_resize_min, y_resize_max = min(np_contours[:,0]), max(np_contours[:,0])
x_resize_mins.append(x_resize_min)
x_resize_maxs.append(x_resize_max)
y_resize_mins.append(y_resize_min)
y_resize_maxs.append(y_resize_max)
width_r = width/width_resize
height_r = height/height_resize
if len(x_resize_maxs) != 0:
x_resize_min, x_resize_max, y_resize_min, y_resize_max = min(x_resize_mins), max(x_resize_maxs), min(y_resize_mins), max(y_resize_maxs)
else:
for k in range(1,nLabels):
size = stats[k, cv2.CC_STAT_AREA]
if size < 150: continue
segmap = np.zeros(annt_resized.shape, dtype=np.uint8)
segmap[labels==k] = 255
np_contours = np.roll(np.array(np.where(segmap!=0)),1,axis=0).transpose().reshape(-1,2)
x_resize_min, x_resize_max = min(np_contours[:,1]), max(np_contours[:,1])
y_resize_min, y_resize_max = min(np_contours[:,0]), max(np_contours[:,0])
x_resize_mins.append(x_resize_min)
x_resize_maxs.append(x_resize_max)
y_resize_mins.append(y_resize_min)
y_resize_maxs.append(y_resize_max)
x_resize_min, x_resize_max, y_resize_min, y_resize_max = min(x_resize_mins), max(x_resize_maxs), min(y_resize_mins), max(y_resize_maxs)
x_min, x_max = int(x_resize_min * height_r), int(x_resize_max * height_r)
y_min, y_max = int(y_resize_min * width_r), int(y_resize_max * width_r)
else:
continue
roi_h = x_max - x_min
roi_w = y_max - y_min
wp = int((roi_w - PATCH_SIZE) / STRIDE + 1)
hp = int((roi_h - PATCH_SIZE) / STRIDE + 1)
total = wp * hp
cnt = 0
for w in range(wp):
for h in range(hp):
y = y_min + w * STRIDE
x = x_min + h * STRIDE
cnt += 1
if y > width or x > height:
continue
crop = ori.read_region((y,x), 0, (PATCH_SIZE,PATCH_SIZE))
crop = crop.convert('RGB')
if sum(ImageStat.Stat(crop).stddev)/3 < 18 or sum(ImageStat.Stat(crop).median)/3 > 200:
continue
os.makedirs(os.path.join('data/locals/{}'.format(name)), exist_ok=True)
crop.save('data/locals/{}/{}_{}_{}_{}_{}.png'.format(name,name,str(h),str(w),str(roi_h),str(roi_w)))
if cnt % 1000 == 0:
print('{}/{}'.format(str(cnt), str(total)))
print('{} finished'.format(name))
print("--- %s seconds ---" % (time.time() - start_time))
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description='Preprocessing')
parser.add_argument('--path_ori', type=str, default='/scratch2/zheng/ajpa2021-master/data/OSUWMC/', help='path to dataset where images store')
parser.add_argument('--path_mask', type=str, default='/scratch2/zheng/ajpa2021-master/data/OSUWMC_MASK/', help='path to dataset where masks (if possible) store')
parser.add_argument('--m', action='store_true', default=False, help='preprocess masks if possible')
parser.add_argument('--g', action='store_true', default=False, help='preprocess data at global level')
parser.add_argument('--p', action='store_true', default=False, help='preprocess data at patch level')
args = parser.parse_args()
if args.m:
preprocess_mask(args.path_ori, args.path_mask)
elif args.g:
preprocess_global(args.path_ori)
elif args.p:
preprocess_patch(args.path_ori)
avg_line_length: 49.326633 | max_line_length: 165 | alphanum_fraction: 0.599124 | count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 786 | score_documentation: 0.080073

hexsha: b1d7b3ea3f8d942998560e953fec761fcb002a45 | size: 2,433 | ext: py | lang: Python
repo path: procgen.py | repo: tredfern/rdl2021-tutorial | head: 18f992c9c09ab18ee8e2927cf53d707c251d4948 | licenses: ["MIT"]
stars: null | issues: null | forks: null
content:
# Copyright (c) 2021 Trevor Redfern
#
# This software is released under the MIT License.
# https://opensource.org/licenses/MIT
from __future__ import annotations
from typing import Tuple, Iterator, List, TYPE_CHECKING
import random
import tcod
from game_map import GameMap
import tile_types
if TYPE_CHECKING:
from entity import Entity
class RectangularRoom:
def __init__(self, x: int, y: int, width: int, height: int) -> None:
self.x1 = x
self.y1 = y
self.x2 = x + width
self.y2 = y + height
@property
def center(self) -> Tuple[int, int]:
centerX = int((self.x1 + self.x2) / 2)
centerY = int((self.y1 + self.y2) / 2)
return centerX, centerY
@property
def inner(self) -> Tuple[slice, slice]:
return slice(self.x1 + 1, self.x2), slice(self.y1 + 1, self.y2)
def intersects(self, other: RectangularRoom) -> bool:
return (
self.x1 <= other.x2 and
self.x2 >= other.x1 and
self.y1 <= other.y2 and
self.y2 >= other.y1
)
def generateDungeon(
maxRooms: int,
roomMinSize: int,
roomMaxSize: int,
mapWidth: int,
mapHeight: int,
player: Entity) -> GameMap:
dungeon = GameMap(mapWidth, mapHeight)
rooms: List[RectangularRoom] = []
for r in range(maxRooms):
roomWidth = random.randint(roomMinSize, roomMaxSize)
roomHeight = random.randint(roomMinSize, roomMaxSize)
x = random.randint(0, dungeon.width - roomWidth - 1)
y = random.randint(0, dungeon.height - roomHeight - 1)
newRoom = RectangularRoom(x, y, roomWidth, roomHeight)
if any(newRoom.intersects(otherRoom) for otherRoom in rooms):
continue
dungeon.tiles[newRoom.inner] = tile_types.floor
if len(rooms) == 0:
player.x, player.y = newRoom.center
else:
for x, y in tunnelBetween(rooms[-1].center, newRoom.center):
dungeon.tiles[x, y] = tile_types.floor
rooms.append(newRoom)
return dungeon
def tunnelBetween( start: Tuple[int, int], end: Tuple[int, int]) -> Iterator[Tuple[int, int]]:
x1, y1 = start
x2, y2 = end
if random.random() < 0.5:
cornerX, cornerY = x2, y1
else:
cornerX, cornerY = x1, y2
for x, y in tcod.los.bresenham((x1, y1), (cornerX, cornerY)).tolist():
yield x, y
for x, y in tcod.los.bresenham((cornerX, cornerY), (x2, y2)).tolist():
yield x, y
avg_line_length: 25.610526 | max_line_length: 95 | alphanum_fraction: 0.628442 | count_classes: 690 | score_classes: 0.2836 | count_generators: 416 | score_generators: 0.170982 | count_decorators: 289 | score_decorators: 0.118783 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 128 | score_documentation: 0.05261

hexsha: b1d821122ad47a7fa47c073b2ce27f383a3871d3 | size: 1,492 | ext: py | lang: Python
repo path: examples/plot_simulate_bo.py | repo: pmdaly/supereeg | head: 750f55db3cbfc2f3430e879fecc7a1f5407282a6 | licenses: ["MIT"]
stars: 1 (2018-12-10T01:38:48.000Z to 2018-12-10T01:38:48.000Z) | issues: null | forks: 1 (2019-06-25T21:34:12.000Z to 2019-06-25T21:34:12.000Z)
content:
# -*- coding: utf-8 -*-
"""
=============================
Simulating a brain object
=============================
In this example, we demonstrate the simulate_bo function.
First, we'll load in some example locations. Then we'll simulate 1
brain object specifying a noise parameter and the correlational structure
of the data (a toeplitz matrix). We'll then subsample 10 locations from the
original brain object.
"""
# Code source: Lucy Owen & Andrew Heusser
# License: MIT
import supereeg as se
from supereeg.helpers import _corr_column
import numpy as np
# simulate 100 locations
locs = se.simulate_locations(n_elecs=100)
# simulate brain object
bo = se.simulate_bo(n_samples=1000, sample_rate=100, cov='random', locs=locs, noise =.1)
# subsample 90 locations, and get indices
sub_locs = locs.sample(90, replace=False).sort_values(['x', 'y', 'z']).index.values.tolist()
# index brain object to get sample patient
bo_sample = bo[: ,sub_locs]
# plot sample patient locations
bo_sample.plot_locs()
# plot sample patient data
bo_sample.plot_data()
# make model from brain object
r_model = se.Model(data=bo, locs=locs)
# predict
bo_s = r_model.predict(bo_sample, nearest_neighbor=False)
# find indices for reconstructed locations
recon_labels = np.where(np.array(bo_s.label) != 'observed')
# find correlations between predicted and actual data
corrs = _corr_column(bo.get_data().as_matrix(), bo_s.get_data().as_matrix())
# index reconstructed correlations
corrs[recon_labels].mean()
avg_line_length: 27.127273 | max_line_length: 92 | alphanum_fraction: 0.731233 | count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 850 | score_documentation: 0.569705

hexsha: b1d8a19c3055e7f0d5aa484065ba5f44c533be7b | size: 420 | ext: py | lang: Python
repo path: poc/classes/AuxSTRuleOfInference.py | repo: bookofproofs/fpl | head: 527b43b0f8bb3d459ee906e5ed8524a676ce3a2c | licenses: ["MIT"]
stars: 4 (2021-11-08T10:09:46.000Z to 2021-11-13T22:25:46.000Z) | issues: 1 (2020-09-04T13:02:09.000Z to 2021-06-16T07:07:44.000Z) | forks: 1 (2021-11-08T10:10:12.000Z to 2021-11-08T10:10:12.000Z)
content:
from poc.classes.AuxSTBlockWithSignature import AuxSTBlockWithSignature
from poc.classes.AuxSymbolTable import AuxSymbolTable
class AuxSTRuleOfInference(AuxSTBlockWithSignature):
def __init__(self, i):
super().__init__(AuxSymbolTable.block_ir, i)
self.zfrom = i.last_positions_by_rule['PredicateIdentifier'].pos_to_str()
self.zto = i.last_positions_by_rule['RuleOfInference'].pos_to_str()
avg_line_length: 38.181818 | max_line_length: 81 | alphanum_fraction: 0.785714 | count_classes: 291 | score_classes: 0.692857 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 38 | score_documentation: 0.090476

hexsha: b1d8cc75992fcd005adcc90ea90aa099fbd29007 | size: 5,031 | ext: py | lang: Python
repo path: examples/fmanipulator.py | repo: mateusmoutinho/python-cli-args | head: 40b758db808e96b3c12a3e0a87b6904660e90d9b | licenses: ["MIT"]
stars: null | issues: null | forks: null
content:
from io import TextIOWrapper
from cli_args_system import Args, FlagsContent
from sys import exit
HELP = """this is a basic file manipulator to demonstrate
args_system usage with file flags
-------------------flags----------------------------
-join: join the files passed and save in the --out flag
-replace: replace the text on file and save in the --out flag
if there is no out flag, it will save in the same file
-remove: remove the given text in the file
-------------------usage----------------------------
$ python3 fmanipulator.py -join a.txt b.txt -out c.txt
will join the content on a.txt and b.txt, and save in c.txt
$ python3 fmanipulator.py a.txt -replace a b
will replace the char a for char b in the a.txt file
$ python3 fmanipulator.py a.txt -replace a b -out b.txt
will replace the char a for char b and save in b.txt
$ python3 fmanipulator.py a.txt -r test
will remove the text: test in the file a.txt
$ python3 fmanipulator.py a.txt -r test -out b.txt
will remove the text: test in the file a.txt and save in b.txt"""
def exit_with_mensage(mensage:str):
"""Kill the application after printing the message \n
mensage: the message to print"""
print(mensage)
exit(1)
def get_file_text(args:Args) ->str:
"""returns the file text of args[0] (argv[0]) \n
args:The args Object"""
try:
with open(args[0],'r') as f:
return f.read()
except (FileNotFoundError,IndexError):
# if the file cannot be found, kill the application
exit_with_mensage(mensage='no file')
def get_out_wraper(args:Args,destroy_if_dont_find=True)->TextIOWrapper or None:
"""Return the output file wrapper for the out[0] flag\n
args: The args Object \n
destroy_if_dont_find: if True, exit the application
when the out[0] flag is not found"""
out = args.flags_content('out','o','out-file','outfile','out_file')
if out.filled():
return open(out[0],'w')
else:
#check if is to destroy
if destroy_if_dont_find:
exit_with_mensage(mensage='not out file')
def write_text_in_out_file_or_same_file(text:str,args:Args):
"""Write text to the file given by the out flag if it exists,
otherwise write to the same file args[0]\n
text: the text to write \n
args: The args Object \n
"""
out = get_out_wraper(args,destroy_if_dont_find=False)
#if out is not passed it replace in the same file
if out is None:
open(args[0],'w').write(text)
else:
#otherwise write in the out file
out.write(text)
def join_files(join:FlagsContent,args:Args):
"""join the files of join flag, in the out flag content
join: the join FlagsContent \n
args: The args Object"""
if len(join) < 2:
print('must bee at least 2 files')
exit(1)
full_text = ''
#make a iteration on join flag
for file_path in join:
try:
#try to open and add in the full text, the content of
#file path
with open(file_path,'r') as file:
full_text+=file.read()
except FileNotFoundError:
print(f'file {file_path} not exist')
exit(1)
#write the changes in the out file
get_out_wraper(args).write(full_text)
def replace_elements(replace:FlagsContent,args:Args):
"""replace in file (args[0) with replace[0] to replace[1]
replace: the replace FlagsContent
args: The args Object
"""
if len(replace) != 2:
exit_with_mensage(mensage='must bee two elements to replace')
#get the file of args[0]
file = get_file_text(args)
#make the replace
replaced_text = file.replace(replace[0],replace[1])
write_text_in_out_file_or_same_file(text=replaced_text,args=args)
def remove_text(remove:FlagsContent,args:Args):
"""this function remove the text in passed in the remove flags \n
remove: the remove FlagsContent \n
args: The args Object """
if not remove.filled():
exit_with_mensage('not text to remove')
text_file = get_file_text(args)
#goes in a iteration in remove flags
for text in remove:
text_file = text_file.replace(text,'')
write_text_in_out_file_or_same_file(text=text_file,args=args)
if __name__ == '__main__':
#construct the args
args = Args(convert_numbers=False)
#for help flag
help = args.flags_content('h','help')
if help.exist():
print(HELP);exit(0)
join = args.flags_content('join','j')
#if join flag exist, call the join_files
if join.exist():
join_files(join,args)
replace = args.flags_content('replace','substitute')
#if replace flag exist call the replace_elements function
if replace.exist():
replace_elements(replace,args)
remove = args.flags_content('r','remove','pop')
#if remove flag exist call the remove_text
if remove.exist():
remove_text(remove,args)
avg_line_length: 29.25 | max_line_length: 79 | alphanum_fraction: 0.650566 | count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 2,602 | score_documentation: 0.517193

hexsha: b1d9ea1eac536432c7382cf5532afaf25887bbe6 | size: 276 | ext: py | lang: Python
repo path: test/test_main.py | repo: KY64/python-starter-template | head: 6ba734cec57668db6246e85bf0c324ff04359482 | licenses: ["MIT"]
stars: null | issues: 2 (2021-09-24T12:57:15.000Z to 2021-09-24T19:55:01.000Z) | forks: null
content:
import unittest
from src.main import substract, add
class TestMain(unittest.TestCase):
def test_add(self):
result = add(9, 7)
self.assertEqual(result, 16)
def test_substract(self):
result = substract(9, 7)
self.assertEqual(result, 2)
avg_line_length: 23 | max_line_length: 36 | alphanum_fraction: 0.655797 | count_classes: 222 | score_classes: 0.804348 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 0 | score_documentation: 0

hexsha: b1da25a95c5118697812a66adf7849f4cbae7363 | size: 441 | ext: py | lang: Python
repo path: usage.py | repo: nicogetaz/cfile | head: 305a8e5fd133e4fd36d8958ede4627b008d4664a | licenses: ["MIT"]
stars: 45 (2017-11-17T04:44:29.000Z to 2022-03-30T12:30:17.000Z) | issues: 4 (2019-03-25T15:43:26.000Z to 2021-02-09T12:26:03.000Z) | forks: 18 (2017-10-12T13:24:00.000Z to 2021-12-09T05:29:54.000Z)
content:
import cfile as C
hello = C.cfile('hello.c')
hello.code.append(C.sysinclude('stdio.h'))
hello.code.append(C.blank())
hello.code.append(C.function('main', 'int',).add_param(C.variable('argc', 'int')).add_param(C.variable('argv', 'char', pointer=2)))
body = C.block(innerIndent=3)
body.append(C.statement(C.fcall('printf').add_arg(r'"Hello World!\n"')))
body.append(C.statement('return 0'))
hello.code.append(body)
print(str(hello))
avg_line_length: 40.090909 | max_line_length: 132 | alphanum_fraction: 0.693878 | count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 89 | score_documentation: 0.201814

hexsha: b1da8715774b114e58c022d18b5599a966be0c83 | size: 116 | ext: py | lang: Python
repo path: question_model.py | repo: sangethmathewjohn/PyQuiz_OOP | head: a9fdcab3884cee1b80951c1e279f5e58db981c3d | licenses: ["Unlicense"]
stars: 2 (2021-06-27T08:33:58.000Z to 2021-07-20T06:59:53.000Z) | issues: null | forks: null
content:
class Question:
def __init__(self, qtext, qanswer):
self.text = qtext
self.answer = qanswer
avg_line_length: 19.333333 | max_line_length: 40 | alphanum_fraction: 0.603448 | count_classes: 114 | score_classes: 0.982759 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 0 | score_documentation: 0

hexsha: b1daaac896ddc4849cfd241b1e6031646b780a40 | size: 156 | ext: py | lang: Python
repo path: data-science/exercicios/livro-introducao-a-programacao-com-python/capitulo-5/exercicio5-2.py | repo: joaovictor-loureiro/data-science | head: 21ad240e1db94d614e54fcb3fbf6ef74a78af9d8 | licenses: ["MIT"]
stars: null | issues: null | forks: null
content:
# Exercise 5.2 - Modify the program to display the numbers from 50 to 100.
i = 50
print('\n')
while i <= 100:
print('%d' % i)
i += 1
print('\n')
avg_line_length: 14.181818 | max_line_length: 74 | alphanum_fraction: 0.570513 | count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 88 | score_documentation: 0.556962

hexsha: b1dab95c84aa79d34c26b83dc05fe89c1233edca | size: 14,141 | ext: py | lang: Python
repo path: scripts/BaxterArmClient.py | repo: mkrizmancic/qlearn_baxter | head: 0498315212cacb40334cbb97a858c6ba317f52a3 | licenses: ["MIT"]
stars: 4 (2017-11-11T18:16:22.000Z to 2018-11-08T13:31:09.000Z) | issues: null | forks: 2 (2019-09-04T12:28:58.000Z to 2021-09-27T13:02:48.000Z)
content:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
CREDIT:
Main layout of this file was done by Lucija Kopic (graduation thesis)
at Faculty of Electrical Engineering and Computing, University of Zagreb.
"""
import math
from threading import Thread
import actionlib
import tf
from tf.transformations import quaternion_from_euler
from geometry_msgs.msg import Pose
from baxter_moveit_config.msg import baxterAction, baxterGoal
from baxter_interface import Limb
import Errors
from Util import *
# Comments beginning with "noinspection" are PyCharm auto-generated comments
# noinspection PyMethodMayBeStatic,PyUnusedLocal,PyNoneFunctionAssignment,PyRedundantParentheses,PyTypeChecker
class BaxterArmClient:
"""Actionlib client for Baxter's arm. See more here: http://wiki.ros.org/actionlib"""
def __init__(self):
"""Initialize and start actionlib client."""
self.left_client = actionlib.SimpleActionClient("baxter_action_server_left", baxterAction)
self.left_client.wait_for_server(rospy.Duration(10.0))
self.listener = tf.TransformListener()
self.left_arm = Limb('left')
# Get ROS parameters set up from launch file
self.left_rod_offset = rospy.get_param('~left_rod')
self.right_rod_offset = rospy.get_param('~right_rod')
self.center_rod_offset = rospy.get_param('~center_rod')
def transformations(self):
"""Transform rods' coordinate system to the Baxter's coordinate system."""
self.listener.waitForTransform("/base", "/rods", rospy.Time(0), rospy.Duration(8.0))
(trans, rot) = self.listener.lookupTransform('/base', '/rods', rospy.Time(0))
return trans
def position(self, target_position, trans, height):
"""
Calculate simple position of the robot's arm.
Args:
target_position (Pose): Wanted coordinates of robot's tool
trans: Calculated transformation
height (float): Height offset, depends on the number of disks on the rod
Returns:
target_position (Pose): Modified coordinates and orientation of robot's tool
"""
roll = -math.pi / 2
pitch = 0
yaw = -math.pi / 2
quaternion = tf.transformations.quaternion_from_euler(roll, pitch, yaw)
target_position.orientation.x = quaternion[0]
target_position.orientation.y = quaternion[1]
target_position.orientation.z = quaternion[2]
target_position.orientation.w = quaternion[3]
target_position.position.x = trans[0]
target_position.position.y = trans[1]
target_position.position.z = trans[2] + height
return target_position
def go_to_position(self, task, destination, height, offset_x, offset_y, offset_z):
"""
Calculate goal position, send that to the robot and wait for response.
Args:
task (string): Pick or place action
destination (int): Destination rod [0, 1, 2]
height (float): Height of the goal position (based on number of disk on the rod)
offset_x (float): Offset in robot's x axis
offset_y (float): Offset in robot's y axis
offset_z (float): Offset in robot's z axis
"""
goal = Pose()
trans = self.transformations()
if task == 'pick':
height = get_pick_height(height)
else:
height = get_place_height(height)
goal = self.position(goal, trans, height)
# Calculate offset from the markers
if destination == 0: offset_y += self.left_rod_offset
if destination == 1: offset_y += 0
if destination == 2: offset_y -= self.right_rod_offset
offset_x -= self.center_rod_offset
offset_x -= 0.1 # Moving from rod to rod should be done 10 cm in front of them
offset_x -= 0.03 # Back up a little to compensate for the width of the disks
# Update goal with calculated offsets
goal.position.x += offset_x
goal.position.y += offset_y
goal.position.z += offset_z
goal_final = baxterGoal(id=1, pose=goal)
# Send goal to Baxter arm server and wait for result
self.left_client.send_goal_and_wait(goal_final)
result = self.left_client.get_result()
if result.status:
return 1
else:
return Errors.RaiseGoToFailed(task, destination, height, offset_x, offset_y, offset_z)
def close_gripper(self):
"""Send the instruction to the robot to close the gripper."""
goal = Pose()
goal_final = baxterGoal(id=3, pose=goal)
status = self.left_client.send_goal_and_wait(goal_final)
result = self.left_client.wait_for_result()
return result
def open_gripper(self):
"""Send the instruction to the robot to open the gripper."""
goal = Pose()
goal_final = baxterGoal(id=2, pose=goal)
self.left_client.send_goal_and_wait(goal_final)
result = self.left_client.wait_for_result()
return result
def pick(self, pick_destination, pick_height):
"""
Execute the pick action.
Args:
pick_destination (int): Describes the rod from which to pick up the disk [0, 1, 2]
pick_height (float): Height from which to pick up the disk
Returns:
1 if successful, 0 otherwise
"""
self.left_arm.set_joint_position_speed(0.4) # Set higher speed for non-delicate movements
starting_height = get_pick_height(pick_height)
# Go in front of the selected rod with the necessary height
pick1 = self.go_to_position('pick', pick_destination, pick_height, 0, 0, 0)
if pick1:
user_print("PICK 1 ok", 'info')
# Move towards the rod
pick2 = self.go_to_position('pick', pick_destination, pick_height, 0.1, 0, 0)
if pick2:
user_print("PICK 2 ok", 'info')
# Close the gripper
pick3 = self.close_gripper()
if pick3:
user_print("PICK 3 ok", 'info')
# Lift vertically above the rod - 0.30 is height just above the rods
if pick_height < 3:
self.left_arm.set_joint_position_speed(0.1) # Set lower speed for delicate movements
pick4 = self.go_to_position('pick', pick_destination, pick_height, 0.1, 0, 0.30 - starting_height)
if pick4:
user_print("PICK 4 ok", 'info')
return 1
return Errors.RaisePickFailed()
def place(self, place_destination, place_height):
"""
Execute the place action.
Args:
place_destination (int): Describes the rod on which to place the disk [0, 1, 2]
place_height (float): Height to which disk should be placed
Returns:
1 if successful, 0 otherwise
"""
starting_height = get_place_height(place_height)
# Go directly above the selected rod - 0.30 is height just above the rods
self.left_arm.set_joint_position_speed(0.4) # Set higher speed for non-delicate movements
place1 = self.go_to_position('place', place_destination, place_height, 0.1, 0, 0.30 - starting_height)
if place1:
if place_height < 3:
self.left_arm.set_joint_position_speed(0.08) # Set lower speed for delicate movements
user_print("PLACE 1 OK", 'info')
# Lower vertically to the necessary height
place2 = self.go_to_position('place', place_destination, place_height, 0.1, 0, 0)
self.left_arm.set_joint_position_speed(0.4) # Set higher speed for non-delicate movements
if place2:
user_print("PLACE 2 OK", 'info')
# Open the gripper
place3 = self.open_gripper()
if place3:
user_print("PLACE 3 OK", 'info')
# Lower the arm slightly more to avoid hitting the disk
place4 = self.go_to_position('place', place_destination, place_height, 0.1, 0, -0.015)
if place4:
user_print("PLACE 4 OK", 'info')
# Move away from the rod
place5 = self.go_to_position('place', place_destination, place_height, 0, 0, -0.015)
if place5:
user_print("PLACE 5 OK", 'info')
return 1
return Errors.RaisePlaceFailed()
def pick_and_place(self, pick_destination, pick_height, place_destination, place_height):
"""
Execute 'pick and place' action.
Args:
pick_destination (int): Describes the rod from which to pick up the disk [0, 1, 2]
pick_height (int): Height from which to pick up the disk (in number of disks)
place_destination (int): Describes the rod on which to place the disk [0, 1, 2]
place_height (int): Height to which disk should be placed (in number of disks)
Returns:
1 if successful, 0 otherwise
"""
user_print("------PICK---------------", 'info')
print pick_destination, pick_height
pick = self.pick(pick_destination, pick_height)
if pick == 0:
return Errors.RaisePickAndPlaceFailed()
else:
user_print("------PLACE--------------", 'info')
print place_destination, place_height
place = self.place(place_destination, place_height, )
if place == 0:
return Errors.RaisePickAndPlaceFailed()
else:
return 1
def test_absolute(self):
"""Test robot's ability to position its gripper in absolute coordinates (base frame)."""
goal = Pose()
roll = -math.pi / 2
pitch = 0
yaw = -math.pi / 2
quaternion = tf.transformations.quaternion_from_euler(roll, pitch, yaw)
goal.orientation.x = quaternion[0]
goal.orientation.y = quaternion[1]
goal.orientation.z = quaternion[2]
goal.orientation.w = quaternion[3]
while True:
end = user_input("Zelite li nastaviti? d/n")
if end != 'd':
break
goal.position.x = float(user_input("X?"))
goal.position.y = float(user_input("Y?"))
goal.position.z = float(user_input("Z?"))
goal_final = baxterGoal(id=1, pose=goal)
self.left_client.send_goal_and_wait(goal_final)
result = self.left_client.get_result()
def test_relative(self):
"""Test robot's ability to position its gripper relative to a given marker."""
goal = Pose()
roll = -math.pi / 2
pitch = 0
yaw = -math.pi / 2
quaternion = tf.transformations.quaternion_from_euler(roll, pitch, yaw)
goal.orientation.x = quaternion[0]
goal.orientation.y = quaternion[1]
goal.orientation.z = quaternion[2]
goal.orientation.w = quaternion[3]
while True:
end = user_input("Zelite li nastaviti? d/n")
if end != 'd':
break
trans = self.transformations()
goal.position.x = trans[0]
goal.position.y = trans[1]
goal.position.z = trans[2]
offset_x = float(user_input("X?"))
offset_y = float(user_input("Y?"))
offset_z = float(user_input("Z?"))
# Uncomment for testing movement speed as well
# brzina = float(user_input("Brzina?"))
# self.left_arm.set_joint_position_speed(brzina)
goal.position.x += offset_x
goal.position.y += offset_y
goal.position.z += offset_z
goal_final = baxterGoal(id=1, pose=goal)
self.left_client.send_goal_and_wait(goal_final)
result = self.left_client.get_result()
def calibration_single(self, rod):
"""Calibrate disk positions."""
# Go to 1st disk
self.go_to_position('pick', rod, 3, 0, 0, 0)
self.go_to_position('pick', rod, 3, 0.1, 0, 0)
rospy.sleep(2)
self.go_to_position('pick', rod, 3, 0, 0, -0.015)
self.go_to_position('pick', rod, 3, 0, 0, 0)
# Go to 2nd disk
self.go_to_position('pick', rod, 2, 0, 0, 0)
self.go_to_position('pick', rod, 2, 0.1, 0, 0)
rospy.sleep(2)
self.go_to_position('pick', rod, 2, 0, 0, -0.015)
self.go_to_position('pick', rod, 2, 0, 0, 0)
# Go to 3rd disk
self.go_to_position('pick', rod, 1, 0, 0, 0)
self.go_to_position('pick', rod, 1, 0.1, 0, 0)
rospy.sleep(2)
self.go_to_position('pick', rod, 1, 0, 0, -0.015)
self.go_to_position('pick', rod, 1, 0, 0, 0)
def calibration_all(self):
"""Calibrate each rod."""
# Go to 1st, 2nd and 3rd rod and calibrate
self.calibration_single(0)
self.calibration_single(1)
self.calibration_single(2)
def start(self, pick_destination, pick_height, place_destination, place_height):
thread = Thread(target=self.pick_and_place,
args=(pick_destination, pick_height, place_destination, place_height))
thread.start()
thread.join()
def calibration(self):
thread = Thread(target=self.calibration_all)
thread.start()
thread.join()
if __name__ == '__main__':
rospy.init_node('Baxter_Client', disable_signals=True)
try:
client = BaxterArmClient()
client.start()
except rospy.ROSInterruptException:
rospy.loginfo('Terminating baxter_client.')
avg_line_length: 42.851515 | max_line_length: 119 | alphanum_fraction: 0.592391 | count_classes: 13,173 | score_classes: 0.931547 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 4,639 | score_documentation: 0.328053

hexsha: b1dbac0f835a64d0cbdbae3be2827e7023234d2d | size: 107 | ext: py | lang: Python
repo path: Factorial.py | repo: conbopbi/TEchMasterHK | head: 81452694de6b5d46a51bdf1eceb7a4346b93cc85 | licenses: ["MIT"]
stars: null | issues: null | forks: null
content:
import math
n=int(input('Nhap mot so:'))
output=math.factorial(n)
print('Giai thua cua ',n,' la: ',output)
| 21.4
| 40
| 0.682243
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 37
| 0.345794
|
b1dbc2b8aaeac4063785ede18a17f1f56b8d7356
| 86
|
py
|
Python
|
flytekit/__init__.py
|
flytehub/flytekit
|
f8f53567594069b29fcd3f99abd1da71a5ef0e22
|
[
"Apache-2.0"
] | null | null | null |
flytekit/__init__.py
|
flytehub/flytekit
|
f8f53567594069b29fcd3f99abd1da71a5ef0e22
|
[
"Apache-2.0"
] | null | null | null |
flytekit/__init__.py
|
flytehub/flytekit
|
f8f53567594069b29fcd3f99abd1da71a5ef0e22
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import absolute_import
import flytekit.plugins
__version__ = '0.3.1'
| 17.2
| 38
| 0.813953
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 7
| 0.081395
|
b1dc9ba592a6ef41c372eaa2cd477c8b9c68c9a0
| 7,289
|
py
|
Python
|
src/Navigate.py
|
Qu-Xiangjun/CQU_NK_Research_Project
|
8634ce3496801610bc94aa3a424bcd9cff8d042e
|
[
"MIT"
] | 1
|
2021-04-14T12:52:47.000Z
|
2021-04-14T12:52:47.000Z
|
src/Navigate.py
|
Qu-Xiangjun/CQU_NK_Research_Project
|
8634ce3496801610bc94aa3a424bcd9cff8d042e
|
[
"MIT"
] | null | null | null |
src/Navigate.py
|
Qu-Xiangjun/CQU_NK_Research_Project
|
8634ce3496801610bc94aa3a424bcd9cff8d042e
|
[
"MIT"
] | null | null | null |
"""
@Author: Qu Xiangjun
@Time: 2021.01.26
@Describe: Defines the thread class that performs navigation from the lidar data.
"""
import socket
import time
from threading import Thread
import threading
import numpy as np
# Python 3.8.0, 64-bit (a 32-bit Python needs the 32-bit DLL)
from ctypes import *
from Navigation_help import *
from Can_frame_help import *
VCI_USBCAN2 = 4 # device type: USBCAN-2A, USBCAN-2C or CANalyst-II
STATUS_OK = 1
# Data structure used to initialize the CAN interface
class VCI_INIT_CONFIG(Structure):
_fields_ = [("AccCode", c_uint), # acceptance code of the receive filter
("AccMask", c_uint), # acceptance mask of the receive filter
("Reserved", c_uint),
("Filter", c_ubyte), # filter mode: 0/1 accept all frames, 2 standard-frame filter, 3 extended-frame filter
# 500kbps Timing0=0x00 Timing1=0x1C
("Timing0", c_ubyte), # baud-rate timing register 0; see the SDK manual for the exact values
("Timing1", c_ubyte), # baud-rate timing register 1
("Mode", c_ubyte) # mode: 0 normal, 1 listen-only, 2 self-test
]
# Data structure describing a CAN message frame.
class VCI_CAN_OBJ(Structure):
_fields_ = [("ID", c_uint),
("TimeStamp", c_uint), # timestamp
("TimeFlag", c_ubyte), # whether the timestamp is used
("SendType", c_ubyte), # send flag; reserved, unused
("RemoteFlag", c_ubyte), # remote-frame flag
("ExternFlag", c_ubyte), # extended-frame flag
("DataLen", c_ubyte), # data length
("Data", c_ubyte*8), # payload data
("Reserved", c_ubyte*3) # reserved bits
]
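# A minimal sketch (not from the original project) of how a single frame could
# be filled in by hand; the navigation code below builds its frames through the
# Can_frame_help helpers instead, and the CAN id and payload here are illustrative:
#
#   payload = (c_ubyte * 8)(0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00)
#   frame = VCI_CAN_OBJ(ID=0x111, SendType=0, RemoteFlag=0, ExternFlag=0,
#                       DataLen=8, Data=payload)
#   canDLL.VCI_Transmit(VCI_USBCAN2, 0, 0, byref(frame), 1)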
CanDLLName = './ControlCAN.dll' # place the DLL in the matching directory
canDLL = windll.LoadLibrary('./ControlCAN.dll')
# On Linux use the line below instead; run with: python3 python3.8.0.py
#canDLL = cdll.LoadLibrary('./libcontrolcan.so')
class Navigate_Thread(threading.Thread):
"""
Navigation thread.
"""
def __init__(self,thread_draw_lidar, socket_server_thread):
"""
:param thread_draw_lidar: instance of the thread class that draws the lidar plot
:param socket_server_thread: instance of the class that streams data to the remote socket
"""
threading.Thread.__init__(self) # initialize the parent class
# lidar drawing thread
self.thread_draw_lidar = thread_draw_lidar
# thread that forwards the lidar data to the remote client
self.socket_server_thread = socket_server_thread
def run(self):
"""
Connect to the Scout Mini chassis over the CAN interface.
"""
# Open the device
ret = canDLL.VCI_OpenDevice(VCI_USBCAN2, 0, 0)
if ret == STATUS_OK:
print('VCI_OpenDevice succeeded\r\n')
if ret != STATUS_OK:
print('VCI_OpenDevice failed\r\n')
# Initialize channel 0
vci_initconfig = VCI_INIT_CONFIG(0x80000008, 0xFFFFFFFF, 0,
0, 0x00, 0x1C, 0) # 500 kbps baud rate, normal mode
ret = canDLL.VCI_InitCAN(VCI_USBCAN2, 0, 0, byref(vci_initconfig))
if ret == STATUS_OK:
print('VCI_InitCAN (channel 0) succeeded\r\n')
if ret != STATUS_OK:
print('VCI_InitCAN (channel 0) failed\r\n')
# Start the channel
ret = canDLL.VCI_StartCAN(VCI_USBCAN2, 0, 0)
if ret == STATUS_OK:
print('VCI_StartCAN (channel 0) succeeded\r\n')
if ret != STATUS_OK:
print('VCI_StartCAN (channel 0) failed\r\n')
# Put the chassis into command-control mode
ret = canDLL.VCI_Transmit(
VCI_USBCAN2, 0, 0, byref(get_start_controller_inst()), 1)
if ret == STATUS_OK:
print('CAN channel 1 send succeeded\r\n')
if ret != STATUS_OK:
print('CAN channel 1 send failed\r\n')
'''
Socket configuration
'''
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind(("localhost", 8888)) # server side: bind the socket to a network address and port
server.listen(0) # backlog sets the maximum number of queued connections
connection, address = server.accept()
print("socket connect:", connection)
print("socket ip address:", address)
global lidar_data_list
lidar_data_list = [0 for i in range(1536)] # initialize
register_direct = 0 # remembers the last turn direction: 1 = left, 0 = straight ahead, -1 = right
'''
Run the navigation loop
'''
while True:
# get lidar data
try:
recv_str = connection.recv(9216) # 1536 samples, 6 bytes each
except(ConnectionResetError):
print("[ConnectionResetError] Lost lidar socket connection.")
break
# recv_str = str(recv_str) does not work here: it keeps the b'' prefix
recv_str = recv_str.decode("GBK") # type(recv_str) = str
lidar_data_bytes = recv_str.split(",")
lidar_data_bytes = lidar_data_bytes[0:-1]
dirty_count = 0
for i in range(len(lidar_data_bytes)): # 1536 samples
lidar_data_bytes[i] = int(lidar_data_bytes[i]) # value in millimetres
if(lidar_data_bytes[i] == 0):
if(i == 0): # leave the very first sample alone
lidar_data_bytes[i] = 0
else:
lidar_data_bytes[i] = lidar_data_bytes[i-1]
dirty_count += 1
for i in range(125):
lidar_data_bytes[i] = 0
for i in range(1411,1536):
lidar_data_bytes[i] = 0
lidar_data_list = lidar_data_bytes
# if(dirty_count > 200): # too many dirty points; report an error past this threshold
# print("[WARNING] Lidar is very dirty.")
# exit(1)
print("lidar_data_list",lidar_data_list)
# report malformed data
if(len(lidar_data_list) != 1536):
print("[ERROR] Lidar frame's length is not 1536*6 bytes.")
continue
# write to a file to inspect the data
# f = open('test.txt', 'w')
# f.write(str(lidar_data_list))
# f.close()
self.thread_draw_lidar.lidar_data_list = lidar_data_list # update the drawing thread's lidar data
self.socket_server_thread.lidar_data_list = lidar_data_list # update the sender thread's lidar data
# get direction
best_direction = navigate(lidar_data_list) # direction chosen by the navigation step
print("best_direction", best_direction)
# time.sleep(1)
# send the control command to the vehicle
if(best_direction == None):
# no viable direction: rotate in place to find one
best_direction = 5
register_direct = 1
ret = canDLL.VCI_Transmit(
VCI_USBCAN2, 0, 0, get_move_inst(best_direction, 0), 1)
if ret == STATUS_OK:
print('CAN channel 1 send succeeded\r\n')
if ret != STATUS_OK:
print('CAN channel 1 send failed\r\n')
continue
# remember the turning direction
if(register_direct == -1 ): # previously turning right
if(best_direction == 0):
register_direct = 0
else:
best_direction = -5
elif(register_direct == 1 ): # previously turning left
if(best_direction == 0):
register_direct = 0
else:
best_direction = 5
else:
if(best_direction < 0):
register_direct = -1
best_direction = -5
elif(best_direction > 0):
register_direct = 1
best_direction = 5
else:
register_direct = 0
for i in range(1): # sending once is enough; raise the loop count to strengthen the control effect
ret = canDLL.VCI_Transmit(VCI_USBCAN2, 0, 0, get_move_inst(
best_direction, best_speed=default_best_speed), 1)
if ret == STATUS_OK:
print('CAN channel 1 send succeeded\r\n')
if ret != STATUS_OK:
print('CAN channel 1 send failed\r\n')
connection.close()
| 33.589862
| 89
| 0.522568
| 7,640
| 0.910066
| 0
| 0
| 0
| 0
| 0
| 0
| 3,045
| 0.362716
|
b1dd89557115038bb1f6354ded5195f9ead07ccf
| 233
|
py
|
Python
|
list02/exer_02.py
|
pedrolucas27/exercising-python
|
4b30bbce6b860fb617baf4600d8da83b68023e82
|
[
"MIT"
] | null | null | null |
list02/exer_02.py
|
pedrolucas27/exercising-python
|
4b30bbce6b860fb617baf4600d8da83b68023e82
|
[
"MIT"
] | null | null | null |
list02/exer_02.py
|
pedrolucas27/exercising-python
|
4b30bbce6b860fb617baf4600d8da83b68023e82
|
[
"MIT"
] | null | null | null |
# Write a program that asks for a value and shows on screen whether it is positive or negative.
valor = int(input("Enter a number: "))
if valor < 0:
print("The number", valor, "is negative!")
else:
print("The number", valor, "is positive!")
| 33.285714
| 88
| 0.682403
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 162
| 0.669421
|
b1de66542e990852570d0825e181d49c32975991
| 48
|
py
|
Python
|
python/testData/intentions/PyConvertToFStringIntentionTest/percentOperatorWidthAndPrecision_after.py
|
jnthn/intellij-community
|
8fa7c8a3ace62400c838e0d5926a7be106aa8557
|
[
"Apache-2.0"
] | 2
|
2019-04-28T07:48:50.000Z
|
2020-12-11T14:18:08.000Z
|
python/testData/intentions/PyConvertToFStringIntentionTest/percentOperatorWidthAndPrecision_after.py
|
jnthn/intellij-community
|
8fa7c8a3ace62400c838e0d5926a7be106aa8557
|
[
"Apache-2.0"
] | 173
|
2018-07-05T13:59:39.000Z
|
2018-08-09T01:12:03.000Z
|
python/testData/intentions/PyConvertToFStringIntentionTest/percentOperatorWidthAndPrecision_after.py
|
jnthn/intellij-community
|
8fa7c8a3ace62400c838e0d5926a7be106aa8557
|
[
"Apache-2.0"
] | 2
|
2020-03-15T08:57:37.000Z
|
2020-04-07T04:48:14.000Z
|
f'{1:.5d} {2:3.5d} {3:3d} {"spam":>20} {4:<#d}'
| 24
| 47
| 0.395833
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 47
| 0.979167
|
b1dea7e1058a7eee3b72428c420020b2fdd458a2
| 6,346
|
py
|
Python
|
ansys/dpf/core/operators/math/make_one_on_comp.py
|
jfthuong/pydpf-core
|
bf2895ebc546e0004f759289bfc9a23196559ac3
|
[
"MIT"
] | 18
|
2021-10-16T10:38:29.000Z
|
2022-03-29T11:26:42.000Z
|
ansys/dpf/core/operators/math/make_one_on_comp.py
|
jfthuong/pydpf-core
|
bf2895ebc546e0004f759289bfc9a23196559ac3
|
[
"MIT"
] | 79
|
2021-10-11T23:18:54.000Z
|
2022-03-29T14:53:14.000Z
|
ansys/dpf/core/operators/math/make_one_on_comp.py
|
jfthuong/pydpf-core
|
bf2895ebc546e0004f759289bfc9a23196559ac3
|
[
"MIT"
] | 5
|
2021-11-29T18:35:37.000Z
|
2022-03-16T16:49:21.000Z
|
"""
make_one_on_comp
================
Autogenerated DPF operator classes.
"""
from warnings import warn
from ansys.dpf.core.dpf_operator import Operator
from ansys.dpf.core.inputs import Input, _Inputs
from ansys.dpf.core.outputs import Output, _Outputs
from ansys.dpf.core.operators.specification import PinSpecification, Specification
class make_one_on_comp(Operator):
"""take the input field's scoping and create a field full of zeros,
except for the indexes from pin 1 that will hold 1.0.
Parameters
----------
fieldA : Field
scalar_int : int
Examples
--------
>>> from ansys.dpf import core as dpf
>>> # Instantiate operator
>>> op = dpf.operators.math.make_one_on_comp()
>>> # Make input connections
>>> my_fieldA = dpf.Field()
>>> op.inputs.fieldA.connect(my_fieldA)
>>> my_scalar_int = int()
>>> op.inputs.scalar_int.connect(my_scalar_int)
>>> # Instantiate operator and connect inputs in one line
>>> op = dpf.operators.math.make_one_on_comp(
... fieldA=my_fieldA,
... scalar_int=my_scalar_int,
... )
>>> # Get output data
>>> result_field = op.outputs.field()
"""
def __init__(self, fieldA=None, scalar_int=None, config=None, server=None):
super().__init__(name="make_one_on_comp", config=config, server=server)
self._inputs = InputsMakeOneOnComp(self)
self._outputs = OutputsMakeOneOnComp(self)
if fieldA is not None:
self.inputs.fieldA.connect(fieldA)
if scalar_int is not None:
self.inputs.scalar_int.connect(scalar_int)
@staticmethod
def _spec():
description = """take the input field's scoping and create a field full of zeros,
except for the indexes from pin 1 that will hold 1.0."""
spec = Specification(
description=description,
map_input_pin_spec={
0: PinSpecification(
name="fieldA",
type_names=["field"],
optional=False,
document="""""",
),
1: PinSpecification(
name="scalar_int",
type_names=["int32"],
optional=False,
document="""""",
),
},
map_output_pin_spec={
0: PinSpecification(
name="field",
type_names=["field"],
optional=False,
document="""""",
),
},
)
return spec
@staticmethod
def default_config(server=None):
"""Returns the default config of the operator.
This config can then be changed to the user needs and be used to
instantiate the operator. The Configuration allows to customize
how the operation will be processed by the operator.
Parameters
----------
server : server.DPFServer, optional
Server with channel connected to the remote or local instance. When
``None``, attempts to use the global server.
"""
return Operator.default_config(name="make_one_on_comp", server=server)
@property
def inputs(self):
"""Enables to connect inputs to the operator
Returns
--------
inputs : InputsMakeOneOnComp
"""
return super().inputs
@property
def outputs(self):
"""Enables to get outputs of the operator by evaluationg it
Returns
--------
outputs : OutputsMakeOneOnComp
"""
return super().outputs
class InputsMakeOneOnComp(_Inputs):
"""Intermediate class used to connect user inputs to
make_one_on_comp operator.
Examples
--------
>>> from ansys.dpf import core as dpf
>>> op = dpf.operators.math.make_one_on_comp()
>>> my_fieldA = dpf.Field()
>>> op.inputs.fieldA.connect(my_fieldA)
>>> my_scalar_int = int()
>>> op.inputs.scalar_int.connect(my_scalar_int)
"""
def __init__(self, op: Operator):
super().__init__(make_one_on_comp._spec().inputs, op)
self._fieldA = Input(make_one_on_comp._spec().input_pin(0), 0, op, -1)
self._inputs.append(self._fieldA)
self._scalar_int = Input(make_one_on_comp._spec().input_pin(1), 1, op, -1)
self._inputs.append(self._scalar_int)
@property
def fieldA(self):
"""Allows to connect fieldA input to the operator.
Parameters
----------
my_fieldA : Field
Examples
--------
>>> from ansys.dpf import core as dpf
>>> op = dpf.operators.math.make_one_on_comp()
>>> op.inputs.fieldA.connect(my_fieldA)
>>> # or
>>> op.inputs.fieldA(my_fieldA)
"""
return self._fieldA
@property
def scalar_int(self):
"""Allows to connect scalar_int input to the operator.
Parameters
----------
my_scalar_int : int
Examples
--------
>>> from ansys.dpf import core as dpf
>>> op = dpf.operators.math.make_one_on_comp()
>>> op.inputs.scalar_int.connect(my_scalar_int)
>>> # or
>>> op.inputs.scalar_int(my_scalar_int)
"""
return self._scalar_int
class OutputsMakeOneOnComp(_Outputs):
"""Intermediate class used to get outputs from
make_one_on_comp operator.
Examples
--------
>>> from ansys.dpf import core as dpf
>>> op = dpf.operators.math.make_one_on_comp()
>>> # Connect inputs : op.inputs. ...
>>> result_field = op.outputs.field()
"""
def __init__(self, op: Operator):
super().__init__(make_one_on_comp._spec().outputs, op)
self._field = Output(make_one_on_comp._spec().output_pin(0), 0, op)
self._outputs.append(self._field)
@property
def field(self):
"""Allows to get field output of the operator
Returns
----------
my_field : Field
Examples
--------
>>> from ansys.dpf import core as dpf
>>> op = dpf.operators.math.make_one_on_comp()
>>> # Connect inputs : op.inputs. ...
>>> result_field = op.outputs.field()
""" # noqa: E501
return self._field
| 29.654206
| 89
| 0.578159
| 6,001
| 0.945635
| 0
| 0
| 3,372
| 0.531358
| 0
| 0
| 3,657
| 0.576269
|
b1e15b56feda70e36690890e29f9ba4efcc55495
| 8,716
|
py
|
Python
|
cheshire3/web/www_utils.py
|
cheshire3/cheshire3
|
306348831ec110229c78a7c5f0f2026a0f394d2c
|
[
"Python-2.0",
"Unlicense"
] | 3
|
2015-08-02T09:03:28.000Z
|
2017-12-06T09:26:14.000Z
|
cheshire3/web/www_utils.py
|
cheshire3/cheshire3
|
306348831ec110229c78a7c5f0f2026a0f394d2c
|
[
"Python-2.0",
"Unlicense"
] | 5
|
2015-08-17T01:16:35.000Z
|
2015-09-16T21:51:27.000Z
|
cheshire3/web/www_utils.py
|
cheshire3/cheshire3
|
306348831ec110229c78a7c5f0f2026a0f394d2c
|
[
"Python-2.0",
"Unlicense"
] | 6
|
2015-05-17T15:32:20.000Z
|
2020-04-22T08:43:16.000Z
|
#
# Program: www_utils.py
# Version: 0.10
# Description:
# Generic search functions for Cheshire 3
#
# Language: Python
# Author: John Harrison <john.harrison@liv.ac.uk>
# Date: 19 December 2007
#
# Copyright: © University of Liverpool 2005-2007
#
# Version History:
# 0.01 - 13/04/2005 - JH - Ported from Cheshire II compatible scripts
# 0.02 - 14/06/2005 - JH - Improved CGI encoding/decoding
# - Mixed phrase and plain term searching handled
# (e.g. wyndham "science fiction" triffids)
# 0.03 - 17/10/2005 - JH - File logger class added
# keeps all logs for a single request in mem until complete, then flushes to file
# - html_encode() added to allow display of raw SGML in the browser
# 0.04 - 26/01/2006 - JH - Modifications to cgiReplacements
# 0.05 - 31/01/2006 - JH - More tweaks to cgiReplacement characters
# - Speech marks handled sensibly in exact or /string searches
# 0.06 - 27/02/2006 - JH - Booleans extracted first in generate_cqlQuery() - debugs 'NOT' searches
# 0.07 - 04/01/2007 - JH - Check for noComponents moved out of generic generate_cqlQuery function
# - Allow limit to collection
# 0.08 - 25/01/2007 - JH - Mods to allow date searching - decode < > etc from form
# 0.09 - 07/09/2007 - JH - renamed: wwwSearch.py --> www_utils.py
# 0.10 - 19/12/2007 - JH - handling of form character set implemented
# - can handle multiple indexes to be specified in fieldidx
# multiple indexes combine with or/relevant/proxinfo
#
import re
import time
import urlparse
from urllib import unquote
class FieldStorageDict(dict):
"""A sub-class of dict to behave like FieldStorage for testing.
Note, does not support multiple values for the same key.
"""
def getfirst(self, key, default=None):
return self.get(key, default)
def getlist(self, key):
val = self.get(key)
if val:
return [val]
return []
def generate_cqlQuery(form):
global phraseRe
formcodec = form.getfirst('_charset_', 'utf-8')
qClauses = []
bools = []
i = 1
while 'fieldcont{0}'.format(i) in form:
boolean = form.getfirst('fieldbool{0}'.format(i - 1),
'and/relevant/proxinfo'
)
bools.append(boolean)
i += 1
i = 1
while 'fieldcont{0}'.format(i) in form:
cont = form.getfirst('fieldcont{0}'.format(i))
if isinstance(cont, unicode):
# Encode any unicode back to raw byte string for compatibility
# with legacy code
cont = cont.encode(formcodec)
idxs = unquote(
form.getfirst('fieldidx{0}'.format(i),
'cql.anywhere'
)
)
rel = unquote(
form.getfirst('fieldrel{0}'.format(i),
'all/relevant/proxinfo'
)
)
idxClauses = []
# In case they're trying to do phrase searching
if (
rel.startswith('exact') or
rel.startswith('=') or
'/string' in rel
):
# Don't allow phrase searching for exact or /string searches
cont = cont.replace('"', '\\"')
for idx in idxs.split('||'):
subClauses = []
if (rel.startswith('all')):
subBool = ' and/relevant/proxinfo '
else:
subBool = ' or/relevant/proxinfo '
# In case they're trying to do phrase searching
if (
'exact' in rel or
'=' in rel or
'/string' in rel
):
# Don't allow phrase searching for exact or /string searches
# we already did quote escaping
subcont = cont
else:
phrases = phraseRe.findall(cont)
for ph in phrases:
subClauses.append(
'({0} =/relevant/proxinfo {1})'.format(idx, ph)
)
subcont = phraseRe.sub('', cont)
if (idx and rel and subcont):
subClauses.append(
'{0} {1} "{2}"'.format(idx, rel, subcont.strip())
)
if (len(subClauses)):
idxClauses.append('({0})'.format(subBool.join(subClauses)))
qClauses.append(
'({0})'.format(' or/rel.combine=sum/proxinfo '.join(idxClauses))
)
# If there's another clause and a corresponding boolean
try:
qClauses.append(bools[i])
except:
break
i += 1
qString = ' '.join(qClauses)
return qString.decode(formcodec).encode('utf8')
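# A minimal usage sketch (not part of the original module; field values are
# illustrative), e.g. from an interactive Python 2 session. FieldStorageDict
# above stands in for cgi.FieldStorage and uses the fieldidx<N>/fieldrel<N>/
# fieldcont<N> naming that generate_cqlQuery reads:
#
#   form = FieldStorageDict({'fieldidx1': 'cql.anywhere',
#                            'fieldrel1': 'all/relevant/proxinfo',
#                            'fieldcont1': 'wyndham "science fiction" triffids'})
#   query = generate_cqlQuery(form)
#
# The result combines a phrase clause for "science fiction" with a
# relevance-ranked clause for the remaining terms.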
def parse_url(url):
u"""Parse a URL to split it into its component parts."""
bits = urlparse.urlsplit(url)
print bits
transport = bits[0]
uphp = bits[1].split('@')
user = ''
passwd = ''
if len(uphp) == 2:
(user, passwd) = uphp.pop(0).split(':')
hp = uphp[0].split(':')
host = hp[0]
if len(hp) == 2:
port = int(hp[1])
else:
# Require subclass to default
port = 0
dirname, filename = bits[2].rsplit('/', 1)
# params = map(lambda x: x.split('='), bits[3].split('&'))
params = [x.split('=') for x in bits[3].split('&')]
try:
params = dict(params)
except ValueError:
params = {}
anchor = bits[4]
return (transport, user, passwd, host, port, dirname, filename, params, anchor)
phraseRe = re.compile('".*?"')
cgiReplacements = {
#'%': '%25',
'+': '%2B',
' ': '%20',
'<': '%3C',
'>': '%3E',
'#': '%23',
'{': '%7B',
'}': '%7D',
'|': '%7C',
'"': '%22',
"'": '%27',
'^': '%5E',
'~': '%7E',
'[': '%5B',
']': '%5D',
'`': '%60',
';': '%3B',
'/': '%2F',
'?': '%3F',
':': '%3A',
'@': '%40',
'=': '%3D',
'&': '%26',
'$': '%24'
#'=': "%3D",
#'\n\t': "%0A",
#',': "%2C",
#'\'': "%27",
#'/': "%2F",
#'"': "%22",
#'@': "%40",
#'#': "%23",
#'{': "%7B",
#'}': "%7D",
#'[': "%5B",
#']': "%5D",
#'\\': "%5C",
#';': "%3B"
}
def cgi_encode(txt):
global cgiReplacements
txt = txt.replace('%', '%25')
#txt = txt.strip()
for key, val in cgiReplacements.iteritems():
txt = txt.replace(key, val)
return txt
#- end cgi_encode
def cgi_decode(txt):
global cgiReplacements
#txt = txt.strip()
for key, val in cgiReplacements.iteritems():
txt = txt.replace(val, key)
txt = txt.replace('%25', '%')
return txt
#- end cgi_decode
rawSgmlReplacements = {'<': '&lt;'
,'>': '&gt;'
,"'": '&apos;'
,'"': '&quot;'
}
def html_encode(txt):
global rawSgmlReplacements
txt = txt.replace('&', '&amp;')
for key, val in rawSgmlReplacements.iteritems():
txt = txt.replace(key, val)
return txt
#- end html_encode
def multiReplace(txt, params):
for k,v in params.iteritems():
try:
txt = txt.replace(k,unicode(v).encode('ascii', 'xmlcharrefreplace'))
except UnicodeDecodeError:
txt = txt.replace(k,unicode(v, 'utf8').encode('ascii', 'xmlcharrefreplace'))
return txt
#- end multiReplace
def read_file(fileName):
fileH = open(fileName, 'r')
cont = fileH.read()
fileH.close()
return cont
#- end read_file()
def write_file(fileName, txt):
fileH = open(fileName, 'w')
cont = fileH.write(txt)
fileH.close()
#- end write_file()
class FileLogger:
u"""DEPRECATED: A quick and dirty transaction logger that isn't actually a Cheshire3 object and doesn't match the API.
Please use cheshire3.web.logger.TransactionLogger instead.
"""
st = None
llt = None
fp = None
rh = None
lsl = None
def __init__(self, path, rh):
self.st = time.time()
self.llt = self.st
self.fp = path
self.rh = rh
self.lsl = ['\n[%s]: Request received from %s' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(self.st)), self.rh)]
def log(self,txt):
now = time.time()
diff = now - self.llt
self.lsl.append('...[+%f s]: %s' % (diff, txt))
self.llt = now
def flush(self):
now = time.time()
total = now - self.st
self.lsl.append('...Total time: %f secs' % (total))
fileh = file(self.fp, 'a')
fileh.write('\n'.join(self.lsl))
fileh.close()
#- end class FileLogger ---------------------------------------------------
| 27.495268
| 128
| 0.519504
| 1,321
| 0.15156
| 0
| 0
| 0
| 0
| 0
| 0
| 3,695
| 0.423933
|
b1e3076f57089de6bfe7eeff45ef0b802cbca8fa
| 5,057
|
py
|
Python
|
superviselySDK/supervisely_lib/geometry/bitmap_base.py
|
nicehuster/mmdetection-supervisely-person-datasets
|
ff1b57e16a71378510571dbb9cebfdb712656927
|
[
"Apache-2.0"
] | 40
|
2019-05-05T08:08:18.000Z
|
2021-10-17T00:07:58.000Z
|
superviselySDK/supervisely_lib/geometry/bitmap_base.py
|
nicehuster/mmdetection-supervisely-person-datasets
|
ff1b57e16a71378510571dbb9cebfdb712656927
|
[
"Apache-2.0"
] | 8
|
2019-06-13T06:00:08.000Z
|
2021-07-24T05:25:33.000Z
|
superviselySDK/supervisely_lib/geometry/bitmap_base.py
|
nicehuster/mmdetection-supervisely-person-datasets
|
ff1b57e16a71378510571dbb9cebfdb712656927
|
[
"Apache-2.0"
] | 6
|
2019-07-30T06:36:27.000Z
|
2021-06-03T11:57:36.000Z
|
# coding: utf-8
import numpy as np
from supervisely_lib.geometry.constants import DATA, ORIGIN
from supervisely_lib.geometry.geometry import Geometry
from supervisely_lib.geometry.point_location import PointLocation
from supervisely_lib.geometry.rectangle import Rectangle
from supervisely_lib.imaging.image import resize_inter_nearest, restore_proportional_size
# TODO: rename to resize_bitmap_and_origin
def resize_origin_and_bitmap(origin: PointLocation, bitmap: np.ndarray, in_size, out_size):
new_size = restore_proportional_size(in_size=in_size, out_size=out_size)
row_scale = new_size[0] / in_size[0]
col_scale = new_size[1] / in_size[1]
# TODO: Double check (+restore_proportional_size) or not? bitmap.shape and in_size are equal?
# Make sure the resulting size has at least one pixel in every direction (i.e. limit the shrinkage to avoid having
# empty bitmaps as a result).
scaled_rows = max(round(bitmap.shape[0] * row_scale), 1)
scaled_cols = max(round(bitmap.shape[1] * col_scale), 1)
scaled_origin = PointLocation(row=round(origin.row * row_scale), col=round(origin.col * col_scale))
scaled_bitmap = resize_inter_nearest(bitmap, (scaled_rows, scaled_cols))
return scaled_origin, scaled_bitmap
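# A minimal usage sketch (not part of the original SDK; the sizes are
# illustrative): shrink a 10x20 mask anchored at (row=2, col=4) when the image
# is resized proportionally from 100x200 towards 50x100.
#
#   origin = PointLocation(row=2, col=4)
#   bitmap = np.ones((10, 20), dtype=bool)
#   new_origin, new_bitmap = resize_origin_and_bitmap(origin, bitmap,
#                                                     in_size=(100, 200),
#                                                     out_size=(50, 100))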
class BitmapBase(Geometry):
def __init__(self, data: np.ndarray, origin: PointLocation=None, expected_data_dims=None):
"""
:param origin: PointLocation
:param data: np.ndarray
"""
if origin is None:
origin = PointLocation(row=0, col=0)
if not isinstance(origin, PointLocation):
raise TypeError('BitmapBase "origin" argument must be "PointLocation" object!')
if not isinstance(data, np.ndarray):
raise TypeError('BitmapBase "data" argument must be numpy array object!')
data_dims = len(data.shape)
if expected_data_dims is not None and data_dims != expected_data_dims:
raise ValueError('BitmapBase "data" argument must be a {}-dimensional numpy array. '
'Instead got {} dimensions'.format(expected_data_dims, data_dims))
self._origin = origin.clone()
self._data = data.copy()
@classmethod
def _impl_json_class_name(cls):
"""Descendants must implement this to return key string to look up serialized representation in a JSON dict."""
raise NotImplementedError()
@staticmethod
def base64_2_data(s: str) -> np.ndarray:
raise NotImplementedError()
@staticmethod
def data_2_base64(data: np.ndarray) -> str:
raise NotImplementedError()
def to_json(self):
return {
self._impl_json_class_name(): {
ORIGIN: [self.origin.col, self.origin.row],
DATA: self.data_2_base64(self.data)
}
}
@classmethod
def from_json(cls, json_data):
json_root_key = cls._impl_json_class_name()
if json_root_key not in json_data:
raise ValueError(
'Data must contain {} field to create MultichannelBitmap object.'.format(json_root_key))
if ORIGIN not in json_data[json_root_key] or DATA not in json_data[json_root_key]:
raise ValueError('{} field must contain {} and {} fields to create MultichannelBitmap object.'.format(
json_root_key, ORIGIN, DATA))
col, row = json_data[json_root_key][ORIGIN]
data = cls.base64_2_data(json_data[json_root_key][DATA])
return cls(data=data, origin=PointLocation(row=row, col=col))
@property
def origin(self) -> PointLocation:
return self._origin.clone()
@property
def data(self) -> np.ndarray:
return self._data.copy()
def translate(self, drow, dcol):
translated_origin = self.origin.translate(drow, dcol)
return self.__class__(data=self.data, origin=translated_origin)
def fliplr(self, img_size):
flipped_mask = np.flip(self.data, axis=1)
flipped_origin = PointLocation(row=self.origin.row, col=(img_size[1] - flipped_mask.shape[1] - self.origin.col))
return self.__class__(data=flipped_mask, origin=flipped_origin)
def flipud(self, img_size):
flipped_mask = np.flip(self.data, axis=0)
flipped_origin = PointLocation(row=(img_size[0] - flipped_mask.shape[0] - self.origin.row), col=self.origin.col)
return self.__class__(data=flipped_mask, origin=flipped_origin)
def scale(self, factor):
new_rows = round(self._data.shape[0] * factor)
new_cols = round(self._data.shape[1] * factor)
mask = self._resize_mask(self.data, new_rows, new_cols)
origin = self.origin.scale(factor)
return self.__class__(data=mask, origin=origin)
@staticmethod
def _resize_mask(mask, out_rows, out_cols):
return resize_inter_nearest(mask.astype(np.uint8), (out_rows, out_cols)).astype(np.bool)
def to_bbox(self):
return Rectangle.from_array(self._data).translate(drow=self._origin.row, dcol=self._origin.col)
| 40.782258
| 120
| 0.686969
| 3,796
| 0.750643
| 0
| 0
| 1,431
| 0.282974
| 0
| 0
| 842
| 0.166502
|
b1e369fb08913d130b89cec1f5483abc5621f780
| 851
|
py
|
Python
|
src/secml/ml/classifiers/gradients/mixin_classifier_gradient_sgd.py
|
zangobot/secml
|
95a293e1201c24256eb7fe2f1d2125cd5f318c8c
|
[
"Apache-2.0"
] | 63
|
2020-04-20T16:31:16.000Z
|
2022-03-29T01:05:35.000Z
|
src/secml/ml/classifiers/gradients/mixin_classifier_gradient_sgd.py
|
zangobot/secml
|
95a293e1201c24256eb7fe2f1d2125cd5f318c8c
|
[
"Apache-2.0"
] | 5
|
2020-04-21T11:31:39.000Z
|
2022-03-24T13:42:56.000Z
|
src/secml/ml/classifiers/gradients/mixin_classifier_gradient_sgd.py
|
zangobot/secml
|
95a293e1201c24256eb7fe2f1d2125cd5f318c8c
|
[
"Apache-2.0"
] | 8
|
2020-04-21T09:16:42.000Z
|
2022-02-23T16:28:43.000Z
|
"""
.. module:: CClassifierGradientSGDMixin
:synopsis: Mixin for SGD classifier gradients.
.. moduleauthor:: Ambra Demontis <ambra.demontis@unica.it>
.. moduleauthor:: Marco Melis <marco.melis@unica.it>
"""
from secml.array import CArray
from secml.ml.classifiers.gradients import CClassifierGradientLinearMixin
class CClassifierGradientSGDMixin(CClassifierGradientLinearMixin):
"""Mixin class for CClassifierSGD gradients."""
# train derivatives:
def grad_tr_params(self, x, y):
"""
Derivative of the classifier training objective function w.r.t. the
classifier parameters.
Parameters
----------
x : CArray
Features of the dataset on which the training objective is computed.
y : CArray
Dataset labels.
"""
raise NotImplementedError
| 26.59375
| 80
| 0.680376
| 531
| 0.623972
| 0
| 0
| 0
| 0
| 0
| 0
| 585
| 0.687427
|
b1e811f35a54f18497c53bcd1b57a0f2b90a05d1
| 455
|
py
|
Python
|
src/epyodbc/constructs/base_class.py
|
kingspp/epyodbc
|
601ea659c243e7128f946fed264a095f82b25f8a
|
[
"MIT"
] | null | null | null |
src/epyodbc/constructs/base_class.py
|
kingspp/epyodbc
|
601ea659c243e7128f946fed264a095f82b25f8a
|
[
"MIT"
] | null | null | null |
src/epyodbc/constructs/base_class.py
|
kingspp/epyodbc
|
601ea659c243e7128f946fed264a095f82b25f8a
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
| **@created on:** 9/4/20,
| **@author:** prathyushsp,
| **@version:** v0.0.1
|
| **Description:**
|
|
| **Sphinx Documentation Status:**
"""
from abc import ABCMeta, abstractmethod
import json
class BaseClass(metaclass=ABCMeta):
@abstractmethod
def pretty(self):
pass
def toJSON(self):
return json.dumps(self, default=lambda o: o.__dict__,
sort_keys=True, indent=2)
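# A minimal sketch (not part of the package) of how BaseClass might be
# subclassed; the construct name and field are purely illustrative:
#
#   class Table(BaseClass):
#       def __init__(self, name):
#           self.name = name
#
#       def pretty(self):
#           return "Table<{}>".format(self.name)
#
#   Table("users").toJSON()   # -> '{\n  "name": "users"\n}'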
| 18.2
| 61
| 0.586813
| 228
| 0.501099
| 0
| 0
| 50
| 0.10989
| 0
| 0
| 170
| 0.373626
|
b1e81c6b8160d531e4161ae5339f729307b428e9
| 1,891
|
py
|
Python
|
madlib4.py
|
Leorodr501/Mad-Libs
|
0d619908ad4b1f73365e86345dd4023f9aa7f72d
|
[
"MIT"
] | null | null | null |
madlib4.py
|
Leorodr501/Mad-Libs
|
0d619908ad4b1f73365e86345dd4023f9aa7f72d
|
[
"MIT"
] | null | null | null |
madlib4.py
|
Leorodr501/Mad-Libs
|
0d619908ad4b1f73365e86345dd4023f9aa7f72d
|
[
"MIT"
] | null | null | null |
adjective = input("Enter an adjective: ")
adjective = input("Enter another adjective: ")
noun = input("Enter a noun: ")
noun = input("Enter another noun: ")
plural_noun = input("Enter a plural noun: ")
game = input("Enter a game: ")
plural_noun = input("Enter another plural noun: ")
verb = input("Enter a verb ending in 'ing': ")
verb = input("Enter another verb ending in 'ing': ")
plural_noun = input("Enter another plural noun: ")
verb = input("Enter another verb ending in 'ing': ")
noun = input("Enter another noun: ")
plant = input("Enter a plant: ")
body_part = input("Enter a part of the body: ")
place = input("Enter a place: ")
verb = input("Enter another verb ending in 'ing': ")
adjective = input("Enter another adjective: ")
number = input("Enter a number <100: ")
plural_noun = input("Enter another plural noun: ")
print("A vacation is when you take a trip to some " + adjective + " place"),
print(" with you " + adjective + " family.")
print("Usually you go to some place thatis near a/an " + noun ),
print(" or up on a/an " + noun + ".")
print("A good vacation place is one where you can ride " + plural_noun ),
print(" or play " + game + " or go hunting for " + plural_noun + ".")
print("I like to spend my time " + verb + " or " + verb + ".")
print("When parents go on a vacation, they spend their time eating three " + plural_noun + " a day,")
print(" and fathers play golf, and mothers sit around " + verb + ".")
print("Last summer, my little brother fell in a/an " + noun + " and got poison " + plant ),
print(" all over his " + body_part + ".")
print("My family is going to go to (the) " + place + ", and I will practice " + verb + ".")
print("Parents need vacations more than kids because parents are always very " + adjective + " and because they have to work " + number + " hours every day all year making enough " + plural_noun + " to pay for the vacation.")
| 51.108108
| 225
| 0.668429
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,205
| 0.637229
|
b1ea115bb0ded34de6025f440ccc8b24a383a6be
| 5,854
|
py
|
Python
|
proto_5/connect/__main__.py
|
jadnohra/connect
|
8eb21e6f122898094447bc3d5edb3053d5a2adf2
|
[
"Unlicense"
] | null | null | null |
proto_5/connect/__main__.py
|
jadnohra/connect
|
8eb21e6f122898094447bc3d5edb3053d5a2adf2
|
[
"Unlicense"
] | 6
|
2021-03-19T12:06:56.000Z
|
2022-03-12T00:23:09.000Z
|
proto_5/connect/__main__.py
|
jadnohra/connect
|
8eb21e6f122898094447bc3d5edb3053d5a2adf2
|
[
"Unlicense"
] | null | null | null |
import argparse
import os
import logging
import sys
from types import SimpleNamespace
from .util.print_tree import print_tree
def data_to_card_path(card_dir, fpath):
return os.path.splitext("/" + os.path.relpath(fpath, start=card_dir))[0]
class Card:
def __init__(self, card_dir, fpath):
self._fpath = fpath
self._card_path = data_to_card_path(card_dir, fpath)
@property
def card_path(self):
return self._card_path
@property
def card_dir(self):
return Card.to_card_dir(self._card_path)
@property
def card_name(self):
return Card.to_card_name(self._card_path)
def to_card_dir(card_path):
return os.path.dirname(card_path)
def to_card_name(card_path):
return os.path.basename(card_path)
def is_connect_script_file(filename):
return os.path.basename(filename) == "CONNECT"
def is_card_file(filename):
return filename.endswith('.yaml') or filename == "CONNECT"
def is_connect_file(filename):
return any([predicate(filename)
for predicate in [is_card_file, is_connect_script_file]])
def parse_connect_files(card_dir):
connect_files = []
for dirpath, dirnames, filenames in os.walk(card_dir):
for filename in filenames:
if is_connect_file(filename):
connect_files.append(os.path.join(dirpath, filename))
return connect_files
class CardTree:
class Node:
def __init__(self, name):
self._name = name
def print(self):
print_tree(self,
lambda node: node.children.values()
if type(node) == CardTree.DirNode
else [],
lambda node: node.name)
@property
def name(self):
return self._name
class DirNode(Node):
def __init__(self, name):
super().__init__(name)
self.children = {}
class CardNode(Node):
def __init__(self, card):
super().__init__(card.card_name)
self.card = card
def __init__(self):
self._root_node = self.DirNode(".")
def _ensure_child_dir_node(self, parent_dir, nodename):
if nodename not in parent_dir.children:
parent_dir.children[nodename] = self.DirNode(nodename)
return parent_dir.children[nodename]
def _get_card_dir_node(self, card_dir):
dir_parts = card_dir.split(os.sep)[1:]
path_node = self._root_node
for dirname in dir_parts:
path_node = self._ensure_child_dir_node(path_node, dirname)
return path_node
def add_card_node(self, card):
dir_node = self._get_card_dir_node(card.card_dir)
dir_node.children[card.card_name] = self.CardNode(card)
def get_card(self, card_path):
dir_node = self._get_card_dir_node(Card.to_card_dir(card_path))
return dir_node.children.get(Card.to_card_name(card_path), None)
@property
def root(self):
return self._root_node
def print(self):
self._root_node.print()
def apply_connect_script(card_dir, script_file, card_tree):
def get_card(card_path, card_tree):
card = card_tree.get_card(card_path)
if card is None:
logging.error(f"Card {card_path} mentioned in {script_file} not found")
return card
def get_cards(script_card_dir, pattern, card_tree):
if pattern.startswith("/"):
return [get_card(pattern, card_tree)]
elif pattern == "*":
# TODO this is slow, we need a tree structure
return []
else:
return [get_card(os.path.join(script_card_dir, pattern), card_tree)]
def apply_relate_cmd(script_card_dir, cmd, card_tree):
src_pat, rel_pat, target_pat = cmd
src_cards = get_cards(script_card_dir, src_pat, card_tree)
rel_cards = get_cards(script_card_dir, rel_pat, card_tree)
target_cards = get_cards(script_card_dir, target_pat, card_tree)
script_lines = open(script_file).readlines()
line_index = 0
relate_cmds = []
while line_index < len(script_lines):
line = script_lines[line_index]
if line.strip() == "relate":
cmd = tuple([arg.strip()
for arg in script_lines[line_index+1: line_index+4]])
relate_cmds.append(cmd)
line_index = line_index + 4
else:
line_index = line_index + 1
script_card_dir = data_to_card_path(card_dir, os.path.dirname(script_file))
for relate_cmd in relate_cmds:
apply_relate_cmd(script_card_dir, relate_cmd, card_tree)
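# An illustrative CONNECT script accepted by the parser above (card names are
# hypothetical). Each "relate" keyword is followed by three lines: a source
# pattern, a relation, and a target pattern; a leading "/" makes a pattern
# absolute, otherwise it is resolved relative to the script's own card
# directory, and indentation is ignored because every line is stripped:
#
#   relate
#       python
#       depends-on
#       /topics/programming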
def parse_card_tree_from_files(card_dir, connect_files):
card_tree = CardTree()
script_files = []
for connect_file in connect_files:
if is_card_file(connect_file):
card = Card(card_dir, connect_file)
card_tree.add_card_node(card)
elif is_connect_script_file(connect_file):
script_files.append(connect_file)
card_tree.print()
for script_file in script_files:
apply_connect_script(card_dir, script_file, card_tree)
return card_tree
def parse_card_tree(data_dir):
connect_files = parse_connect_files(data_dir)
print(connect_files)
card_tree = parse_card_tree_from_files(data_dir, connect_files)
card_tree.print()
def main(args):
logging.basicConfig(format='%(levelname)s: %(message)s',
level="DEBUG" if args.log_debug else "INFO")
parse_card_tree(args.data_dir)
return os.EX_OK
parser = argparse.ArgumentParser()
parser.add_argument("data_dir", help="The directory containing our data")
parser.add_argument("--log-debug", help="Log debug information",
default=False, action="store_true")
sys.exit(main(parser.parse_args()))
| 34.233918
| 83
| 0.654766
| 2,249
| 0.384182
| 0
| 0
| 350
| 0.059788
| 0
| 0
| 280
| 0.047831
|
b1ea3252892ba4485f0734ae364981f2c8a20c18
| 35,322
|
py
|
Python
|
arcadia.py
|
ntw1103/arcadia
|
bfefc433a97c13739c9c1b329e0b9af63dba9d1b
|
[
"BSD-2-Clause"
] | 1
|
2018-12-11T04:36:53.000Z
|
2018-12-11T04:36:53.000Z
|
arcadia.py
|
ntw1103/arcadia
|
bfefc433a97c13739c9c1b329e0b9af63dba9d1b
|
[
"BSD-2-Clause"
] | null | null | null |
arcadia.py
|
ntw1103/arcadia
|
bfefc433a97c13739c9c1b329e0b9af63dba9d1b
|
[
"BSD-2-Clause"
] | 1
|
2018-12-11T04:37:35.000Z
|
2018-12-11T04:37:35.000Z
|
# -*- coding: utf-8 -*-
#!/usr/bin/env python2
import fileinput
import json
import random
import socket
import thread #
import urllib2 #
import collections
import time
import traceback
import re
from decimal import Decimal as D
from datetime import datetime
import MySQLdb #
import math
username = 'Arcadia2'
oper_key = 'ee82aeb94474fcc21f05061043cb4' #This is not the actual key
#weather_api="http://api.openweathermap.org/data/2.5/weather?q="
#weather_api="http://api.openweathermap.org/data/2.5/find?q="
weather_api="http://api.openweathermap.org/data/2.5/find?APPID=c61b24ac1edeb6837b377df=" # This is not the actual key
API_KEY="&APPID=c61b24ac1edeb6837b377df" #This is not the actual key
CHANNEL = "#main"
# TODO: @Arcadia.msg_register(name: str, requires_auth: bool, secret: bool, floodrule: str)
# @Arcadia.data_register(....)
# would inspect function for name and arguments, and get docstring, to add as a response to !help.
# would add the decoratee function as a msg or data callback.
# IDEA: floodrule is similar to unrealircd +f channel flag: [*]<attempts>:<seconds>
# TODO: Log !sudo commands to #opers (unless it's issued from #opers), or to file, or to /helpops or /globops or /chatops
# Failed attempts should be logged unobtrusively, to file
# IDEA: Allow user to pipe the output of a command to another channel they are in.
class Connection: #This should work similarly to the singleton.
__shared_state = {}
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
network = '192.102.0.203'
port = 6667
channel = CHANNEL
#sock.connect((network, port))
def __init__(self):
self.__dict__ = self.__shared_state
# and whatever else you want in your class -- that's all!
def connect(self):
self.sock.connect((self.network, self.port))
def get_names_list(self, channel=None):
if channel is None or not is_chan(channel):
channel = self.channel
self.sendPacket ( 'names %s :' %(channel))
all_names = []
while True: # I don't like having our own little recv loop here
# but it's probably fine for IRC. Better solution would involve
# moving to asyncio, threading and queues, or 'callback hell'.
data = self.sock.recv ( 1024 )
print("Listening for NAMES:", data)
for line in data.splitlines():
numeric = line.split(" ")[1]
if numeric == "353": # line of NAMES response
names_part = line.split(channel + " :")[1]
for namemask in names_part.split(" "):
name = namemask.split("!")[0].lstrip("&~@")
all_names.append(name)
else:
if numeric != "366": # end of NAMES response
print("Odd data received while listening for NAMES:", data)
return all_names
return all_names
def get_names(self, channel=None):
return " ".join(self.get_names_list(channel))
def sendPacket(self, packet):
self.sock.send ( packet + '\r\n' )
def sendPacket_split(self, prefix, suffix, split_size=250):
# TODO: Need a maximum number of lines to send, with possibly a !continue callback.
# TODO: Need to delay or queue our output without disrupting comms. Would require a timer.
for i in range(0, len(suffix), split_size):
self.sendPacket(prefix + suffix[i:i+split_size])
class Message:
"""
Represents a message from a user in a channel or PM window.
"""
def __init__(self, bot, data):
self.timestamp = time.time()
self.bot = bot
self.server = bot.server
self.data = data
self.msgtxt = ':'.join(data.split(':')[2:])
self.nick = data.split( '!' )[0].replace(':', '')
self.channel = get_chan_from_data(data, self.nick)
self.type = data.split(" ")[1] # PRIVMSG, NOTICE, etc
self.is_channel = is_chan(self.channel)
self.msgparts = self.msgtxt.strip().split(' ')
def reply(self, message, highlight=None):
h = self.nick + ": "
if highlight is None:
highlight = self.is_channel
if is_badword(self.nick):
highlight = False
prefix = "PRIVMSG %s : %s" % (self.channel, h*highlight)
return self.server.sendPacket_split(prefix, message)
def said(self, s):
return self.msgtxt.lower().find(s.lower()) == 0
def find_raw(self, s):
return self.data.find(s)
class database:
host = "localhost"
user = "arcadia"
passwd = "hoi22h33"
db = "Arcadia"
conn = None
cursor = None
def connect(self):
self.conn = MySQLdb.connect(self.host, self.user, self.passwd, self.db)
self.cursor = self.conn.cursor()
def disconnect(self):
self.cursor.close()
self.conn.close()
def escape(self, toclean):
return MySQLdb.escape_string(toclean)
def insert(self,query,params):
self.cursor.execute(query, ( params ))
def run(self, query):
self.cursor.execute(query)
row = self.cursor.fetchone()
return row
"""def set_note(user,text):
def get_note(user):
return notes"""
"""class clue:
db=database()
db.connect()
def get_location:
db.connect()
db.run("SELECT * FROM `locations` ORDER BY rand() LIMIT 1")
db.disconnect()
def add_location:
def get_weapon:
def add_weapon:"""
class brain:
filename = "arcadias_brain.txt"
def load(self):
print("trying to load the brain")
file = open(self.filename, "r")
lines = file.readlines()
print("I loaded the brain")
return lines
def save(self,lines):
print("saving brain to file.")
file = open(self.filename, "w")
for line in lines:
file.write("%s\n" % line)
file.close()
def clean(self,lines):
for lines in fileinput.FileInput(self.filename, inplace=1):
lines = lines.strip()
if lines == '': continue
print lines
print("cleaned the brain")
def ago(past_timestamp):
"""
Returns a string, the number of days, hours, minutes, or seconds ago a timestamp is.
Example output:
5h30m ago
3d5h ago
2m30s ago
"""
time_diff = int(time.time() - past_timestamp)
time_diff_units = []
for unit, unitname in [(86400, "d"), (3600, "h"), (60, "m"), (1, "s")]:
u = time_diff // unit
if u > 0:
time_diff_units.append(str(u))
time_diff_units.append(unitname)
time_diff -= u * unit
if len(time_diff_units) >= 4:
break
return "".join(time_diff_units) + " ago"
class last_message:
chans = collections.defaultdict(list) # {"#chan": [(time, txt), (time, txt), ...], ...}
def __init__(self, default_channel):
self.default_channel = default_channel
def push(self, timestamp, data, channel=None):
if channel is None:
channel = self.default_channel
arr = self.chans[channel]
if len(arr) >5:
arr.pop(0)
arr.append((timestamp, data.replace ( '\r\n', '' )))
def pop(self, channel=None):
if channel is None:
channel = self.default_channel
arr = self.chans[channel]
if len(arr) > 0:
return arr.pop(0)
return (0, "")
def pop_all(self, channel=None):
if channel is None:
channel = self.default_channel
return self.chans[channel]
"""class user_tracker:
db = database()
db.connect()
#m = db.escape(message)
def check_name(name):
clean_name = self.db.escape(name)
#print "Checking: " + word[:-4]
query = "SELECT COUNT(*) from `users` WHERE `username` = '"+clean_name+"'"
row = db.run(query)
#print row[0]
if(row[0] ==1):
return 1
else:
return 0
def insert_name(name):
clean_name = self.db.escape(name)
query = "INSERT INTO `users` VALUES('"+clean_name+"','9')"
row = db.run(query)"""
"""class trivia_engine:
db = database()
db.connect()
#m = db.escape(message)
state = 0
question_id =0
players =[]
def restart(self):
self.players =[]
self.state =0
def question(self,message,nick):
if self.state != 0:
return "A question was already asked: " +self.state
m = self.db.escape(message)
m = m.replace ( '\r\n', '' )
parts = m.split(' ')
bleh = parts[1::]
#one_line = ' '.join([str(i) for i in bleh])[:-4:]
#question = "Who was luke skywalker's father?"
db = database()
db.connect()
m = db.escape(message)
query ="SELECT count(*)FROM `trivia`;"
row = db.run(query)
count = row[0]
next_id = str(random.randint(0,count-1))
self.question_id = next_id
print("Next_ID:"+ next_id)
query ="SELECT `question` FROM `trivia` WHERE id="+next_id+";"
row = db.run(query)
self.state = row[0] # set the state to the current question.
return self.state+" "+nick
def answer(self,message,nick):
if self.state ==0:
return "The game hasn't started yet."
else:
if "darth vader" in message:
return "Good job."
else:
db = database()
db.connect()
m = db.escape(message)
query ="SELECT `id` from `trivia` WHERE `answer` LIKE '%"+m[3:-4:]+"%'"
print query
row = db.run(query)
if row is None:
return "I'm sorry, wrong answer."
print("id: " + str(row[0]) +"Current question id: "+ str(self.question_id))
if(str(row[0]) == str(self.question_id)):
self.state =0
return "Good Job!"
else:
return "Wrong answer"
return "I'm sorry, wrong answer."
"""
def get_weather(message):
try:
search = '%20'.join(message.msgparts[1:])
if (search == 'hoth'):
search = 'Hell%20,MI'
print(weather_api+search)
try:
usock = urllib2.urlopen(weather_api+search)
except urllib2.HTTPError as e:
return "Communication error (HTTP %s: %s). Try again later or notify developer." % (e.code, e.reason)
data = usock.read()
usock.close()
weather = json.loads(data)
try:
weather = weather['list'][0]
except KeyError:
return "Error. You must provide a location. OpenWeatherMap says: %s (code %s)" % (weather['message'], weather['cod'])
print(weather['weather'])
status = weather['weather']
temp = weather['main']['temp']
location = str(weather['name']) + ', '+ str(weather['sys']['country'])
print("Location" + location)
return str(k2f(temp))+'°F|'+str(k2c(temp))+'°C, '+str(status[0]['description']) + ' | ' + location
except:
print "Unkown error in get_weather."
traceback.print_exc()
return "Unknown error."
def get_time(zone="-5"):
return "Eastern: "+str(datetime.now().time())+" UTC: "+str(datetime.utcnow().time())
def k2f(K):
F =(K - 273.15) *1.8000+32.00
return F
def k2c(K):
C= K- 273.15
return C
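# Sanity check for the two conversions above: k2f(273.15) == 32.0 and
# k2c(273.15) == 0.0, i.e. the freezing point of water.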
def get_response(message):
#sendPacket ( 'PRIVMSG %s : %s' % (channel, 'Trying to respond to: '+message))
db = database()
db.connect()
m = db.escape(message.msgtxt)
parts = m.split(' ')
# for part in parts:
#print(part)
#sendPacket ( 'PRIVMSG %s : %s' % (channel, 'filtered before db: '+m[:-4:]))
row = db.run("SELECT `response` from `responses` WHERE `message`='"+m[:-4:]+"'")
#db.disconnect()
if(row != None):
#sendPacket ( 'PRIVMSG %s : %s' % (channel, 'I found a response for: '+row))
response = row[0]
if "|names|" in response:
print (response)
#response = 'Bah Humbug! '#
response.replace("|names|", message.server.get_names(message.channel))
# response = ' '.join(response.split())
print (response)
if "|nick|" in response:
response = response.replace("|nick|", message.nick)
#if "||" in response:
# sendPacket("PRIVMSG %s : %s")
if(response.startswith('|action|')):
return '\001ACTION ' + response.replace("|action|", "") + '\001'
db.disconnect()
return response
else:
return False
def handle_badwords(message):
print "handling badwords"
msgtxt = message.msgtxt
for l in ('"', "'", "\\"):
msgtxt = msgtxt.replace(l, "")
words = msgtxt.split(" ")
for word in words:
if is_badword(word) == 1:
print "Stop swearing!" + word
if not message.is_channel: # Can't kick in a PM.
message.reply("Hey, watch your language")
else:
#sendPacket ( 'PRIVMSG %s : %s' % (channel, 'Hey, watch your language') )
kick = 'KICK %s %s %s' % (message.channel, message.nick, 'Hey, watch your language')
print(kick)
message.server.sendPacket(kick)
#Todo add a handling of the swearing, so it marks them saying it, and kicks them and stuff.
break
def is_badword(dirty_word):
return #Currently, I haven't dumped the swearword database.
db = database()
db.connect()
word = db.escape(dirty_word)
#print "Checking: " + word[:-4]
query = "SELECT COUNT(*) from `badwords`"
query = query + " WHERE `words` = '" + word[:-4]+"'"
query = query + " OR `words` = '"+word[:-4]+".'"
query = query + " OR `words` = '"+word[:-4]+",'"
query = query + " OR `words` = '"+word[:-4]+"!'"
row = db.run(query)
#print row[0]
if(row[0] ==1):
return 1
else:
return 0
def get_verse(message):
return #Currently I haven't dumped the bible database.
print('In get_verse')
db = database()
db.connect()
m = db.escape(message.msgtxt)
m = m.replace ('\r\n', '').replace("\\r\\n", '')
parts = m.split(' ')
for part in parts:
print(part)
print "Length: " + str(len(parts))
#parts.pop(1)
is_range = all(i.isdigit() for i in parts[-3:])
if is_range:
chapt = parts[-3]
start = parts[-2]
end = parts[-1]
book = " ".join(parts[1:-3])
else:
chapt = parts[-2]
start = end = parts[-1]
book = " ".join(parts[1:-2])
start = int(start)
end = int(end)
response = ""
if end - start > 7:
end = start + 7
response += "[Limited verses: %s to %s] " % (start, end)
print "Start: " + str(start) + " , END " + str(end)
for i in range(start,end+1):
query = "SELECT `VerseText` from `BibleKJV` WHERE `book`='" +book+"' AND `Chapter`='"+chapt+"' AND `Verse`='"+str(i)+"'"
print query
row = db.run(query)
if row is None:
return 'Verse(s) not found. Format example (looking up 1 Corinthians 3:18-20): ".bible 1 Corinthians 3 18 20" -- Another (looking up John 3:16): ".bible John 3 16"'
if len(row) > 0:
print "Multiverse"
for i in row:
print "verse: " + i
response += " " + i
else:
response = row[0]
print "response" + response
return response
def AI_intel_response(message,storage):
for word in message.split(' '):
print("printing input word:"+ word)
if word in ['hi','hello','yo','hey','greetings']:
matching = [s for s in storage if "hello" in s]
return random.choice(matching)
return random.choice(storage)
def AI_response(message, storage_=None):
global storage
if storage_ is None:
storage_ = storage
db = database()
db.connect()
m = db.escape(message.msgtxt)
m = m.replace ( '\r\n', '' )
parts = m.split(' ')
bleh = parts[1::]
one_line = ' '.join([str(i) for i in bleh])[:-4:]
print("storing:" + one_line)
if "pug" not in one_line:
storage.append(one_line)
#return random.choice(storage_)#[:-4:]
return AI_intel_response(one_line,storage_)
def arithmatic ( args ):
args [ 0 ] = args [ 0 ].replace ( '\r\n', '' )
for letter in 'abcdefghijklmnopqrstuvwxyz':
args [ 0 ] = args [ 0 ].replace ( letter, '' )
solution = str ( eval ( args [ 0 ], { '__builtins__' : {} } ) )
return solution
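# For example, arithmatic(['2*(3+4)\r\n']) returns '14': the trailing CRLF and
# any lowercase letters are stripped first, then eval runs with an empty
# __builtins__ so the expression cannot reach built-in functions.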
def sine ( args ):
solution = str ( math.sin ( float ( args [ 0 ] ) * ( 2 * math.pi ) / 360 ) )
return solution
def cosine ( args ):
solution = str ( math.cos ( float ( args [ 0 ] ) * ( 2 * math.pi ) / 360 ) )
return solution
def tangent ( args ):
solution = str ( math.tan ( float ( args [ 0 ] ) * ( 2 * math.pi ) / 360 ) )
return solution
def send_last_msgs(bot, nick, channel, show_error=False):
server = bot.server
if not is_chan(channel):
channel = server.channel
channel_nicks = server.get_names_list(channel)
if nick not in channel_nicks:
server.sendPacket ( 'NOTICE %s : %s' % (nick, 'You must be in a channel to see its last messages.') )
elif "[m]" in nick:
print("ignoring matrix")
else:
response = bot.last.pop_all(channel)
print(response)
if len(response) > 0:
server.sendPacket ( 'NOTICE %s : %s %s:' % (nick, 'Last messages on channel', channel) )
for timestamp, text in response:
server.sendPacket ( 'NOTICE %s : %s %s' % (nick, ago(timestamp), text) )
elif show_error:
server.sendPacket ( 'NOTICE %s : %s %s.' % (nick, 'I don\'t remember any previous messages on the channel', channel) )
def is_chan(channel):
return (channel.startswith("&") or channel.startswith("#"))
def get_chan_from_data(data, nick):
channel = data.split(" ")[2] # may be our own nick in a PM.
if not is_chan(channel):
return nick
return channel
def procss_data(bot, data, msgtxt, nick, b, last):
if data.find ( 'PING' ) != -1:
bot.server.sendPacket ( 'PONG ' + data.split() [ 1 ] )
# elif data.find( 'NICK') != -1:
# if "has changed his/her nickname to" in message:
# print data
# new_nick = message.split(':')
# server.sendPacket ( 'PRIVMSG %s : %s' % (server.channel, nick + ' Changed to: '+message.split(':')[1]))
else:
not_blocked = bot.run_callbacks(data)
if not_blocked:
if data.find("JOIN :#")!= -1: #:Bart_Roberts!bartrobert@fanta.net JOIN :#main"
if "Arcadia" not in data:
channel = data.split('JOIN :')[1].rstrip()
join_nick = data.split (':')[1].split('!')[0]
send_last_msgs(bot, join_nick, channel)
elif data.find('PRIVMSG') != -1:
procss_privmsg(Message(bot, data))
#def procss_data(server,data,message,nick,b,trivia,last):
def procss_privmsg(msg):
global storage
server = msg.bot.server
brain = msg.bot.brain
print "nick: " + msg.nick
print "channel: " + msg.channel
print "message: " + msg.msgtxt
#sendPacket ( 'JOIN :#main')
handle_badwords(msg) # This will handle the swear words stuff.
if msg.said('you\'re lame'):
msg.reply("Hey", False)
elif msg.said('!main'):
server.sendPacket('JOIN ' + CHANNEL)
server.sendPacket ('PRIVMSG %s : %s' % (CHANNEL, 'I\'m Back '))
elif msg.said('.bible'):
response = get_verse(msg)
msg.reply(response, False)
elif msg.said('arcadia'):
response = AI_response(msg)
msg.reply(" " + response, False)
elif msg.said("!last"):
try:
channel = msg.msgparts[1]
except IndexError:
channel = msg.channel
send_last_msgs(msg.bot, msg.nick, channel, show_error=True)
elif msg.said('!save'):
authed_function_call(msg, brain.save, storage)
elif msg.said('!load'):
def f():
global storage
storage = brain.load()
authed_function_call(msg, f)
elif msg.said('!clean') or msg.said('!fixbrain'):
def f():
global storage
brain.clean(storage)
storage = brain.load()
authed_function_call(msg, f)
elif msg.said('!timeout'):
command = 'SAJOIN ' + msg.msgparts[1] + ' #timeout'
print(command)
authed_function_call(msg, server.sendPacket, command)
elif msg.said('!t_DISABLED'):
print("In trivia: " +msg.msgtxt)
server.sendPacket ( 'PRIVMSG %s : %s' % (server.channel, ' ') )
#if "start" in msg:
#response = trivia.question(message,nick)
#server.sendPacket ( 'PRIVMSG %s : %s' % (server.channel, ' '+response) )
#else:
#response = trivia.answer(message,nick)
#server.sendPacket ( 'PRIVMSG %s : %s' % (server.channel, ' '+response) )
elif msg.said('!sudo'):
print("Igot:" + msg.msgtxt)
t = " ".join(msg.msgparts[1:]).replace('/msg','PRIVMSG ')
authed_function_call(msg, server.sendPacket, t)
elif msg.said('!f2c'):
f = D(msg.msgparts[1])
c = (f - 32) * D(5/9.)
msg.reply('%1.2f' % c)
elif msg.said('!c2f'):
c = D(msg.msgparts[1])
f = D(9/5.) * c + 32
msg.reply('%1.2f' % f)
elif msg.said('!time'):
msg.reply(get_time())
elif msg.said('!h7777777777'):
msg.reply('Hello room, and hello ' + msg.nick, False)
elif msg.said('!me'):
server.sendPacket('PRIVMSG %s :%s' % (msg.channel, '\001ACTION waves.\001'))
#sendPacket ( 'names %s :' %(channel))
#data = server.sock.recv ( 1024 )
server.sendPacket('PRIVMSG %s : %s' % (msg.channel, 'Response: ' + server.get_names(msg.channel)))
else:
if msg.said('!id'):
global oper_key
server.sendPacket ( 'OPER admin '+ oper_key)
response = get_response(msg)
if msg.said('!weather'):
response = get_weather(msg)
if(response != False):
if msg.said('!test'):
data = server.sock.recv ( 1024 )
print "HERE"
elif msg.said('!joinbart'):
server.sendPacket ( response)
data = server.sock.recv ( 1024 )
print "HERE"
else:
msg.reply(response, False)
if ("!id" not in msg.msgtxt and "NOTICE" not in msg.data) and msg.is_channel:
#store the last 5 things said/did.
if msg.msgtxt.startswith("\001ACTION "):
action = msg.msgtxt.replace("\001ACTION ", "", 1).rstrip("\001")
topush = "*"+msg.nick+" "+action
else:
topush = "<"+msg.nick+">"+msg.msgtxt
msg.bot.last.push(msg.timestamp, topush, msg.channel)
def authed_function_call(trig_msg, function, *args, **kwargs):
t = time.time()
timeout = t + 10
def cb(whois_code, rest):
if time.time() > timeout or whois_code == "318":
trig_msg.reply("Auth failed. Make sure that you're OPER'd (and that I'm OPER'd).") # TODO: When we have timers, we can make this error message occur always.
return True
elif whois_code in ("320", "313"):
function(*args, **kwargs)
return True
return False
whois(trig_msg.bot, trig_msg.nick, cb, ["320", "318", "313"])
def whois(bot, target_nick, cb, target_codes=None):
"""
:param bot:
An Arcadia instance.
:param target_nick:
The nick we have to whois. WHOIS responses from the server pertaining to
any other nick are ignored.
:param cb:
A callable taking two strings, first is the WHOIS RPL numeric code,
second is the string the server put at the end.
If the server responds:
```
:domainname.com 671 username1 username2 :is using a Secure Connection
```
`cb` gets:
```
cb("671", ":is using a Secure Connection")
```
:param target_codes:
The WHOIS RPL numeric codes we should watch out for, others will be ignored.
If argument is not passed, we'll watch out for all known WHOIS numerics.
"""
if target_codes is None:
target_codes = [
"311", # hostname (start)
"379", # modes
"378", # "is connecting from"
"307", # "is identified"
"319", # channels
"312", # server in the net user is connected to
"313", # server operator
"310", # available for help
"671", # using ssl
"320", # is root
"317", # idle time and signon time
"318", # end
"401" # no such user
]
def whois_cb(bot, data):
try:
servername, code, me, you, rest = data.split(" ", 4)
except ValueError: # not a whois reply.
return False, False
else:
print("whois_cb:", servername, code, me, you, rest)
cbresult = None
if code in target_codes and you.lower() == target_nick.lower():
cbresult = cb(code, rest)
if code == "318" and you == target_nick:
# When we get to the end of the relevant WHOIS message, this function
# is removed from the list of callbacks in Arcadia.
return True, False
return cbresult, False
bot.callbacks.append(whois_cb)
    bot.server.sendPacket("WHOIS {0} {0}".format(target_nick))
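# Minimal usage sketch of whois() (added illustration; the nick and the reporting
# logic are hypothetical). It shows the callback protocol described in the
# docstring: the callback gets (numeric_code, trailing_text) and returns True
# once it is done, which removes the underlying watcher from bot.callbacks.
def example_whois_usage(bot, nick="SomeNick"):
    def report(code, rest):
        if code == "313":    # RPL_WHOISOPERATOR: target is a server operator
            print("operator: " + rest)
            return True      # done watching
        if code == "318":    # RPL_ENDOFWHOIS: end reached, no operator line seen
            print("not an operator")
            return True
        return False         # keep watching for the remaining codes
    whois(bot, nick, report, ["313", "318"])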
class Arcadia:
server = Connection()
# Callbacks are called with an Arcadia object, and a string with the data
# that was received. They are called every time we receive data, before the
# normal hardcoded commands are tried.
# Callbacks return two values.
# (whether_to_remove_me, whether_to_block_callbacks_to_my_right)
# If the second value equals 2, we cease processing the message and hardcoded
# commands won't be tried (meaning, they will be tried if it isn't 2).
callbacks = []
def run_callbacks(self, data):
return self._run_callbacks(self.callbacks, self, data)
def _run_callbacks(self, cbs, *args):
to_remove = []
block = None
for f in cbs:
remove, block = f(*args)
if remove:
to_remove.append(f)
if block:
break
for f in to_remove:
while f in cbs:
cbs.remove(f)
return (block != 2)
def Arcadia_run(self):
global running
global username
print(running)
while running==1:
self.server.connect()
self.server.sendPacket ( 'NICK %s' % (username) )
self.server.sendPacket ( 'USER %s %s %s: %s' % ('Arcadia', 'Arcadia', 'Arcadia', 'Arcadia') )
trash = self.server.sock.recv ( 1024 )
self.server.sendPacket ( 'JOIN %s' % (self.server.channel) )
self.server.sendPacket ( 'PRIVMSG %s : %s' % (self.server.channel, 'Hey') )
#Other commands: !hiwizard,!easter, !sleep, !good, !whee,
# !purple, !batman, !grapefruit, !ben, !friday, !secret, !gps, !christmas. !bunny, !monday, !pirate, !sandwich, !sandvich
self.brain = brain() # load arcadia's brain
#trivia = trivia_engine()
self.last = last_message(self.server.channel)
while True:
data = self.server.sock.recv ( 1024 )
for line in data.splitlines(True):
message = ':'.join ( line.split ( ':' ) [ 2: ] )
nick = line.split ( '!' ) [ 0 ].replace ( ':', '' )
                    print("Raw: " + line)
# procss_data(self.server,data,message,nick,b,trivia,last)
procss_data(self, line, message, nick, self.brain, self.last)
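# Illustrative callback (added sketch, not registered by default) following the
# (remove_me, block_others) convention documented in the Arcadia class above:
# it fires once on the first PING line it sees and then asks to be removed.
def example_ping_logger(bot, data):
    if data.startswith("PING"):
        print("example_ping_logger saw: " + data.strip())
        return True, False    # remove this callback, let the others keep running
    return False, False       # stay registered, don't block anything
# It could be wired in with: arcadia.callbacks.append(example_ping_logger)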
running=1
storage = []
arcadia = Arcadia()
try:
thread.start_new_thread( arcadia.Arcadia_run, () )
except:
traceback.print_exc()
    print("Error: unable to start thread")
try:
while 1:
user_input = raw_input("enter something: ")
if user_input.startswith("#EVAL#"):
user_input = user_input[len("#EVAL#"):]
try:
try:
result = eval(user_input)
                    print("Eval result:")
                    print(result)
                    print("-----")
except SyntaxError:
exec(user_input)
                    print("(Executed)")
except:
traceback.print_exc()
else:
if "/join" in user_input:
arcadia.server.sendPacket ( 'JOIN %s' % (user_input[6::]) )
print("trying to")
elif user_input == "/start":
arcadia.server.sendPacket ( 'OPER ntw1103 '+ oper_key)
brain = arcadia.brain
global storage
storage = brain.load()
brain.clean(storage)
else:
arcadia.server.sendPacket ( 'PRIVMSG %s : %s' % (CHANNEL, ' ' +user_input) )
                print(user_input)
except KeyboardInterrupt:
running = "stopped"
    print("done")
| 43.233782
| 188
| 0.46832
| 9,301
| 0.263305
| 0
| 0
| 0
| 0
| 0
| 0
| 12,200
| 0.345374
|
b1edfb7e986ee60ac0da1a869a4e400f7398c3fe
| 1,492
|
py
|
Python
|
app/display_modules/ags/tests/test_tasks.py
|
MetaGenScope/metagenscope-server
|
609cd57c626c857c8efde8237a1f22f4d1e6065d
|
[
"MIT"
] | null | null | null |
app/display_modules/ags/tests/test_tasks.py
|
MetaGenScope/metagenscope-server
|
609cd57c626c857c8efde8237a1f22f4d1e6065d
|
[
"MIT"
] | null | null | null |
app/display_modules/ags/tests/test_tasks.py
|
MetaGenScope/metagenscope-server
|
609cd57c626c857c8efde8237a1f22f4d1e6065d
|
[
"MIT"
] | null | null | null |
"""Test suite for Average Genome Size tasks."""
from app.display_modules.ags.ags_tasks import boxplot, ags_distributions
from app.samples.sample_models import Sample
from app.tool_results.microbe_census.tests.factory import create_microbe_census
from tests.base import BaseTestCase
class TestAverageGenomeSizeTasks(BaseTestCase):
"""Test suite for Average Genome Size tasks."""
def test_boxplot(self):
"""Ensure boxplot method creates correct boxplot."""
values = [37, 48, 30, 53, 3, 83, 19, 71, 90, 16, 19, 7, 11, 43, 43]
result = boxplot(values)
self.assertEqual(3, result['min_val'])
self.assertEqual(17.5, result['q1_val'])
self.assertEqual(37, result['mean_val'])
self.assertEqual(50.5, result['q3_val'])
self.assertEqual(90, result['max_val'])
def test_ags_distributions(self):
"""Ensure ags_distributions task works."""
def create_sample(i):
"""Create test sample."""
metadata = {'foo': f'bar{i}'}
return Sample(name=f'SMPL_{i}',
metadata=metadata,
microbe_census=create_microbe_census())
samples = [create_sample(i).fetch_safe() for i in range(15)]
result = ags_distributions.delay(samples).get()
self.assertIn('foo', result)
self.assertIn('bar0', result['foo'])
self.assertIn('bar1', result['foo'])
self.assertIn('min_val', result['foo']['bar0'])
| 38.25641
| 79
| 0.635389
| 1,205
| 0.807641
| 0
| 0
| 0
| 0
| 0
| 0
| 329
| 0.220509
|
b1ee21b42e49b37ad9977b9259b77f5d847cdf1c
| 491
|
py
|
Python
|
tsim/serialization/__init__.py
|
eduardomezencio/tsim
|
60ac63152a98fd7dabb59c66367bca216e6a7370
|
[
"MIT"
] | 2
|
2021-04-24T06:48:13.000Z
|
2022-01-25T02:38:44.000Z
|
tsim/serialization/__init__.py
|
eduardomezencio/tsim
|
60ac63152a98fd7dabb59c66367bca216e6a7370
|
[
"MIT"
] | null | null | null |
tsim/serialization/__init__.py
|
eduardomezencio/tsim
|
60ac63152a98fd7dabb59c66367bca216e6a7370
|
[
"MIT"
] | null | null | null |
"""Global serialization configuration."""
from importlib import import_module
import os
def configure_serialization():
"""Configure serialization for all classes in folder."""
for name in filter(
lambda s: not s.startswith('_') and s.endswith('.py'),
os.listdir(os.path.dirname(os.path.abspath(__file__)))):
module_name = os.path.splitext(name)[0]
module = import_module(f'.{module_name}', 'tsim.serialization')
module.configure()
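# Illustrative note (an assumption about the sibling modules, not code from this
# package): each non-underscore .py file in this folder is expected to expose a
# module-level configure() callable, because configure_serialization() imports
# it and calls module.configure(). A hypothetical module would look like:
#
#     """Serialization config for some type."""
#     def configure():
#         ...  # register the (de)serialization hooks for that type here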
| 30.6875
| 71
| 0.668024
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 142
| 0.289206
|
b1eeecae89c5a75d2089876662644291654428d3
| 4,678
|
py
|
Python
|
windows/winobject/handle.py
|
1orenz0/PythonForWindows
|
f3de7b528b020b45ac6a871c975006fc1db1c3b0
|
[
"BSD-3-Clause"
] | 1
|
2021-06-22T16:50:31.000Z
|
2021-06-22T16:50:31.000Z
|
windows/winobject/handle.py
|
killvxk/PythonForWindows
|
b253bc5873e7d97087ed22f2753b51fc6880ec18
|
[
"BSD-3-Clause"
] | null | null | null |
windows/winobject/handle.py
|
killvxk/PythonForWindows
|
b253bc5873e7d97087ed22f2753b51fc6880ec18
|
[
"BSD-3-Clause"
] | 1
|
2021-05-12T12:58:27.000Z
|
2021-05-12T12:58:27.000Z
|
import os
import ctypes
import windows
from windows import winproxy
from windows.generated_def import windef
from windows.generated_def.winstructs import *
# Remove this ?
class EPUBLIC_OBJECT_TYPE_INFORMATION(PUBLIC_OBJECT_TYPE_INFORMATION):
pass
current_process_pid = os.getpid()
class Handle(SYSTEM_HANDLE):
"""A handle of the system"""
@windows.utils.fixedpropety
def process(self):
"""The process possessing the handle
:type: :class:`WinProcess <windows.winobject.process.WinProcess>`"""
# "TODO: something smart ? :D"
# return [p for p in windows.system.processes if p.pid == self.dwProcessId][0]
return windows.WinProcess(pid=self.dwProcessId)
@windows.utils.fixedpropety
def name(self):
"""The name of the handle
:type: :class:`str`"""
return self._get_object_name()
@windows.utils.fixedpropety
def type(self):
"""The type of the handle
:type: :class:`str`"""
return self._get_object_type()
@property
def infos(self):
"""TODO: DOC"""
return self._get_object_basic_infos()
def _get_object_name(self):
lh = self.local_handle
size_needed = DWORD()
yyy = ctypes.c_buffer(0x1000)
winproxy.NtQueryObject(lh, ObjectNameInformation, ctypes.byref(yyy), ctypes.sizeof(yyy), ctypes.byref(size_needed))
return LSA_UNICODE_STRING.from_buffer_copy(yyy[:size_needed.value]).str
def _get_object_type(self):
lh = self.local_handle
xxx = EPUBLIC_OBJECT_TYPE_INFORMATION()
size_needed = DWORD()
try:
winproxy.NtQueryObject(lh, ObjectTypeInformation, ctypes.byref(xxx), ctypes.sizeof(xxx), ctypes.byref(size_needed))
except WindowsError as e:
if e.code != STATUS_INFO_LENGTH_MISMATCH:
# print("ERROR WITH {0:x}".format(lh))
raise
size = size_needed.value
buffer = ctypes.c_buffer(size)
winproxy.NtQueryObject(lh, ObjectTypeInformation, buffer, size, ctypes.byref(size_needed))
xxx = EPUBLIC_OBJECT_TYPE_INFORMATION.from_buffer_copy(buffer)
return xxx.TypeName.str
def _get_object_basic_infos(self):
lh = self.local_handle
size_needed = DWORD()
basic_infos = PUBLIC_OBJECT_BASIC_INFORMATION()
winproxy.NtQueryObject(lh, ObjectBasicInformation, ctypes.byref(basic_infos), ctypes.sizeof(basic_infos), ctypes.byref(size_needed))
return basic_infos
#PUBLIC_OBJECT_BASIC_INFORMATION
@windows.utils.fixedpropety
def local_handle(self):
"""A local copy of the handle, acquired with ``DuplicateHandle``
:type: :class:`int`"""
if self.dwProcessId == windows.current_process.pid:
return self.wValue
res = HANDLE()
winproxy.DuplicateHandle(self.process.handle, self.wValue, windows.current_process.handle, ctypes.byref(res), dwOptions=DUPLICATE_SAME_ACCESS)
return res.value
def description(self):
stype = self.type
descr_func = getattr(self, "description_" + stype, None)
if descr_func is None:
return None
return descr_func()
def description_Process(self):
proc = windows.WinProcess(handle=self.wValue)
res = str(proc)
del proc._handle
return res
def description_Thread(self):
thread = windows.WinThread(handle=self.wValue)
res = str(thread)
del thread._handle
return res
def __repr__(self):
return "<{0} value=<0x{1:x}> in process pid={2}>".format(type(self).__name__, self.wValue, self.dwProcessId)
def __del__(self):
if self.dwProcessId == current_process_pid:
return
if hasattr(self, "_local_handle"):
return winproxy.CloseHandle(self._local_handle)
def enumerate_handles():
size_needed = ULONG()
size = 0x1000
buffer = ctypes.c_buffer(size)
try:
winproxy.NtQuerySystemInformation(16, buffer, size, ReturnLength=ctypes.byref(size_needed))
except WindowsError as e:
pass
size = size_needed.value + 0x1000
buffer = ctypes.c_buffer(size)
winproxy.NtQuerySystemInformation(16, buffer, size, ReturnLength=ctypes.byref(size_needed))
x = SYSTEM_HANDLE_INFORMATION.from_buffer(buffer)
class _GENERATED_SYSTEM_HANDLE_INFORMATION(ctypes.Structure):
_fields_ = [
("HandleCount", ULONG),
("Handles", Handle * x.HandleCount),
]
return list(_GENERATED_SYSTEM_HANDLE_INFORMATION.from_buffer_copy(buffer[:size_needed.value]).Handles)
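# Illustrative usage sketch (not part of the original module): enumerate the
# handles owned by the current process and describe any Process handles among
# them. Only attributes defined above (dwProcessId, type, description) are used.
def _example_dump_own_process_handles():
    own = [h for h in enumerate_handles() if h.dwProcessId == current_process_pid]
    for h in own:
        if h.type == "Process":
            print("{0} -> {1}".format(h, h.description()))
    return own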
| 33.898551
| 150
| 0.665669
| 3,870
| 0.827277
| 0
| 0
| 1,209
| 0.258444
| 0
| 0
| 653
| 0.13959
|
b1efcf80cebb01dff50a1e2a45ff4368cec1958a
| 4,428
|
py
|
Python
|
metrics.py
|
efratkohen/Project
|
d95d20a1be8fe0e0918b3e699c640f36704639f8
|
[
"MIT"
] | 1
|
2020-07-25T11:27:17.000Z
|
2020-07-25T11:27:17.000Z
|
metrics.py
|
efratkohen/Project
|
d95d20a1be8fe0e0918b3e699c640f36704639f8
|
[
"MIT"
] | null | null | null |
metrics.py
|
efratkohen/Project
|
d95d20a1be8fe0e0918b3e699c640f36704639f8
|
[
"MIT"
] | null | null | null |
import traceback
import numpy as np
from matplotlib import pyplot, pyplot as plt
from sklearn.metrics import (
mean_squared_error,
median_absolute_error,
roc_curve,
auc,
f1_score,
precision_recall_curve,
r2_score,
)
from sklearn.metrics import confusion_matrix
import column_labeler as clabel
from math import sqrt
def calc_best_f1(Ytest, Yhat, selected_value=clabel.AMMONIA):
max_val = 0
best_i = 0
for i in range(1, 100):
accuracy = f1_score(Ytest, (Yhat > 0.01 * i).astype(int))
if accuracy > max_val:
max_val = accuracy
best_i = i
return max_val
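# Hedged example of calling calc_best_f1 (synthetic arrays, illustration only):
# the ground truth is binary and the scores are probabilities in [0, 1], which
# the function sweeps over 99 thresholds (0.01 .. 0.99) to find the best F1.
def _example_calc_best_f1():
    y_true = np.array([0, 0, 1, 1, 1, 0, 1, 0])
    y_score = np.array([0.1, 0.4, 0.35, 0.8, 0.7, 0.2, 0.9, 0.3])
    return calc_best_f1(y_true, y_score)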
def calc_rmse(Ytest, Yhat, graph=(20, 15)):
rmse = sqrt(mean_squared_error(Ytest, Yhat))
if graph:
print("RMSE", rmse)
pyplot.figure(figsize=graph)
pyplot.plot(Yhat, label="predictions")
pyplot.plot(Ytest, label="real")
pyplot.legend()
# import datetime
pyplot.show()
# pyplot.savefig("Images\\%s" % str(datetime.datetime.now()))
return rmse
def calc_mape(Ytest, Yhat, graph=True):
return np.mean(np.abs((Ytest - Yhat) / Ytest)) * 100
def calc_mae(Ytest, Yhat, graph=True):
return median_absolute_error(Ytest, Yhat)
def calc_rsquared(Ytest, Yhat, graph=True):
# R-squared
return r2_score(Ytest, Yhat)
def calc_tp_fp_rate(Ytest, Yhat, selected_value, binary=False, graph=True):
global y_not_bad_real, y_not_bad_hat
if binary:
y_not_bad_hat = Yhat.astype(int)
y_not_bad_real = Ytest.astype(int)
else:
mdict = clabel.limits[selected_value]
good_limit = mdict[clabel.GOOD]
not_bad_limit = mdict[clabel.NOT_BAD]
y_good_hat = Yhat > good_limit
y_good_real = Ytest > good_limit
y_not_bad_hat = Yhat > not_bad_limit
y_not_bad_real = Ytest > not_bad_limit
if graph:
print(confusion_matrix(y_not_bad_real, y_not_bad_hat))
res = confusion_matrix(y_not_bad_real, y_not_bad_hat).ravel()
if len(res) > 1:
return res
return res[0], 0, 0, 0
def calc_best_accuracy(Ytest, Yhat, selected_value=clabel.AMMONIA):
max_val = 0
best_i = 0
for i in range(1, 100):
tn, fp, fn, tp = calc_tp_fp_rate(
Ytest,
(Yhat > 0.01 * i).astype(int),
selected_value=selected_value,
binary=True,
graph=False,
)
accuracy = (tn + tp) / (tn + fp + fn + tp)
if accuracy > max_val:
max_val = accuracy
best_i = i
calc_tp_fp_rate(
Ytest,
(Yhat > 0.01 * best_i).astype(int),
selected_value=selected_value,
binary=True,
graph=True,
)
return max_val
def roc(Ytest, Yhat, graph=False):
fpr, tpr, threshold = roc_curve(Ytest, Yhat)
roc_auc = auc(fpr, tpr)
# method I: plt
if graph:
pyplot.title("Receiver Operating Characteristic")
pyplot.plot(fpr, tpr, "b", label="AUC = %0.2f" % roc_auc)
pyplot.legend(loc="lower right")
pyplot.plot([0, 1], [0, 1], "r--")
pyplot.xlim([0, 1])
pyplot.ylim([0, 1])
pyplot.ylabel("True Positive Rate")
pyplot.xlabel("False Positive Rate")
pyplot.show()
return fpr, tpr, threshold, roc_auc
def calc_histogram(Ytest, Yhat):
plt.figure(figsize=(15, 4))
plt.hist(Ytest.flatten(), bins=100, color="orange", alpha=0.5, label="pred")
plt.hist(Yhat.flatten(), bins=100, color="green", alpha=0.5, label="true")
plt.legend()
plt.title("value distribution")
plt.show()
def calc_precision_recall(Ytest, Yhat, threshold=0.002, graph=True):
lr_precision, lr_recall, _ = precision_recall_curve(Ytest, Yhat)
try:
lr_f1 = f1_score(Ytest, (Yhat > threshold).astype(int))
except:
traceback.print_exc()
lr_f1 = 1
    lr_auc = auc(lr_recall, lr_precision)
if graph:
        pyplot.title("Precision-Recall Curve")
pyplot.plot(
lr_recall,
lr_precision,
"b",
label="F1 = %0.2f , AUC = %0.2f" % (lr_f1, lr_auc),
)
pyplot.legend(loc="lower right")
pyplot.xlim([0, 1])
pyplot.ylim([0, 1])
pyplot.ylabel("Precision")
pyplot.xlabel("Recall")
pyplot.show()
return lr_f1, lr_auc
| 28.203822
| 80
| 0.613144
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 382
| 0.086269
|
b1f0550c1843ad31adf65e89fa5211ad4acfccfc
| 1,328
|
py
|
Python
|
tests/iterators/pull_test.py
|
SSouik/pyutil
|
d2250fb585679e49eb9056a3051bf239a58c2e8b
|
[
"MIT"
] | null | null | null |
tests/iterators/pull_test.py
|
SSouik/pyutil
|
d2250fb585679e49eb9056a3051bf239a58c2e8b
|
[
"MIT"
] | 21
|
2022-01-05T04:51:33.000Z
|
2022-01-28T05:45:57.000Z
|
tests/iterators/pull_test.py
|
SSouik/pyutil
|
d2250fb585679e49eb9056a3051bf239a58c2e8b
|
[
"MIT"
] | null | null | null |
import pytest
from pyutil import pull
def test_pull_with_empty_list():
lst = []
pull(lst, 1)
expected = []
assert lst == expected
def test_pull_when_list_has_values():
lst = [1, 2, 3, 4, 5]
pull(lst, 2)
expected = [1, 3, 4, 5]
assert lst == expected
def test_pull_when_list_has_values_2():
lst = [1, 2, 3, 4, 5]
pull(lst, 2, 3, 4)
expected = [1, 5]
assert lst == expected
def test_pull_when_list_has_values_3():
lst = [1, 2, 3, 4, 5]
pull(lst, 1, 4)
expected = [2, 3, 5]
assert lst == expected
def test_pull_when_list_has_values_4():
lst = [{"foo": 1, "bar": 2}, {"abc": 1, "def": 2}]
pull(lst, {"foo": 1, "bar": 2})
expected = [{"abc": 1, "def": 2}]
assert lst == expected
def test_pull_when_list_has_duplicate_values():
lst = [1, 2, 2, 3, 4, 4, 5]
pull(lst, 2)
expected = [1, 3, 4, 4, 5]
assert lst == expected
def test_pull_when_list_has_duplicate_values_2():
lst = [1, 2, 2, 3, 4, 4, 5]
pull(lst, 2, 3, 4)
expected = [1, 5]
assert lst == expected
def test_pull_when_list_has_duplicate_values_3():
lst = [1, 2, 2, 3, 4, 4, 5]
pull(lst, 2, 4)
expected = [1, 3, 5]
assert lst == expected
def test_pull_seq_is_not_a_list():
with pytest.raises(TypeError):
pull("foo", 1)
| 20.75
| 54
| 0.582831
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 45
| 0.033886
|
b1f203c60f7518be9918994e126f2868a0f76ed4
| 30,681
|
py
|
Python
|
main.py
|
RohiBaner/Beijing-Air-Quality-Prediction
|
4ec823ceacef1b61e1c1e5689a97a1335e4b5867
|
[
"MIT"
] | 3
|
2019-09-23T10:04:05.000Z
|
2021-03-10T12:12:28.000Z
|
main.py
|
RohiBaner/Beijing-Air-Quality-Prediction
|
4ec823ceacef1b61e1c1e5689a97a1335e4b5867
|
[
"MIT"
] | null | null | null |
main.py
|
RohiBaner/Beijing-Air-Quality-Prediction
|
4ec823ceacef1b61e1c1e5689a97a1335e4b5867
|
[
"MIT"
] | null | null | null |
''' --------------------------------------------IMPORTING NECESSARY LIBRARIES------------------------------------------- '''
import numpy as np
import pandas as pd
from math import radians, cos, sin, asin, sqrt
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import KFold
from itertools import cycle
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
import time
start_time = time.time()
pd.options.mode.chained_assignment = None # default='warn'
''' ---------------------------FUNCTIONS TO FIND NEAREST DISTANCE BETWEEN ALL NECESSARY STATIONS------------------------ '''
# Function to find nearest station between two points using Haversine Distance
def haversine_dist(lon1, lat1, lon2, lat2):
# Calculate the great circle distance between two points on the earth
lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2]) # Convert to radians
# Haversine distance formula
dlon = lon2 - lon1
dlat = lat2 - lat1
a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2
c = 2 * asin(sqrt(a))
r = 6371 #Radius of earth in kilometers
return c * r
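# Illustrative sanity check (added, not part of the original pipeline): a point
# is at distance 0 from itself, and two points on the same meridian one degree
# of latitude apart are about 111.2 km apart (6371 km * pi / 180).
assert haversine_dist(116.4, 39.9, 116.4, 39.9) == 0
assert abs(haversine_dist(116.4, 39.9, 116.4, 40.9) - 111.2) < 0.5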
# Find nearest AQ to AQ station
def near_aq_to_aq(lat, long):
    distances = station_aq.apply(lambda row: haversine_dist(long, lat, row['longitude'], row['latitude']), axis=1)
distance = distances[distances!=0]
return station_aq.loc[distance.idxmin(), 'station']
# Find nearest GW to GW station
def near_gw_to_gw(lat, long):
    distances = gw_station.apply(lambda row: haversine_dist(long, lat, row['longitude'], row['latitude']), axis=1)
distance = distances[distances!=0]
return gw_station.loc[distance.idxmin(), 'station_id']
# Find nearest OBW to OBW station
def near_obw_to_obw(lat, long):
    distances = obw_station.apply(lambda row: haversine_dist(long, lat, row['longitude'], row['latitude']), axis=1)
distance = distances[distances!=0]
return obw_station.loc[distance.idxmin(), 'station_id']
# Find nearest AQ to OBW station
def near_aq_to_obw(lat, long):
    distances = obw_station.apply(lambda row: haversine_dist(long, lat, row['longitude'], row['latitude']), axis=1)
return obw_station.loc[distances.idxmin(), 'station_id']
# Find nearest AQ to GW station
def near_aq_to_gw(lat, long):
    distances = gw_station.apply(lambda row: haversine_dist(long, lat, row['longitude'], row['latitude']), axis=1)
return gw_station.loc[distances.idxmin(), 'station_id']
# Function to calculate the model error via SMAPE
def smape(actual, predicted):
dividend= np.abs(np.array(actual) - np.array(predicted))
denominator = np.array(actual) + np.array(predicted)
return 2 * np.mean(np.divide(dividend, denominator, out=np.zeros_like(dividend), where=denominator!=0, casting='unsafe'))
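# Worked example for smape (added illustration; note it expects float inputs,
# which is what the pipeline passes in): with actual=[100.0, 200.0] and
# predicted=[110.0, 190.0] the absolute errors are [10, 10] and the denominators
# are [210, 390], so smape = 2 * mean(10/210, 10/390) ~= 0.073.
assert abs(smape([100.0, 200.0], [110.0, 190.0]) - 0.0733) < 0.001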
''' ------------------------------------------TRAIN: AIR QUALITY PREPROCESSING------------------------------------------ '''
print('Preprocessing and cleaning the train Air Quality Dataset!')
# Read all the air quality datasets
aq_2017 = pd.read_csv("airQuality_201701-201801.csv")
aq_2018 = pd.read_csv("airQuality_201802-201803.csv")
aq_2018a = pd.read_csv("aiqQuality_201804.csv")
# Renaming the header of April AQ dataset to match the other AQ datasets
aq_2018a.rename(columns={'station_id': 'stationId', 'time': 'utc_time', 'PM25_Concentration':'PM2.5'\
,'PM10_Concentration':'PM10','NO2_Concentration':'NO2'\
,'CO_Concentration':'CO', 'O3_Concentration':'O3'\
,'SO2_Concentration':'SO2'}, inplace=True)
aq_2018a= aq_2018a.drop(columns=['id'], axis=1)
# Merge all AQ datasets together into a single dataframe
aq_train = aq_2017.append(aq_2018, ignore_index=True)
aq_train = aq_train.append(aq_2018a, ignore_index=True)
# Convert the entire 'utc_time' column into the same format
aq_train["utc_time"] = pd.to_datetime(aq_train["utc_time"])
# Delete unnecessary dataframes to save space
del(aq_2017)
del(aq_2018)
del(aq_2018a)
# Set the time column as the index of the dataframe
aq_train.set_index("utc_time", inplace = True)
# Get the entire span of the time in the AQ dataframe
min_date=aq_train.index.min()
max_date=aq_train.index.max()
# Drop any duplicates present in the AQ dataframe
aq_train.drop_duplicates(subset= None, keep= "first", inplace= True)
# Read the AQ station location file and find nearest station for each AQ station
# This dataset was created by us
station_aq = pd.read_csv("Beijing_AirQuality_Stations.csv")
station_aq["nearest_station"] = station_aq.apply(lambda row: near_aq_to_aq(row['latitude'], row['longitude']), axis=1)
# Create an empty dataframe with all hourly time stamps in the above found range
time_hours = pd.DataFrame({"date": pd.date_range(min_date, max_date, freq='H')})
# Perform a cartesian product of all AQ stations and the above dataframe
aq_all_time = pd.merge(time_hours.assign(key=0), station_aq.assign(key=0), on='key').drop('key', axis=1)
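# The assign(key=0) + merge pattern above is a cross join (cartesian product).
# A tiny self-contained illustration with hypothetical data, not used anywhere
# in the pipeline:
def _cross_join_example():
    left = pd.DataFrame({'hour': [0, 1]})
    right = pd.DataFrame({'station': ['A', 'B']})
    # Result has 4 rows: (0, 'A'), (0, 'B'), (1, 'A'), (1, 'B')
    return pd.merge(left.assign(key=0), right.assign(key=0), on='key').drop('key', axis=1)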
# Join the AQ dataset with the dataframe containing all the timestamps for each AQ station
aq_train1 = pd.merge(aq_train, aq_all_time, how='right', left_on=['stationId','utc_time'], right_on = ['station','date'])
aq_train1 = aq_train1.drop('stationId', axis=1)
aq_train1.drop_duplicates(subset= None, keep= "first", inplace= True)
# Create a copy of the above dataframe keeping all required columns
# This dataframe will be used to refer all data for the nearest AQ station (same time interval)
aq_train_copy = aq_train1.copy()
aq_train_copy = aq_train_copy.drop(['nearest_station','longitude', 'latitude', 'type'], axis=1)
aq_train_copy.rename(columns={'PM2.5': 'n_PM2.5','PM10': 'n_PM10', "NO2":"n_NO2","CO":"n_CO","O3":"n_O3",
"SO2":"n_SO2", "date":"n_date", "station":"n_station" }, inplace=True)
# Merge original AQ data and the copy AQ data to get all attributes of a particular AQ station and its nearest AQ station
aq_train2 = pd.merge(aq_train1, aq_train_copy, how='left', left_on=['nearest_station','date'], right_on = ['n_station','n_date'])
# Sort the final dataframe based on AQ station and then time
aq_train2 = aq_train2.sort_values(by=['n_station', 'date'], ascending=[True,True])
aq_train2 = aq_train2.reset_index(drop=True)
# Drop all unnecessary attributes
aq_train2.drop(['n_station', 'longitude', 'latitude', 'n_date'], axis=1, inplace=True)
# Create two attributes - month and hour
aq_train2['month'] = pd.DatetimeIndex(aq_train2['date']).month
aq_train2['hour'] = pd.DatetimeIndex(aq_train2['date']).hour
# Fill in missing values of attributes with their corresponding values in the nearest AQ station (within same time)
aq_train2['PM10'].fillna(aq_train2['n_PM10'], inplace=True)
aq_train2['PM2.5'].fillna(aq_train2['n_PM2.5'], inplace=True)
aq_train2['NO2'].fillna(aq_train2['n_NO2'], inplace=True)
aq_train2['CO'].fillna(aq_train2['n_CO'], inplace=True)
aq_train2['O3'].fillna(aq_train2['n_O3'], inplace=True)
aq_train2['SO2'].fillna(aq_train2['n_SO2'], inplace=True)
# Fill in any remaining missing value by the mean of the attribute within the same station, month and hour
aq_train2[['PM2.5', 'PM10', 'NO2', 'CO', 'O3', 'SO2']] = aq_train2.groupby(["station","month","hour"])[['PM2.5', 'PM10', 'NO2', 'CO', 'O3', 'SO2']].transform(lambda x: x.fillna(x.mean()))
# Create final AQ dataset after dropping all unnecessary attributes
aq_train_final = aq_train2.drop(['type','nearest_station','n_PM2.5','n_PM10','n_NO2','n_CO','n_O3','n_SO2'],axis=1)
# Delete unnecessary dataframes to save space
del(aq_train1)
del(aq_train2)
del(aq_train_copy)
del(aq_all_time)
print('Done!')
print('-'*50)
''' ------------------------------------------TRAIN: GRID DATASET PREPROCESSING------------------------------------------ '''
print('Preprocessing and cleaning the train Grid Weather Dataset!')
# Read all the grid weather train datasets
gw_2017 = pd.read_csv("gridWeather_201701-201803.csv")
gw_2018 = pd.read_csv("gridWeather_201804.csv")
# Renaming the headers of the GW data to match each other
gw_2017.rename(columns={'stationName': 'station_id', 'wind_speed/kph': 'wind_speed'}, inplace=True)
gw_2018.rename(columns={'station_id':'station_id', 'time':'utc_time'}, inplace=True)
# Merge all GW train datasets into a single dataframe
gw_train = gw_2017.append(gw_2018, ignore_index=True)
gw_train = gw_train.drop(columns=['id','weather'], axis=1)
# Delete unnecessary dataframes to save space
del(gw_2017)
del(gw_2018)
# Set the time column as the index of the dataframe
gw_train.set_index("utc_time", inplace = True)
# Get the entire span of the time in the GW dataframe
min_date = gw_train.index.min()
max_date = gw_train.index.max()
# Drop any duplicates present in the GW dataframe
gw_train.drop_duplicates(subset= None, keep= "first", inplace= True)
# Read the GW station location file and find nearest station for each GW station
gw_station = pd.read_csv("Beijing_grid_weather_station.csv", header=None, names=['station_id','latitude','longitude'])
gw_station["nearest_station"] = gw_station.apply(lambda row: near_gw_to_gw(row['latitude'], row['longitude']), axis=1)
# Create an empty dataframe with all hourly time stamps in the above found range
gw_time_hours = pd.DataFrame({"time": pd.date_range(min_date, max_date, freq='H')})
# Perform a cartesian product of all GW stations and the above dataframe
gw_all_time = pd.merge(gw_time_hours.assign(key=0), gw_station.assign(key=0), on='key').drop('key', axis=1)
gw_all_time['time'] = gw_all_time['time'].astype(str) # Make all time stamps in the same format
# Join the GW dataset with the dataframe containing all the timestamps for each GW station
gw_train1 = pd.merge(gw_train, gw_all_time, how='right', left_on=['station_id','utc_time'], right_on = ['station_id','time'])
gw_train1.drop_duplicates(subset= None, keep= "first", inplace= True)
# Create a copy of the above dataframe keeping all required columns
# This dataframe will be used to refer all data for the nearest GW station (same time interval)
gw_train_copy = gw_train1.copy()
gw_train_copy.drop(['nearest_station','longitude_x', 'latitude_y','latitude_x','longitude_y'], axis=1, inplace=True)
gw_train_copy.rename(columns={'humidity': 'n_humidity','pressure': 'n_pressure', "temperature":"n_temperature",\
"wind_direction":"n_wind_dir","wind_speed":"n_wind_speed",\
"time":"n_time", "station_id":"n_station_id" }, inplace=True)
# Merge original GW data and the copy GW data to get all attributes of a particular GW station and its nearest GW station
gw_train2 = pd.merge(gw_train1, gw_train_copy, how='left', left_on=['nearest_station','time'], right_on = ['n_station_id','n_time'])
# Sort the final dataframe based on GW station and then time
gw_train2 = gw_train2.sort_values(by=['station_id', 'time'], ascending=[True,True])
gw_train2 = gw_train2.reset_index(drop=True)
# Drop all unnecessary attributes
gw_train2.drop(['n_station_id', 'n_time','longitude_x', 'latitude_y','latitude_x','longitude_y'], axis=1, inplace=True)
# Create two attributes - month and hour
gw_train2['month'] = pd.DatetimeIndex(gw_train2['time']).month
gw_train2['hour'] = pd.DatetimeIndex(gw_train2['time']).hour
# Fill in missing values of attributes with their corresponding values in the nearest GW station (within same time)
gw_train2['humidity'].fillna(gw_train2['n_humidity'], inplace=True)
gw_train2['pressure'].fillna(gw_train2['n_pressure'], inplace=True)
gw_train2['temperature'].fillna(gw_train2['n_temperature'], inplace=True)
gw_train2['wind_speed'].fillna(gw_train2['n_wind_speed'], inplace=True)
gw_train2['wind_direction'].fillna(gw_train2['n_wind_dir'], inplace=True)
# Fill in any remaining missing value by the mean of the attribute within the same station, month and hour
gw_train2[['humidity', 'pressure', 'temperature', 'wind_direction', 'wind_speed']] = gw_train2.groupby(["station_id","month","hour"])[['humidity', 'pressure', 'temperature', 'wind_direction', 'wind_speed']].transform(lambda x: x.fillna(x.mean()))
# Create final GW dataset after dropping all unnecessary attributes
gw_train_final = gw_train2.drop(['nearest_station','n_humidity','n_pressure','n_temperature','n_wind_dir','n_wind_speed'],axis=1)
# Delete unnecessary dataframes to save space
del(gw_train1)
del(gw_train2)
del(gw_train_copy)
del(gw_all_time)
print('Done!')
print('-'*50)
''' -----------------------------------TRAIN: OBSERVED WEATHER DATASET PREPROCESSING------------------------------------ '''
print('Preprocessing and cleaning the train Observed Weather Dataset!')
# Read all the observed weather train datasets
obw_2017 = pd.read_csv("observedWeather_201701-201801.csv")
obw_2018 = pd.read_csv("observedWeather_201802-201803.csv")
obw_2018a = pd.read_csv("observedWeather_201804.csv")
obw_2018a.rename(columns={'time': 'utc_time'}, inplace=True)
# Read the time stamp in the April observed weather data in the same format as the other datasets
#obw_2018a['utc_time'] = pd.to_datetime(obw_2018a['utc_time'], format='%d-%m-%Y %H:%M:%S')
obw_2018a['utc_time'] = obw_2018a['utc_time'].astype(str)
# Merge all OBW train datasets into a single dataframe
obw_train = obw_2017.append(obw_2018, ignore_index=True)
obw_train = obw_train.append(obw_2018a, ignore_index=True)
obw_train.drop(['id','weather'],axis=1, inplace=True) # Drop unnecessary columns
# Delete unnecessary dataframes to save space
del(obw_2017)
del(obw_2018)
del(obw_2018a)
# Set the time column as the index of the dataframe
obw_train.set_index("utc_time", inplace = True)
# Get the entire span of the time in the OBW dataframe
min_date = obw_train.index.min()
max_date = obw_train.index.max()
# Drop any duplicates present in the OBW dataframe
obw_train.drop_duplicates(subset= None, keep= "first", inplace= True)
# Read the OBW station location file
obw_station = obw_train[["station_id","latitude","longitude"]]
obw_station = obw_station.drop_duplicates().dropna()
obw_station = obw_station.reset_index(drop=True)
# Find nearest station for each OBW station
obw_station["nearest_station"] = obw_station.apply(lambda row: near_obw_to_obw(row['latitude'], row['longitude']), axis=1)
# Create an empty dataframe with all hourly time stamps in the above found range
obw_time_hours = pd.DataFrame({"time": pd.date_range(min_date, max_date, freq='H')})
# Perform a cartesian product of all OBW stations and the above dataframe
obw_all_time = pd.merge(obw_time_hours.assign(key=0), obw_station.assign(key=0), on='key').drop('key', axis=1)
obw_all_time['time'] = obw_all_time['time'].astype(str) # Make all time stamps in the same format
# Join the OBW dataset with the dataframe containing all the timestamps for each OBW station
obw_train1 = pd.merge(obw_train, obw_all_time, how='right', left_on=['station_id','utc_time'], right_on = ['station_id','time'])
obw_train1.drop_duplicates(subset= None, keep= "first", inplace= True)
# Create a copy of the above dataframe keeping all required columns
# This dataframe will be used to refer all data for the nearest OBW station (same time interval)
obw_train_copy = obw_train1.copy()
obw_train_copy.drop(['nearest_station','longitude_x', 'latitude_x','longitude_y', 'latitude_y'], axis=1, inplace=True)
obw_train_copy.rename(columns={'humidity': 'n_humidity','pressure': 'n_pressure', "temperature":"n_temperature",\
"wind_direction":"n_wind_dir","wind_speed":"n_wind_speed",\
"time":"n_time", "station_id":"n_station_id" }, inplace=True)
# Merge original OBW data and the copy OBW data to get all attributes of a particular OBW station and its nearest OBW station
obw_train2 = pd.merge(obw_train1, obw_train_copy, how='left', left_on=['nearest_station','time'], right_on = ['n_station_id','n_time'])
# Sort the final dataframe based on OBW station and then time
obw_train2 = obw_train2.sort_values(by=['station_id', 'time'], ascending=[True,True] )
obw_train2.drop(['n_station_id', 'n_time'], axis=1, inplace=True)
obw_train2 = obw_train2.reset_index(drop=True)
# Create two attributes - month and hour
obw_train2['month'] = pd.DatetimeIndex(obw_train2['time']).month
obw_train2['hour'] = pd.DatetimeIndex(obw_train2['time']).hour
# Fill in missing values of attributes with their corresponding values in the nearest OBW station (within same time)
obw_train2['humidity'].fillna(obw_train2['n_humidity'], inplace=True)
obw_train2['pressure'].fillna(obw_train2['n_pressure'], inplace=True)
obw_train2['temperature'].fillna(obw_train2['n_temperature'], inplace=True)
obw_train2['wind_speed'].fillna(obw_train2['n_wind_speed'], inplace=True)
obw_train2['wind_direction'].fillna(obw_train2['n_wind_dir'], inplace=True)
# Fill in any remaining missing value by the mean of the attribute within the same station, month and hour
obw_train2[['humidity', 'pressure', 'temperature', 'wind_direction', 'wind_speed']] = obw_train2.groupby(["station_id","month","hour"])[['humidity', 'pressure', 'temperature', 'wind_direction', 'wind_speed']].transform(lambda x: x.fillna(x.mean()))
# Create final OBW dataset after dropping all unnecessary attributes
obw_train_final = obw_train2.drop(['longitude_x', 'latitude_x','longitude_y', 'latitude_y','nearest_station',\
'n_humidity','n_pressure','n_temperature','n_wind_dir','n_wind_speed'],axis=1)
# Delete unnecessary dataframes to save space
del(obw_train1)
del(obw_train2)
del(obw_train_copy)
del(obw_all_time)
print('Done!')
print('-'*50)
''' --------------------------MERGING ALL TRAINING DATASETS AND GETTING READY FOR MODEL TRAINING------------------------- '''
aq_train_final['date'] = aq_train_final['date'].astype(str)
print('Getting the training model ready!')
# Convert wind speed in grid weather data from kmph to m/s (observed weather data is already in m/s)
gw_train_final['wind_speed'] = (gw_train_final['wind_speed']*5)/18
# Make all start and end times equal for the training datasets
gw_train_final = gw_train_final[gw_train_final['time']>='2017-01-30 16:00:00']
aq_train_final = aq_train_final[aq_train_final['date']>='2017-01-30 16:00:00']
# Replace noise values with the previous hour's value in both the Observed and Grid datasets
obw_train_final.replace(999999,np.NaN,inplace=True)
obw_train_final[['humidity', 'pressure','temperature','wind_direction','wind_speed']] = obw_train_final[['humidity', 'pressure','temperature','wind_direction','wind_speed']].fillna(method='ffill')
gw_train_final.replace(999999,np.NaN,inplace=True)
gw_train_final[['humidity', 'pressure','temperature','wind_direction','wind_speed']] = gw_train_final[['humidity', 'pressure','temperature','wind_direction','wind_speed']].fillna(method='ffill')
# Replace wind direction with the noise value '999017' when wind speed is less than 0.5m/s
# This value will then be replaced with data from the nearest observed or grid station for the same timestamp
obw_train_final.loc[obw_train_final.wind_speed < 0.5, 'wind_direction'] = 999017
gw_train_final.loc[gw_train_final.wind_speed < 0.5, 'wind_direction'] = 999017
# Find nearest OBW and GW station for every AQ station for proper joining of attributes
obw_station.drop(['nearest_station'],axis=1, inplace=True)
station_aq["near_obw"] = station_aq.apply(lambda row: near_aq_to_obw(row['latitude'], row['longitude']), axis=1)
gw_station.drop(['nearest_station'],axis=1, inplace=True)
station_aq["near_gw"] = station_aq.apply(lambda row: near_aq_to_gw(row['latitude'], row['longitude']), axis=1)
# Merge the AQ training dataset with the nearest OBW and GW stations for every time stamp
aq_train1 = pd.merge(aq_train_final, station_aq, how='left', on='station')
aq_train1.drop(['type','nearest_station'], axis=1, inplace=True)
# Append all GW data attributes with the AQ training set based on nearest GW station and time stamp
aq_train2 = pd.merge(aq_train1, gw_train_final, how='left', left_on=['near_gw','date'], right_on=['station_id','time'])
# Remove unnecessary columns and rename columns to prepare for merging of OBW data
aq_train2.drop(['station_id','time','month_y','hour_y'],axis=1, inplace=True)
aq_train2 = aq_train2.rename(columns={'month_x': 'month_aq', 'hour_x': 'hour_aq', 'longitude':'longitude_aq',\
'latitude':'latitude_aq', 'humidity': 'humidity_gw','pressure': 'pressure_gw',\
'wind_direction': 'wind_dir_gw', 'wind_speed':'wind_speed_gw',\
'temperature': 'temperature_gw'})
# Append all OBW data attributes with the AQ training set based on nearest OBW station and time stamp
TRAIN = pd.merge(aq_train2, obw_train_final, how='left', left_on=['near_obw','date'], right_on=['station_id','time'])
TRAIN.drop(['station_id','time','month','hour'],axis=1, inplace=True)
TRAIN = TRAIN.rename(columns={'humidity': 'humidity_obw','pressure': 'pressure_obw',\
'wind_direction': 'wind_dir_obw', 'wind_speed':'wind_speed_obw',\
'temperature': 'temperature_obw'})
# Final clean of all 999017 noise from the OBW and GW for wind direction
TRAIN.loc[TRAIN.wind_dir_gw == 999017, 'wind_dir_gw'] = TRAIN['wind_dir_obw']
TRAIN.loc[TRAIN.wind_dir_obw == 999017, 'wind_dir_obw'] = TRAIN['wind_dir_gw']
# Some observed data points are extreme outliers (probably recorded incorrectly)
TRAIN.loc[TRAIN.humidity_obw > 100, 'humidity_obw'] = TRAIN['humidity_gw']
TRAIN.loc[TRAIN.pressure_obw > 1040, 'pressure_obw'] = TRAIN['pressure_gw']
TRAIN.loc[TRAIN.temperature_obw > 50, 'temperature_obw'] = TRAIN['temperature_gw']
TRAIN.loc[TRAIN.wind_dir_obw > 360, 'wind_dir_obw'] = TRAIN['wind_dir_gw']
TRAIN.loc[TRAIN.wind_speed_obw > 20, 'wind_speed_obw'] = TRAIN['wind_speed_gw']
# Sort the final train set based on station and then timestamp
TRAIN = TRAIN.sort_values(by=['station', 'date'], ascending=[True,True])
print('Ready to be trained by the model!')
print('-'*50)
''' ----------------------TEST DATA: CLEANING, PREPROCESSING AND GETTING READY FOR MODEL-------------------------------- '''
print('Getting the testing data ready for the model!')
# Read the AQ test dataset for test data - This dataset was found from the Beijing meteorological datasets
# This dataset helps in getting the values for the NO2, SO2 and CO attributes for the test data timestamps
test_aq = pd.read_csv('MAY_AQ.csv')
test_aq['Time'] = pd.to_datetime(test_aq['Time'], format='%d-%m-%Y %H:%M')
test_aq['Time'] = test_aq['Time'].astype(str)
# Merge the dataset with nearest GW and OBW stations with the AQ test dataset
test1 = pd.merge(test_aq, station_aq, how='left', left_on='station_id', right_on='station').drop(['station','longitude','latitude','type','nearest_station','AQI'],axis=1)
# Find time stamp range for test data: from 1st May 00:00 to 2nd May 23:00
test1.set_index("Time", inplace = True)
min_date_test = test1.index.min()
max_date_test = test1.index.max()
test1.reset_index(inplace=True)
# Grid Test Data Preprocessing
test_gw = pd.read_csv('gridWeather_20180501-20180502.csv') # Read GW test data
test_gw.drop(['id','weather'],axis=1, inplace=True)
# Create new dataframe with all timestamps for all GW stations
test_gw1 = pd.DataFrame({"time": pd.date_range(min_date_test, max_date_test, freq='H')})
test_gw2 = pd.merge(test_gw1.assign(key=0), gw_station.assign(key=0), on='key').drop('key', axis=1)
test_gw2['time'] = test_gw2['time'].astype(str) # Convert time in correct format
gw_test_final = pd.merge(test_gw2, test_gw, how='left', left_on=['station_id','time'], right_on = ['station_id','time'])
# Observed Test Data Preprocessing
test_obw = pd.read_csv('observedWeather_20180501-20180502.csv') # Read OBW test data
test_obw.drop(['id','weather'],axis=1, inplace=True)
# Create new dataframe with all timestamps for all OBW stations
test_obw1 = pd.DataFrame({"time": pd.date_range(min_date_test, max_date_test, freq='H')})
test_obw2 = pd.merge(test_obw1.assign(key=0), obw_station.assign(key=0), on='key').drop('key', axis=1)
test_obw2['time'] = test_obw2['time'].astype(str) # Convert time in correct format
obw_test_final = pd.merge(test_obw2, test_obw, how='left', left_on=['station_id','time'], right_on = ['station_id','time'])
# Join AQ Test dataframe with test GW dataframe
test_aq1 = pd.merge(test1, gw_test_final, how='left', left_on=['near_gw','Time'], right_on=['station_id','time'])
test_aq1.drop(['station_id_y','latitude','longitude'],axis=1, inplace=True)
# Rename certain columns to prepare for joining the OBW test dataframe
test_aq1 = test_aq1.rename(columns={'station_id_x':'station_id_aq',\
'humidity': 'humidity_gw',\
'pressure': 'pressure_gw',\
'wind_direction': 'wind_dir_gw',\
'wind_speed':'wind_speed_gw',\
'temperature': 'temperature_gw'})
# Join the updated AQ Test dataframe with test OBW dataframe
TEST = pd.merge(test_aq1, obw_test_final, how='left', left_on=['near_obw','time'], right_on=['station_id','time'])
TEST.drop(['station_id','latitude','longitude','time'],axis=1, inplace=True)
# Rename certain columns
TEST = TEST.rename(columns={'humidity': 'humidity_obw',\
'pressure': 'pressure_obw',\
'wind_direction': 'wind_dir_obw',\
'wind_speed':'wind_speed_obw',\
'temperature': 'temperature_obw'})
# Create attributes for month and hour - to be taken as input parameters
TEST['month'] = pd.DatetimeIndex(TEST['Time']).month
TEST['hour'] = pd.DatetimeIndex(TEST['Time']).hour
# Fill missing OBW values from the nearest GW data (as very few values are missing in the OBW data)
TEST = TEST.sort_values(by=['station_id_aq', 'Time'], ascending=[True,True])
TEST['humidity_obw'] = TEST['humidity_obw'].fillna(TEST['humidity_gw'])
TEST['temperature_obw'] = TEST['temperature_obw'].fillna(TEST['temperature_gw'])
TEST['pressure_obw'] = TEST['pressure_obw'].fillna(TEST['pressure_gw'])
TEST['wind_speed_obw'] = TEST['wind_speed_obw'].fillna(TEST['wind_speed_gw'])
TEST['wind_dir_obw'] = TEST['wind_dir_obw'].fillna(TEST['wind_dir_gw'])
# Take care of noise 999017 when wind speed is less than 0.5m/s
TEST.loc[TEST.wind_dir_gw == 999017, 'wind_dir_gw'] = TEST['wind_dir_obw']
TEST.loc[TEST.wind_dir_obw == 999017, 'wind_dir_obw'] = TEST['wind_dir_gw']
print('Ready to be tested by the model!')
''' ---------------------------------TRAINING THE MODEL AND PREDICTING REQUIRED OUTPUT----------------------------------- '''
# Train the model with only April, May and June's data
TRAIN = TRAIN.loc[TRAIN['month_aq'].isin([4,5,6])]
# Extract output columns for training the model
Y = TRAIN[['PM2.5','PM10','O3']].values
# Input parameters for the model
X = TRAIN.drop(['PM2.5','PM10','O3','latitude_aq','longitude_aq'], axis=1)
# Create new features for the model
X['AQ'] = (X['SO2']*X['NO2']*X['CO'])
X['wind'] = X['wind_dir_gw']/X['wind_speed_gw']
# Final input parameters after feature engineering
X_train = X[['station','month_aq','hour_aq','temperature_gw','AQ','humidity_gw','wind','pressure_gw']].values
# One Hot encode the station column and normalize the entire input data
le = LabelEncoder()
ohe = OneHotEncoder(categorical_features=[0])
scaler = MinMaxScaler()
X_train[:,0] = le.fit_transform(X_train[:,0])
X_train = ohe.fit_transform(X_train).toarray()
X_train_sc = scaler.fit_transform(X_train)
# Use Random Forest Regressor to predict the values
model_rf = RandomForestRegressor(random_state=42)
# Use K Fold Cross Validation to check the efficiency of the model
print('-------Printing the Cross Validation SMAPE errors-------')
kf = KFold(n_splits=10, shuffle=True, random_state=42)
for train_index, test_index in kf.split(X_train_sc):
x_train, x_val = X_train_sc[train_index], X_train_sc[test_index]
y_train, y_val = Y[train_index], Y[test_index]
model_rf.fit(x_train, y_train)
pred_val = model_rf.predict(x_val)
print(smape(y_val,pred_val))
# Get the Test data ready for the model by following the above steps
TEST['AQ'] = (TEST['CO']*TEST['SO2']*TEST['NO2'])
TEST['wind'] = TEST['wind_dir_gw']/TEST['wind_speed_gw']
# Final test data input features
X_test = TEST[['station_id_aq','month','hour','temperature_gw','AQ','humidity_gw','wind','pressure_gw']].values
# One hot encode and normalize similarly to the train data
X_test[:,0] = le.transform(X_test[:,0])
X_test = ohe.transform(X_test).toarray()
X_test_sc = scaler.transform(X_test)
# Predict the results after training the model on the whole final train dataset
model_rf.fit(X_train_sc,Y)
pred = model_rf.predict(X_test_sc)
''' --------------------------EXPORTING THE PREDICTED RESULTS INTO THE SPECIFIED FORMAT---------------------------------- '''
index_test = TEST[['station_id_aq']]
index = list(range(0,48)) # Create a list with all the values in the range (each for one hour over a period of two days)
# Turn the above numbers into a continuous cycle
index1 = cycle(index)
index_test['index'] = [next(index1) for i in range(len(index_test))]
# Create a column with all 35 AQ station names and all time indexes
index_test['test_id'] = index_test['station_id_aq']+'#'+index_test['index'].astype(str)
# Extract the required column and join it with the predicted output
# Both test and train data are sorted by station name and time - hence the predicted output is already in the matching order
index_test.drop(['index','station_id_aq'],axis=1, inplace=True)
index_test1 = index_test.values
output = np.concatenate((index_test1, pred), axis=1)
np.savetxt('submission.csv', output, delimiter=',', header='test_id,PM2.5,PM10,O3', fmt='%s,%f,%f,%f', comments='')
print('The code is complete - please find your results in the "submission.csv" file!')
print("--- %s seconds ---" % (time.time() - start_time))
'''-------------------------------------------------------END-------------------------------------------------------------'''
| 57.671053
| 249
| 0.702031
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 16,350
| 0.532903
|
b1f22c9adbe507763be9a3e8cffbcec89c6b45a4
| 234
|
py
|
Python
|
examples/SortTimeDemo.py
|
Ellis0817/Introduction-to-Programming-Using-Python
|
1882a2a846162d5ff56d4d56c3940b638ef408bd
|
[
"MIT"
] | null | null | null |
examples/SortTimeDemo.py
|
Ellis0817/Introduction-to-Programming-Using-Python
|
1882a2a846162d5ff56d4d56c3940b638ef408bd
|
[
"MIT"
] | 4
|
2019-11-07T12:32:19.000Z
|
2020-07-19T14:04:44.000Z
|
examples/SortTimeDemo.py
|
Ellis0817/Introduction-to-Programming-Using-Python
|
1882a2a846162d5ff56d4d56c3940b638ef408bd
|
[
"MIT"
] | 5
|
2019-12-04T15:56:55.000Z
|
2022-01-14T06:19:18.000Z
|
import random
import time
n = int(input("Enter the number of elements to sort: "))
lst = list(range(n))
random.shuffle(lst)
startTime = time.time()
lst.sort()
print("Sort time in Python is", int(time.time() - startTime), "seconds")
| 23.4
| 72
| 0.705128
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 73
| 0.311966
|
b1f274b6140c852afcbc6bb5b744a886df0fb5fe
| 102
|
py
|
Python
|
cloudflare/__init__.py
|
darylyu/cloudflare
|
be12ac9fa614a7078a89d7036f3a99e3165bd99d
|
[
"BSD-3-Clause"
] | 1
|
2015-05-03T12:51:44.000Z
|
2015-05-03T12:51:44.000Z
|
cloudflare/__init__.py
|
darylyu/cloudflare
|
be12ac9fa614a7078a89d7036f3a99e3165bd99d
|
[
"BSD-3-Clause"
] | 2
|
2015-12-17T00:47:01.000Z
|
2016-04-04T14:24:14.000Z
|
cloudflare/__init__.py
|
darylyu/cloudflare
|
be12ac9fa614a7078a89d7036f3a99e3165bd99d
|
[
"BSD-3-Clause"
] | 3
|
2015-09-13T22:43:54.000Z
|
2016-04-02T19:44:21.000Z
|
# -*- coding: utf-8 -*-
__author__ = 'Daryl Yu'
__email__ = 'dyu@fastmail.com'
__version__ = '0.0.2'
| 17
| 30
| 0.627451
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 58
| 0.568627
|
b1f5f177dec08c59abe32983e95271dfac01dbdf
| 1,239
|
py
|
Python
|
tests/conftest.py
|
andrewsayre/pysmartapp
|
5c3be867584d7e82d00b5998295b20bd12eccf94
|
[
"MIT"
] | 10
|
2019-02-07T20:07:10.000Z
|
2020-12-30T20:29:32.000Z
|
tests/conftest.py
|
andrewsayre/pysmartapp
|
5c3be867584d7e82d00b5998295b20bd12eccf94
|
[
"MIT"
] | 1
|
2021-12-05T15:00:13.000Z
|
2021-12-05T15:00:13.000Z
|
tests/conftest.py
|
andrewsayre/pysmartapp
|
5c3be867584d7e82d00b5998295b20bd12eccf94
|
[
"MIT"
] | 2
|
2020-10-17T20:20:45.000Z
|
2021-09-28T12:58:50.000Z
|
"""Define common test configuraiton."""
import pytest
from pysmartapp.dispatch import Dispatcher
from pysmartapp.smartapp import SmartApp, SmartAppManager
@pytest.fixture
def smartapp(event_loop) -> SmartApp:
"""Fixture for testing against the SmartApp class."""
app = SmartApp(dispatcher=Dispatcher(loop=event_loop))
app.name = 'SmartApp'
app.description = 'SmartApp Description'
app.permissions.append('l:devices')
app.config_app_id = 'myapp'
return app
@pytest.fixture
def manager(event_loop) -> SmartAppManager:
"""Fixture for testing against the SmartAppManager class."""
return SmartAppManager('/path/to/app',
dispatcher=Dispatcher(loop=event_loop))
@pytest.fixture
def handler():
"""Fixture handler to mock in the dispatcher."""
def target(*args, **kwargs):
target.fired = True
target.args = args
target.kwargs = kwargs
target.fired = False
return target
@pytest.fixture
def async_handler():
"""Fixture async handler to mock in the dispatcher."""
async def target(*args, **kwargs):
target.fired = True
target.args = args
target.kwargs = kwargs
target.fired = False
return target
| 26.361702
| 66
| 0.67958
| 0
| 0
| 0
| 0
| 1,070
| 0.8636
| 120
| 0.096852
| 318
| 0.256659
|
b1f8c5ac672b61358853182ee48a06e86cda8b9c
| 294
|
py
|
Python
|
to_do_list.py
|
GYosifov88/Python-Fundamentals
|
b46ba2822bd2dac6ff46830c6a520e559b448442
|
[
"MIT"
] | null | null | null |
to_do_list.py
|
GYosifov88/Python-Fundamentals
|
b46ba2822bd2dac6ff46830c6a520e559b448442
|
[
"MIT"
] | null | null | null |
to_do_list.py
|
GYosifov88/Python-Fundamentals
|
b46ba2822bd2dac6ff46830c6a520e559b448442
|
[
"MIT"
] | null | null | null |
todo_list = ["" for i in range(11)]
command = input()
while command != 'End':
task = command.split('-')
importance = int(task[0])
thing_to_do = task[1]
todo_list[importance] = thing_to_do
command = input()
final_list = [x for x in todo_list if x != ""]
print(final_list)
| 21
| 46
| 0.629252
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 12
| 0.040816
|
b1fa447d2310139f7a8d64aba2e5e1395276502b
| 6,035
|
py
|
Python
|
run.py
|
Tracymbone/password_locker
|
346a3c770174d20fe24720fd4875f5f4e222d582
|
[
"MIT"
] | null | null | null |
run.py
|
Tracymbone/password_locker
|
346a3c770174d20fe24720fd4875f5f4e222d582
|
[
"MIT"
] | null | null | null |
run.py
|
Tracymbone/password_locker
|
346a3c770174d20fe24720fd4875f5f4e222d582
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3.8
from users import Users
from credentials import Credentials
def create_credentials(first_name, last_name, user_name, credential):
users = Users(first_name, last_name, user_name, credential)
return users
def save_user(users):
users.save_user()
def delete_users(users):
users.delete_users()
def find_users(user_name):
return Users.find_by_user_name(user_name)
def isexist_users(user_name):
return Users.users_exists(user_name)
def display_users():
return Users.display_users()
def create_page(page, credentials):
    new_credential = Credentials(page, credentials)
    return new_credential
def save_page(credentials):
credentials.save_page()
def find_page(pager):
return Credentials.find_by_page(pager)
def isexist_page(pager):
return Credentials.page_exists(pager)
def delete_page(credential):
Credentials.delete_page()
def display_pages():
return Credentials.display_page()
def main():
print('WELCOME TO PASSWORD_LOCKER')
print('Use the following information to pick their corresponding values')
while True:
print(" 1) SIGN IN \n 2) REGESTER \n 3) ABOUT PASSWORD_LOCKER \n 4) DISPLAY USERS \n 5) SIGN OUT")
choice = int(input())
if choice == 1:
print('Enter username')
username = input()
print('Enter credential')
            credential = input()
            user = find_users(username)
            if user and user.user_name == username and user.credentials == credential:
print('logged in ')
while True:
print(
                        f'Welcome {username}, Use the following numbers to select their corresponding values')
print(
' 1) Save new credential \n 2) Delete credential \n 3) Display saved credentials \n 4) Log out ')
log_choice = int(input())
if log_choice == 1:
print('New page')
print('*'*100)
print('Page name')
page = input()
print('credentials')
                        credential = input()
                        # create and save the page
                        save_page(create_page(page, credential))
elif log_choice == 2:
print("Enter the name of the page you want to delete")
page = input()
if isexist_page(page):
remove_page = (page)
delete_page(remove_page)
else:
                            print(f"I can't find {page}")
elif log_choice == 3:
if display_pages():
for pag in display_pages():
print(
f'{pag.page}:{pag.credential}'
)
else:
print('NO CREDENTIAL SAVED YET')
print('\n')
elif log_choice == 4:
print('adios')
break
else:
print('wrong credentials')
        elif choice == 2:
print('NEW USERS')
print('*'*100)
print('FIRSTNAME')
first_name = input()
print('LASTNAME')
last_name = input()
print('USERNAME')
user_name = input()
print('CREDENTIALS')
            credential = input()
            save_user(create_credentials(
                first_name, last_name, user_name, credential))
# save and create a new user
print('USER FORMED')
while True:
print(
f'Welcome {user_name}, Use the following numbers to select their corresponding values')
print(
' 1) Save new credential \n 2) Delete credential \n 3) Display saved credential \n 4) Log out ')
log_choice = int(input())
if log_choice == 1:
print('New page')
print('*'*100)
print('Page name')
page = input()
print('credential')
                    credential = input()
                    # create and save the page
                    save_page(create_page(page, credential))
elif log_choice == 2:
print("Enter the name of the page you want to delete")
page = input()
if isexist_page(page):
remove_page = (page)
delete_page(remove_page)
else:
                        print(f"I can't find {page}")
elif log_choice == 3:
if display_pages():
for pag in display_pages():
print(
f'{pag.page}:{pag.credential}'
)
else:
print('NO CREDENTIAL SAVED YET')
elif log_choice == 4:
break
elif choice == 3:
print('ABOUT PASSWORD_LOCKER')
print(
'''
This is a terminal based project where users can input their credentials according to the different accounts that they have.
''')
elif choice == 4:
if display_users():
for account in display_users():
print(
                        f'{account.user_name}'
)
else:
print('NO USERS')
elif choice == 5:
            print('Bye! Welcome back again')
break
if __name__ == '__main__':
main()
| 28.875598
| 136
| 0.471914
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,390
| 0.230323
|
b1faa38cc22b54eb622228d21323a509bcdbceb8
| 2,346
|
py
|
Python
|
menu_info/menu_details.py
|
averytorres/WazHack-Clone
|
e53e9b1b64f3828b20e45d4eeaafcdedf9bc6fda
|
[
"Unlicense"
] | 1
|
2019-06-21T17:13:35.000Z
|
2019-06-21T17:13:35.000Z
|
menu_info/menu_details.py
|
averytorres/WazHack-Clone
|
e53e9b1b64f3828b20e45d4eeaafcdedf9bc6fda
|
[
"Unlicense"
] | 18
|
2019-06-25T00:48:11.000Z
|
2019-07-11T17:52:24.000Z
|
menu_info/menu_details.py
|
averytorres/WazHack-Clone
|
e53e9b1b64f3828b20e45d4eeaafcdedf9bc6fda
|
[
"Unlicense"
] | 1
|
2019-06-21T17:08:23.000Z
|
2019-06-21T17:08:23.000Z
|
from game_states import GameStates
from action_consumer.available_actions_enum import Action
def get_menu_title(menu_name):
    menu_titles = {
        GameStates.SHOW_INVENTORY: 'Press the key next to an item to use it, or Esc to cancel.\n',
        GameStates.DROP_INVENTORY: 'Press the key next to an item to drop it, or Esc to cancel.\n',
        GameStates.SHOW_WEAPON_INVENTORY: 'Press the key next to an item to equip/unequip it, or Esc to cancel.\n',
        GameStates.SHOW_ARMOR_INVENTORY: 'Press the key next to an item to equip/unequip it, or Esc to cancel.\n',
        GameStates.SHOW_SCROLL_INVENTORY: 'Press the key next to an item to read it, or Esc to cancel.\n',
        GameStates.SHOW_QUAFF_INVENTORY: 'Press the key next to an item to quaff it, or Esc to cancel.\n',
        GameStates.LEVEL_UP: 'Level up! Choose a stat to raise:',
        GameStates.CHARACTER_SCREEN: 'Character Information',
        GameStates.PLAYERS_TURN: '',
        GameStates.PLAYER_DEAD: '',
        None: '',
    }
return menu_titles[menu_name]
def get_menu_width(menu_name):
    menu_width = {
        GameStates.SHOW_INVENTORY: 50,
        GameStates.DROP_INVENTORY: 50,
        GameStates.SHOW_WEAPON_INVENTORY: 50,
        GameStates.SHOW_ARMOR_INVENTORY: 50,
        GameStates.SHOW_SCROLL_INVENTORY: 50,
        GameStates.SHOW_QUAFF_INVENTORY: 50,
        GameStates.LEVEL_UP: 40,
        GameStates.CHARACTER_SCREEN: 10,
        GameStates.PLAYERS_TURN: 24,
        GameStates.PLAYER_DEAD: 50,
        None: 24,
    }
return menu_width[menu_name]
def get_menu_height(screen_height):
return int(screen_height * 1.8)
def get_main_menu_options():
return ['Play a new game', 'Continue last game', 'Quit']
def get_main_menu_key(index):
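    # Map the selected main-menu index to the corresponding action flag.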
index = int(index)
if index == 0:
return {Action.NEW_GAME: True}
elif index == 1:
return {Action.LOAD_GAME: True}
elif index == 2:
return {Action.EXIT: True}
else:
return {}
| 39.1
| 132
| 0.728048
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 503
| 0.214408
|
b1fbf96a2060ff4635f4538c1011d07667a95b78
| 270
|
py
|
Python
|
sciris/sc_version.py
|
optimamodel/sciris
|
fc0148fd9352e443a1c9b1a790275bc2904b30b1
|
[
"MIT"
] | null | null | null |
sciris/sc_version.py
|
optimamodel/sciris
|
fc0148fd9352e443a1c9b1a790275bc2904b30b1
|
[
"MIT"
] | 4
|
2018-03-27T21:47:13.000Z
|
2018-08-28T00:50:00.000Z
|
sciris/sc_version.py
|
optimamodel/sciris
|
fc0148fd9352e443a1c9b1a790275bc2904b30b1
|
[
"MIT"
] | 1
|
2018-09-05T07:57:39.000Z
|
2018-09-05T07:57:39.000Z
|
'''
Version and license information.
'''
__all__ = ['__version__', '__versiondate__', '__license__']
__version__ = '1.3.3'
__versiondate__ = '2022-01-16'
__license__ = f'Sciris {__version__} ({__versiondate__}) – © 2014-2022 by the Sciris Development Team'
| 27
| 107
| 0.692593
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 193
| 0.70696
|
b1fc50952b7cf799deab08fe85f0849c2cbaf2f0
| 1,154
|
py
|
Python
|
tests/unit/fileserver/test_hgfs.py
|
yuriks/salt
|
d2a5bd8adddb98ec1718d79384aa13b4f37e8028
|
[
"Apache-2.0",
"MIT"
] | 1
|
2020-03-31T22:51:16.000Z
|
2020-03-31T22:51:16.000Z
|
tests/unit/fileserver/test_hgfs.py
|
yuriks/salt
|
d2a5bd8adddb98ec1718d79384aa13b4f37e8028
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
tests/unit/fileserver/test_hgfs.py
|
yuriks/salt
|
d2a5bd8adddb98ec1718d79384aa13b4f37e8028
|
[
"Apache-2.0",
"MIT"
] | 1
|
2021-09-30T07:00:01.000Z
|
2021-09-30T07:00:01.000Z
|
# -*- coding: utf-8 -*-
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
# Import Salt Testing libs
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.unit import TestCase
from tests.support.mock import patch
# Import Salt libs
import salt.fileserver.hgfs as hgfs
class HgfsFileTest(TestCase, LoaderModuleMockMixin):
def setup_loader_modules(self):
return {
hgfs: {}
}
def test_env_is_exposed(self):
'''
test _env_is_exposed method when
base is in whitelist
'''
with patch.dict(hgfs.__opts__,
{'hgfs_saltenv_whitelist': 'base',
'hgfs_saltenv_blacklist': ''}):
assert hgfs._env_is_exposed('base')
def test_env_is_exposed_blacklist(self):
'''
test _env_is_exposed method when
base is in blacklist
'''
with patch.dict(hgfs.__opts__,
{'hgfs_saltenv_whitelist': '',
'hgfs_saltenv_blacklist': 'base'}):
assert not hgfs._env_is_exposed('base')
| 28.85
| 72
| 0.618718
| 816
| 0.707106
| 0
| 0
| 0
| 0
| 0
| 0
| 381
| 0.330156
|
b1fca680c3a855f104f3ad48d1f63a988374a6e5
| 26
|
py
|
Python
|
constants.py
|
harryrobertwright/plutus
|
7a0d9f1474982d1bb66d7b018f2ce7e28aab7bc3
|
[
"MIT"
] | null | null | null |
constants.py
|
harryrobertwright/plutus
|
7a0d9f1474982d1bb66d7b018f2ce7e28aab7bc3
|
[
"MIT"
] | null | null | null |
constants.py
|
harryrobertwright/plutus
|
7a0d9f1474982d1bb66d7b018f2ce7e28aab7bc3
|
[
"MIT"
] | null | null | null |
INTERVALS = [
"1h",
]
| 6.5
| 13
| 0.423077
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 4
| 0.153846
|
b1fd1af131dc102c96ef990fe42c7c22c4e492de
| 1,273
|
py
|
Python
|
networks/model_factory.py
|
DQle38/Fair-Feature-Distillation-for-Visual-Recognition
|
f0f98728f36528218bf19dce9a26d6ee1ba96e58
|
[
"MIT"
] | 5
|
2021-09-07T13:33:45.000Z
|
2022-02-12T18:56:45.000Z
|
networks/model_factory.py
|
DQle38/Fair-Feature-Distillation-for-Visual-Recognition
|
f0f98728f36528218bf19dce9a26d6ee1ba96e58
|
[
"MIT"
] | null | null | null |
networks/model_factory.py
|
DQle38/Fair-Feature-Distillation-for-Visual-Recognition
|
f0f98728f36528218bf19dce9a26d6ee1ba96e58
|
[
"MIT"
] | 4
|
2021-09-25T06:56:38.000Z
|
2022-03-24T18:06:08.000Z
|
import torch.nn as nn
from networks.resnet import resnet18
from networks.shufflenet import shufflenet_v2_x1_0
from networks.cifar_net import Net
from networks.mlp import MLP
class ModelFactory():
def __init__(self):
pass
@staticmethod
def get_model(target_model, num_classes, img_size, pretrained=False):
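        # Select the backbone by name; for resnet/shufflenet, optionally load pretrained weights and replace the final fc layer to match num_classes.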
if target_model == 'mlp':
return MLP(feature_size=img_size, hidden_dim=40, num_class=num_classes)
elif target_model == 'resnet':
if pretrained:
model = resnet18(pretrained=True)
model.fc = nn.Linear(in_features=512, out_features=num_classes, bias=True)
else:
model = resnet18(pretrained=False, num_classes=num_classes)
return model
elif target_model == 'cifar_net':
return Net(num_classes=num_classes)
elif target_model == 'shufflenet':
if pretrained:
model = shufflenet_v2_x1_0(pretrained=True)
model.fc = nn.Linear(in_features=1024, out_features=num_classes, bias=True)
else:
model = shufflenet_v2_x1_0(pretrained=False, num_classes=num_classes)
return model
else:
raise NotImplementedError
| 31.04878
| 91
| 0.639434
| 1,094
| 0.859387
| 0
| 0
| 1,030
| 0.809112
| 0
| 0
| 36
| 0.02828
|
b1ff61ec8eb947ca5da56f846d344d35e22df2db
| 5,536
|
py
|
Python
|
main.py
|
MarySueTeam/Video_Maker
|
a3bbdeb49b5f887d5f8dbc3b4e57b955d4ee3671
|
[
"MIT"
] | 1
|
2022-03-04T09:25:11.000Z
|
2022-03-04T09:25:11.000Z
|
main.py
|
MarySueTeam/Video_Maker
|
a3bbdeb49b5f887d5f8dbc3b4e57b955d4ee3671
|
[
"MIT"
] | null | null | null |
main.py
|
MarySueTeam/Video_Maker
|
a3bbdeb49b5f887d5f8dbc3b4e57b955d4ee3671
|
[
"MIT"
] | 1
|
2022-01-25T16:19:25.000Z
|
2022-01-25T16:19:25.000Z
|
from manim import *
from TTS.TTS import get_mp3_file
from utils import cut, get_duration, deal_text
import time
class Video(Scene):
def construct(self):
        # INFO Video opening
LOGO = ImageMobject("./media/images/logo.png").scale(0.3).to_edge(UP, buff=2)
Slogan_text = "为你收集日落时的云朵,为你收藏下雨后的天空"
get_mp3_file(text=f"{Slogan_text}", output_path="./media/sounds/video_start", rate="-10%")
Slogan = Text(Slogan_text, font="Muyao-Softbrush", weight=MEDIUM, color="#FCA113").scale(0.7).next_to(LOGO, DOWN, buff=1)
self.play(FadeIn(LOGO, run_time=0.1))
self.wait(0.5)
self.play(FadeIn(Slogan), run_time=1)
self.add_sound("./media/sounds/video_start.mp3")
self.wait(5)
self.play(FadeOut(Slogan, LOGO))
        # INFO Main video content
LOGO = ImageMobject("./media/images/logo.png").scale(0.1).to_edge(UL)
username = Text("@仙女玛丽苏吖",font="Muyao-Softbrush").scale(0.5).next_to(LOGO, RIGHT)
self.add(LOGO, username)
title = "在本子上写上他的名字"
title = "《" + title + "》"
title = Text(title, font="Muyao-Softbrush", color=ORANGE).scale(0.5).to_edge(UP, buff=0.75)
self.add(title)
with open("./media/words/words.txt", "rt", encoding="utf-8") as f:
content = f.readline()
while content:
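                # For each script line: synthesize TTS audio, write the text on screen, and hold it for the audio's duration before fading out.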
audio_path = "./media/sounds/video_content_"+str(round(time.time()*1000))
# content = deal_text(content)
get_mp3_file(text=content,output_path=audio_path,rate="-10%")
audio_path = audio_path + ".mp3"
audio_time = get_duration(audio_path)
content = MarkupText(content, font="Muyao-Softbrush", font_size=60, justify=True).scale(0.5)
                run_time = max(1, len(content)//50)  # keep run_time positive so Write() does not get 0
self.play(Write(content), run_time=run_time)
self.add_sound(audio_path, time_offset = 1)
self.wait(audio_time)
self.play(FadeOut(content))
content = f.readline()
self.play(FadeOut(title,username,LOGO))
        # INFO Video ending
LOGO = ImageMobject("./media/images/logo.png").scale(0.2).to_edge(UP, buff=2)
messages_text = "你可以在下面的平台找到我,这一期就先到这里,我们下期再见。"
messages = Text("-你可以在下面的平台找到我-", font="Muyao-Softbrush").scale(0.4).next_to(LOGO, DOWN)
        # INFO Get the audio file
get_mp3_file(text=f"{messages_text}",output_path="./media/sounds/video_end",rate="-10%")
gonzhonghao = ImageMobject("./media/images/icon/weixin.png").scale(0.2)
username1 = Text("@仙女玛丽苏", font="Smartisan Compact CNS", weight=MEDIUM).scale(0.25).next_to(gonzhonghao)
zhihu = ImageMobject("./media/images/icon/zhihu.png").next_to(gonzhonghao, RIGHT, buff=1).scale(0.2)
username2 = Text("@仙女玛丽苏", font="Smartisan Compact CNS", weight=MEDIUM).scale(0.25).next_to(zhihu)
xiaohongshu = ImageMobject("./media/images/icon/xiaohongshu.png").next_to(zhihu, RIGHT, buff=1).scale(0.2)
username3 = Text("@仙女玛丽苏", font="Smartisan Compact CNS", weight=MEDIUM).scale(0.25).next_to(xiaohongshu)
bilibili = ImageMobject("./media/images/icon/bilibili.png").next_to(gonzhonghao).scale(0.2)
username4 = Text("@仙女玛丽苏吖", font="Smartisan Compact CNS", weight=MEDIUM).scale(0.25).next_to(bilibili)
douyin = ImageMobject("./media/images/icon/douyin.png").next_to(bilibili, RIGHT, buff=1).scale(0.2)
username5 = Text("@仙女玛丽苏", font="Smartisan Compact CNS", weight=MEDIUM).scale(0.25).next_to(douyin)
toutiao = ImageMobject("./media/images/icon/toutiao1.png").next_to(douyin, RIGHT, buff=1).scale(0.2)
        username6 = Text("@仙女玛丽苏", font="Smartisan Compact CNS", weight=MEDIUM).scale(0.25).next_to(toutiao)
jianshu = ImageMobject("./media/images/icon/jianshu.png").next_to(bilibili).scale(0.2)
username7 = Text("@仙女玛丽苏", font="Smartisan Compact CNS", weight=MEDIUM).scale(0.25).next_to(jianshu)
kuaishou = ImageMobject("./media/images/icon/kuaishou.png").next_to(jianshu, RIGHT, buff=1).scale(0.2)
username8 = Text("@仙女玛丽苏吖", font="Smartisan Compact CNS", weight=MEDIUM).scale(0.25).next_to(kuaishou)
xiguashipin = ImageMobject("./media/images/icon/xiguashipin.png").next_to(kuaishou, RIGHT, buff=1).scale(0.2)
username9 = Text("@仙女玛丽苏", font="Smartisan Compact CNS", weight=MEDIUM).scale(0.25).next_to(xiguashipin)
Recommend_group1 = Group(
gonzhonghao,
username1,
zhihu,
username2,
xiaohongshu,
username3,
).next_to(LOGO, DOWN, buff=1)
Recommend_group2 = Group(
bilibili,
username4,
douyin,
username5,
toutiao,
username6,
).next_to(Recommend_group1, DOWN, buff=0.2)
Recommend_group3 = Group(
jianshu,
username7,
kuaishou,
username8,
xiguashipin,
username9,
).next_to(Recommend_group2, DOWN, buff=0.2)
Recommend_group = Group(
Recommend_group1,
Recommend_group2,
Recommend_group3
)
self.play(FadeIn(LOGO))
duration = get_duration("./media/sounds/video_end.mp3")
self.add_sound("./media/sounds/video_end.mp3", time_offset=0.5)
        self.play(Write(messages), run_time=0.5)
self.play(FadeIn(Recommend_group))
self.wait(duration)
self.play(FadeOut(Recommend_group,messages,LOGO))
| 48.99115
| 129
| 0.62211
| 5,714
| 0.980439
| 0
| 0
| 0
| 0
| 0
| 0
| 1,492
| 0.256005
|
b1ff7639399b3c6d47d30f81feb9b3ec46b39e02
| 106
|
py
|
Python
|
Discord Bots/Discord Bot/test_Bot.py
|
SeymoTheDev/skittles-stuff
|
f9eba3efd0577045085418391b7154f3fd121f70
|
[
"MIT"
] | null | null | null |
Discord Bots/Discord Bot/test_Bot.py
|
SeymoTheDev/skittles-stuff
|
f9eba3efd0577045085418391b7154f3fd121f70
|
[
"MIT"
] | null | null | null |
Discord Bots/Discord Bot/test_Bot.py
|
SeymoTheDev/skittles-stuff
|
f9eba3efd0577045085418391b7154f3fd121f70
|
[
"MIT"
] | null | null | null |
from unittest import TestCase
class Test(TestCase):
def test_punch(self):
self.fail()
| 15.142857
| 30
| 0.641509
| 69
| 0.650943
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
5901159e3f1532199cb8c881801333e8fca64f93
| 1,518
|
py
|
Python
|
sevenbridges/models/compound/tasks/batch_by.py
|
sbg/sevenbridges-python
|
b3e14016066563470d978c9b13e1a236a41abea8
|
[
"Apache-2.0"
] | 46
|
2016-04-27T12:51:17.000Z
|
2021-11-24T23:43:12.000Z
|
sevenbridges/models/compound/tasks/batch_by.py
|
sbg/sevenbridges-python
|
b3e14016066563470d978c9b13e1a236a41abea8
|
[
"Apache-2.0"
] | 111
|
2016-05-25T15:44:31.000Z
|
2022-02-05T20:45:37.000Z
|
sevenbridges/models/compound/tasks/batch_by.py
|
sbg/sevenbridges-python
|
b3e14016066563470d978c9b13e1a236a41abea8
|
[
"Apache-2.0"
] | 37
|
2016-04-27T12:10:43.000Z
|
2021-03-18T11:22:28.000Z
|
from sevenbridges.meta.resource import Resource
# noinspection PyUnresolvedReferences,PyProtectedMember
class BatchBy(Resource, dict):
"""
Task batch by resource.
"""
_name = 'batch_by'
# noinspection PyMissingConstructor
def __init__(self, **kwargs):
self.parent = kwargs.pop('_parent')
self.api = kwargs.pop('api')
for k, v in kwargs.items():
super().__setitem__(k, v)
def __setitem__(self, key, value):
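        # Write the value through to the parent task's data and record the key as dirty on the parent.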
super().__setitem__(key, value)
self.parent._data[self._name][key] = value
if self._name not in self.parent._dirty:
self.parent._dirty.update({self._name: {}})
self.parent._dirty[self._name][key] = value
def __getitem__(self, item):
try:
return self.parent._data[self._name][item]
except KeyError:
return None
def __repr__(self):
values = {}
for k, _ in self.items():
values[k] = self[k]
return str(values)
__str__ = __repr__
def update(self, e=None, **f):
other = {}
if e:
other.update(e, **f)
else:
other.update(**f)
for k, v in other.items():
if other[k] != self[k]:
self[k] = other[k]
def equals(self, other):
if not type(other) == type(self):
return False
return (
self is other or
self._parent._data[self._name] == other._parent._data[self._name]
)
| 27.107143
| 77
| 0.554018
| 1,411
| 0.929513
| 0
| 0
| 0
| 0
| 0
| 0
| 153
| 0.100791
|
5904c0dfbd55d07ecdda6b598e8aefd81056a978
| 2,764
|
py
|
Python
|
server/core/tests/test_models.py
|
jleg13/Django-REST-API
|
7e2c397ca3d49a320a79356c96b35beb86cc97ff
|
[
"MIT"
] | null | null | null |
server/core/tests/test_models.py
|
jleg13/Django-REST-API
|
7e2c397ca3d49a320a79356c96b35beb86cc97ff
|
[
"MIT"
] | null | null | null |
server/core/tests/test_models.py
|
jleg13/Django-REST-API
|
7e2c397ca3d49a320a79356c96b35beb86cc97ff
|
[
"MIT"
] | null | null | null |
from unittest.mock import patch
from django.test import TestCase
from django.contrib.auth import get_user_model
from core import models
def sample_user(email='test@email.com', password='testpass'):
"""Create a sample user"""
return get_user_model().objects.create_user(email, password)
class ModelTests(TestCase):
def test_create_user_with_email_succesful(self):
""""Test creating a new user with an email is successful"""
email = 'test@test.com'
password = 'password123'
user = get_user_model().objects.create_user(
email=email,
password=password
)
self.assertEqual(user.email, email)
self.assertTrue(user.check_password(password))
def test_new_user_email_normalized(self):
"""Test the email for a new user is normalized"""
email = 'test@TEST.COM'
user = get_user_model().objects.create_user(email, 'password123')
self.assertEqual(user.email, email.lower())
def test_new_user_invalid_email(self):
"""Test creating user with no email raises error"""
with self.assertRaises(ValueError):
get_user_model().objects.create_user(None, 'password123')
def test_create_new_superuser(self):
"""Test creating a new superuser"""
user = get_user_model().objects.create_superuser(
'test@test.com', 'password123'
)
self.assertTrue(user.is_superuser)
self.assertTrue(user.is_staff)
def test_tag_str(self):
"""Test the tag string representation"""
tag = models.Tag.objects.create(
user=sample_user(),
name='Vegan'
)
self.assertEqual(str(tag), tag.name)
def test_gallery_item_str(self):
"""Test the gallery item string representation"""
gallery_item = models.GalleryItem.objects.create(
user=sample_user(),
name='Test gallery item',
blurb='This is a blurb'
)
self.assertEqual(str(gallery_item), gallery_item.name)
def test_gallery_str(self):
"""Test the gallery string representation"""
gallery = models.Gallery.objects.create(
user=sample_user(),
title='Test gallery',
description='This is a description'
)
self.assertEqual(str(gallery), gallery.title)
@patch('uuid.uuid4')
def test_gallery_item_file_name_uuid(self, mock_uuid):
"""Test that image is saved in the correct location"""
uuid = 'test-uuid'
mock_uuid.return_value = uuid
file_path = models.gallery_item_image_file_path(None, 'myimage.jpg')
exp_path = f'uploads/gallery-items/{uuid}.jpg'
self.assertEqual(file_path, exp_path)
| 32.904762
| 76
| 0.646527
| 2,463
| 0.8911
| 0
| 0
| 386
| 0.139653
| 0
| 0
| 681
| 0.246382
|
5906bff03b00e79f2660983fe9997b9cd354f2bc
| 30
|
py
|
Python
|
tests/database/__init__.py
|
dinosv/cobib
|
15342de37336a51d87c8f04f8430d0621da69a5c
|
[
"MIT"
] | 9
|
2020-09-27T19:22:35.000Z
|
2022-02-27T20:00:58.000Z
|
tests/database/__init__.py
|
dinosv/cobib
|
15342de37336a51d87c8f04f8430d0621da69a5c
|
[
"MIT"
] | null | null | null |
tests/database/__init__.py
|
dinosv/cobib
|
15342de37336a51d87c8f04f8430d0621da69a5c
|
[
"MIT"
] | 2
|
2020-12-07T15:26:03.000Z
|
2021-10-03T18:04:57.000Z
|
"""coBib's database tests."""
| 15
| 29
| 0.633333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 29
| 0.966667
|
590721cca2145e8661012d52208da3bcc5dbe108
| 230
|
py
|
Python
|
Semester-1/Lab8/src/lab_A.py
|
Vipul-Cariappa/Collage-CS-Lab
|
0a0193df9575a4e69b60759d974423202ddf544b
|
[
"MIT"
] | null | null | null |
Semester-1/Lab8/src/lab_A.py
|
Vipul-Cariappa/Collage-CS-Lab
|
0a0193df9575a4e69b60759d974423202ddf544b
|
[
"MIT"
] | null | null | null |
Semester-1/Lab8/src/lab_A.py
|
Vipul-Cariappa/Collage-CS-Lab
|
0a0193df9575a4e69b60759d974423202ddf544b
|
[
"MIT"
] | 2
|
2022-03-04T14:06:15.000Z
|
2022-03-16T17:32:10.000Z
|
# program to display first n lines in a text file
n = int(input("Enter number of lines: "))
with open("note.txt") as file:
while n > 0:
print(
file.readline(),
end=""
)
n -= 1
| 19.166667
| 49
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 86
| 0.373913
|
59079f538bc9e256df53c65451be92c382f11c5c
| 23,420
|
py
|
Python
|
eplusplus/view/mainWindow.py
|
labeee/EPlusPlus
|
da6cbd60575146a8f165fb72e165919cd83ddc24
|
[
"MIT"
] | 1
|
2018-02-06T17:41:12.000Z
|
2018-02-06T17:41:12.000Z
|
eplusplus/view/mainWindow.py
|
labeee/EPlusPlus
|
da6cbd60575146a8f165fb72e165919cd83ddc24
|
[
"MIT"
] | null | null | null |
eplusplus/view/mainWindow.py
|
labeee/EPlusPlus
|
da6cbd60575146a8f165fb72e165919cd83ddc24
|
[
"MIT"
] | 1
|
2021-06-29T02:49:59.000Z
|
2021-06-29T02:49:59.000Z
|
import os
import sys
import ctypes
import webbrowser
from .lineEdit import LineEdit
from .dialogWithCheckBox import DialogWithCheckBox
from eplusplus.controller import ActorUser
from eplusplus.exception import ColumnException, NoIdfException, InstallException, NoCsvException
from PyQt5.QtCore import QSize, Qt, QRect
from PyQt5.QtGui import QPixmap, QIcon, QIntValidator
from PyQt5.QtWidgets import QApplication, QWidget, QPushButton, QVBoxLayout
from PyQt5.QtWidgets import QHBoxLayout, QLabel, QLineEdit, QRadioButton
from PyQt5.QtWidgets import QGridLayout, QFileDialog, QMessageBox, QApplication
from PyQt5.QtWidgets import QButtonGroup, QLineEdit, QAction, QMenuBar
##
## @brief This class implements the main window of the eplusplus
##        application. The UI uses PyQt to create and configure
##        all the components. Besides components such as labels,
##        radio buttons, buttons and line edits, the main window
##        holds an actorUser, which represents the controller and is
##        used to call the methods implemented in the program logic.
##
class MainWindow(QWidget):
def __init__(self, args):
super(MainWindow, self).__init__()
msgBox = DialogWithCheckBox(self)
self.firstTime = True
self.pathToIcon = "./media/icon.png"
self.actorUser = ActorUser()
if not self.actorUser.existsFileConfirmCheckBox():
checkedBox = msgBox.exec_()[1]
if checkedBox:
self.actorUser.createFileConfirmCheckBox()
self.logo = QLabel()
self.casesButton = QPushButton("Gerar casos")
self.simulationButton = QPushButton("Executar simulação")
self.confirmButtonCases = QPushButton("Confirmar")
self.cancelButton = QPushButton("Cancelar")
self.chooseIdfButton = QPushButton("Escolher IDF...")
self.chooseCSVButton = QPushButton("Escolher CSV...")
self.chooseFolderButton = QPushButton("Escolher pasta...")
self.chooseEpwButton = QPushButton("Escolher EPW...")
self.confirmButtonSimulation = QPushButton("Confirmar")
self.setWindowIcon(QIcon(self.pathToIcon))
self.lineIdf = LineEdit(self)
self.lineCsv = LineEdit(self)
self.lineFolder = LineEdit(self)
self.lineEpw = LineEdit(self)
self.lineCases = QLineEdit()
self.validatorCases = QIntValidator(1, 9999999, self)
self.lineCases.setValidator(self.validatorCases)
self.group = QButtonGroup()
self.lhsRB = QRadioButton("Latin Hypercube Sampling")
self.randomRB = QRadioButton("Random")
self.group.addButton(self.randomRB)
self.group.addButton(self.lhsRB)
self.gridLayout = QGridLayout()
self.menuBar = QMenuBar()
self.help = self.menuBar.addMenu("Ajuda")
self.helpAction = QAction("Documentação", self)
self.help.addAction(self.helpAction)
self.helpAction.triggered.connect(self.documentationClicked)
self.processingMessage = QLabel()
self.gridLayout.setMenuBar(self.menuBar)
self.initComponents()
##
    ## @brief This method is called by the constructor, or when the
    ##        cancel button is clicked, to go back to the first screen.
    ##        It configures the layout. Also, if it is the first time
    ##        this method is called, all buttons are connected to
    ##        their corresponding methods.
##
## @param self Non static method.
##
## @return This is a void method.
##
def initComponents(self):
pixmap = QPixmap("./media/title.png")
self.logo.setPixmap(pixmap)
self.gridLayout.addWidget(self.logo, 0, 0)
self.gridLayout.addWidget(self.casesButton, 1, 0)
self.gridLayout.addWidget(self.simulationButton, 2, 0)
if self.firstTime:
self.firstTime = False
self.casesButton.clicked.connect(self.casesButtonClicked)
self.simulationButton.clicked.connect(self.simulationButtonClicked)
self.cancelButton.clicked.connect(self.cancelButtonClicked)
self.confirmButtonCases.clicked.connect(self.confirmButtonCasesClicked)
self.chooseIdfButton.clicked.connect(self.chooseIdfClicked)
self.chooseCSVButton.clicked.connect(self.chooseCsvClicked)
self.chooseFolderButton.clicked.connect(self.chooseFolderClicked)
self.chooseEpwButton.clicked.connect(self.chooseEpwButtonClicked)
self.confirmButtonSimulation.clicked.connect(self.confirmButtonSimulationClicked)
self.checkAndInstall()
self.setLayout(self.gridLayout)
self.setFixedSize(650, 250)
self.setWindowTitle("EPlusPlus")
self.show()
##
    ## @brief This method is activated whenever the "casesButton" is
    ##        pressed. First of all, it removes all components from
    ##        the window. After that it just configures the labels,
    ##        lineTexts and buttons into the grid layout.
##
## @param self Non static method.
##
## @return This is a void method.
##
def casesButtonClicked(self):
self.clearAll()
idfLabel = QLabel()
csvLabel = QLabel()
folderStoreLabel = QLabel()
methodSamplingLabel = QLabel()
sampleSize = QLabel()
idfLabel.setText("Arquivo base IDF:")
csvLabel.setText("Arquivo de configuração CSV:")
folderStoreLabel.setText("Pasta para salvar os arquivos IDF's:")
methodSamplingLabel.setText("Método de amostragem")
sampleSize.setText("Número da amostragem")
self.gridLayout.addWidget(idfLabel, 1, 0, Qt.AlignRight)
self.gridLayout.addWidget(self.chooseIdfButton, 1, 1)
self.gridLayout.addWidget(self.lineIdf, 1, 2)
self.gridLayout.addWidget(csvLabel, 2, 0, Qt.AlignRight)
self.gridLayout.addWidget(self.chooseCSVButton, 2, 1)
self.gridLayout.addWidget(self.lineCsv, 2, 2)
self.gridLayout.addWidget(folderStoreLabel, 3, 0, Qt.AlignRight)
self.gridLayout.addWidget(self.chooseFolderButton, 3, 1)
self.gridLayout.addWidget(self.lineFolder, 3, 2)
self.gridLayout.addWidget(methodSamplingLabel, 4, 1, Qt.AlignBottom)
self.gridLayout.addWidget(self.randomRB, 5, 0, Qt.AlignTop)
self.gridLayout.addWidget(self.lhsRB, 5, 2, Qt.AlignRight)
self.gridLayout.addWidget(sampleSize, 6, 0, 1, 2)
self.gridLayout.addWidget(self.lineCases, 6, 2)
self.gridLayout.addWidget(self.confirmButtonCases, 7, 0, 1, 3, Qt.AlignTop)
self.gridLayout.addWidget(self.cancelButton, 8, 0, 1, 3, Qt.AlignTop)
##
    ## @brief This method is activated whenever the "simulationButton" is
    ##        pressed. First of all, it removes all components from
    ##        the window. After that it just configures the labels,
    ##        lineTexts and buttons into the grid layout.
##
## @param self Non static method
##
## @return This is a void method
##
def simulationButtonClicked(self):
self.clearAll()
folderStoreLabel = QLabel()
epwLabel = QLabel()
folderStoreLabel.setText("Pasta com os arquivos idf's")
epwLabel.setText("Arquivo EPW")
self.gridLayout.addWidget(folderStoreLabel, 1, 0, Qt.AlignRight)
self.gridLayout.addWidget(self.chooseFolderButton, 1, 1)
self.gridLayout.addWidget(self.lineFolder, 1, 2)
self.gridLayout.addWidget(epwLabel, 2, 0, Qt.AlignRight)
self.gridLayout.addWidget(self.chooseEpwButton, 2, 1)
self.gridLayout.addWidget(self.lineEpw, 2, 2)
        # Doing this just to make the UI look a little bit nicer
self.gridLayout.addWidget(QLabel(), 3, 0)
self.gridLayout.addWidget(self.processingMessage, 4, 0, 1, 3, Qt.AlignCenter)
self.gridLayout.addWidget(self.confirmButtonSimulation, 7, 0, 1, 3, Qt.AlignBottom)
self.gridLayout.addWidget(self.cancelButton, 8, 0, 1, 3, Qt.AlignBottom)
##
    ## @brief This method is activated whenever the "chooseIdf" button is
    ##        pressed. When it runs, a QFileDialog is shown to the user
    ##        so an idf file can be chosen. The QFileDialog shows only
    ##        idf files and folders. After the idf file is chosen, the
    ##        "lineIdf" attribute has its text set to the absolute path
    ##        of the chosen idf.
##
## @param self Non static method.
##
## @return This is a void method.
##
def chooseIdfClicked(self):
msg = "Escolha o arquivo idf"
filename = QFileDialog.getOpenFileName(self, msg, os.getenv("HOME"), filter="*.idf")
self.setLineIdfText(filename[0])
##
    ## @brief This method is activated whenever the "chooseCsv" button is
    ##        pressed. When it runs, a QFileDialog is shown to the user
    ##        so a csv file can be chosen. After the csv file is chosen,
    ##        the "lineCsv" attribute has its text set to the absolute
    ##        path of the chosen csv.
##
## @param self Non static method.
##
## @return This is a void method.
##
def chooseCsvClicked(self):
msg = "Escolha o arquivo base csv"
filename = QFileDialog.getOpenFileName(self, msg, os.getenv("HOME"), filter="*.csv")
self.setLineCsvText(filename[0])
##
    ## @brief This method is activated whenever the "chooseFolder" button is
    ##        clicked. When it runs, a QFileDialog is shown to the user
    ##        so a folder can be chosen to save the new idf files that
    ##        will be generated. After the folder is chosen, the
    ##        "lineFolder" attribute has its text changed to the absolute
    ##        path of the chosen folder.
##
## @param self Non static method.
##
## @return This is a void method.
##
def chooseFolderClicked(self):
msg = "Escolha a pasta para salvar os arquivos IDF's"
folder = QFileDialog.getExistingDirectory(self, msg, os.getenv("HOME"))
self.setLineFolderText(folder)
##
    ## @brief This method is activated when the cancel button is
    ##        pressed. It removes all components from the screen
    ##        and goes back to the initial screen.
##
## @param self Non static method.
##
## @return This is a void method.
##
def cancelButtonClicked(self):
self.clearAll()
self.initComponents()
##
    ## @brief This method is activated whenever the confirm button
    ##        is pressed. It checks whether all the lineText fields
    ##        were filled and one radio button was selected. If not,
    ##        the user is informed through a QMessageBox. Otherwise,
    ##        if all fields were covered, the cases are generated.
    ##        See the "generateCases" method for more info.
##
## @param self Non static method.
##
## @return This is a void method.
##
def confirmButtonCasesClicked(self):
msgBox = QMessageBox()
msgBox.setIcon(QMessageBox.Warning)
msgBox.setWindowIcon(QIcon(self.pathToIcon))
msgBox.setWindowTitle("EPlusPlus-WAR")
msgBox.setText("Todos os campos devem estar preenchidos para prosseguir!")
if self.lineIdf.text() == "":
msgBox.exec_()
elif self.lineCsv.text() == "":
msgBox.exec_()
elif self.lineFolder.text() == "":
msgBox.exec_()
elif self.lineCases.text() == "":
msgBox.exec_()
elif not self.lhsRB.isChecked() and not self.randomRB.isChecked():
msgBox.exec_()
else:
self.generateCases()
##
    ## @brief This method is activated whenever the "chooseEpwButton" is
    ##        clicked. When it runs, a QFileDialog is shown to the user
    ##        so an EPW file can be chosen. After the EPW is chosen, the
    ##        "lineEpw" attribute has its text changed to the absolute
    ##        path of the chosen EPW.
##
## @param self Non static method
##
## @return This is a void method
##
def chooseEpwButtonClicked(self):
msg = "Escolha o arquivo EPW"
epwFile = QFileDialog.getOpenFileName(self, msg, os.getenv("HOME"), filter="*.epw")
self.setLineEpwText(epwFile[0])
##
    ## @brief This method is called whenever the confirm button of the
    ##        simulation screen is clicked. It checks whether all fields
    ##        are filled. If not, a warning message is shown to the user
    ##        through a MessageBox informing that all fields need to be
    ##        completed. Otherwise, if all fields were filled, the
    ##        simulation is executed.
##
## @param self Non static method
##
## @return This is a void method
##
def confirmButtonSimulationClicked(self):
msgBox = QMessageBox()
msgBox.setIcon(QMessageBox.Warning)
msgBox.setWindowIcon(QIcon(self.pathToIcon))
msgBox.setWindowTitle("EPlusPlus-WAR")
msgBox.setText("Todos os campos devem estar preenchidos para prosseguir!")
if self.lineFolder.text() == "":
msgBox.exec_()
elif self.lineEpw.text() == "":
msgBox.exec_()
else:
self.runSimulation()
##
    ## @brief This method is used every time the "Documentation" button
    ##        is clicked in the menu bar. It opens the program manual,
    ##        in pdf format, in the user's default browser.
##
## @param self Non static method
##
## @return This is a void method.
##
def documentationClicked(self):
doc = "./docs/documentacaoEPlusPlus.pdf"
webbrowser.open("file://"+os.path.abspath(doc))
##
    ## @brief This method takes all values informed by the user through
    ##        the lineEdit fields. After checking which sampling method
    ##        was chosen, the UI calls the actorUser to generate the
    ##        cases. If everything happens as it should, a QMessageBox
    ##        informs the user. Otherwise, if a "ColumnException" is
    ##        raised by the "actorUser", the user is informed that the
    ##        Csv and the Idf do not match.
##
## @param self Non static method.
##
## @return This is a void method.
##
def generateCases(self):
pathToIdf = self.lineIdf.text()
pathToCsv = self.lineCsv.text()
pathToFolder = self.lineFolder.text()
sampleSize = int(self.lineCases.text())
msgBox = QMessageBox()
msgBox.setWindowIcon(QIcon(self.pathToIcon))
msg = ""
if self.lhsRB.isChecked():
method = "LHS"
else:
method = "RANDOM"
try:
self.actorUser.generateCases(pathToIdf, pathToCsv, pathToFolder, sampleSize, method)
msgBox.setIcon(QMessageBox.Information)
msgBox.setWindowTitle("EPlusPlus-INF")
msg = "Processo finalizado! Verifique a pasta informada para acessar os arquivos."
msgBox.setText(msg)
msgBox.exec_()
self.cancelButtonClicked()
except ColumnException as e:
msgBox.setIcon(QMessageBox.Critical)
msgBox.setWindowTitle("EPlusPlus-ERR")
msg = str(e)
msgBox.setText(msg)
msgBox.exec_()
##
    ## @brief In the first lines, we transform the content informed by the
    ##        user on the current screen into strings. After that, we
    ##        create a QMessageBox to show important information. Then
    ##        it tries to run the simulation through the "actorUser"
    ##        (see its documentation for more info). If no IDF file is
    ##        found in the informed folder, an exception is raised.
    ##        Otherwise, if at least one IDF is found, the simulation
    ##        runs normally. After that, the 'actorUser' tries to insert
    ##        the data from the result csv into the database. If no csv
    ##        is found, an exception is raised.
##
## @param self Non static method
##
## @return This is a void method.
##
def runSimulation(self):
pathToFolder = self.lineFolder.text()
pathToEpw = self.lineEpw.text()
msgBox = QMessageBox()
msgBox.setWindowIcon(QIcon(self.pathToIcon))
msg = ""
try:
self.actorUser.findIdfFiles(pathToFolder)
msg = "Processando arquivos..."
self.processingMessage.setText(msg)
QApplication.processEvents()
self.actorUser.runSimulation(pathToFolder, pathToEpw)
except NoIdfException as e:
msgBox.setIcon(QMessageBox.Critical)
msgBox.setWindowTitle("EPlusPlus-ERR")
msg = str(e)
msgBox.setText(msg)
msgBox.exec_()
try:
self.actorUser.insertIntoDatabase(pathToFolder)
msgBox.setIcon(QMessageBox.Information)
msgBox.setWindowTitle("EPlusPlus-INF")
msg = "Processo finalizado com sucesso!"
msgBox.setText(msg)
msgBox.exec_()
ask = 'Você gostaria de apagar os arquivos e manter somente a base de dados?'
reply = QMessageBox.question(self, "EPlusPlus-INF", ask, QMessageBox.Yes, QMessageBox.No)
if reply == QMessageBox.Yes:
self.actorUser.removeDirectories(pathToFolder)
msg = "Arquivos removidos com sucesso!"
msgBox.setText(msg)
msgBox.exec_()
self.cancelButtonClicked()
except NoCsvException as e:
msgBox.setIcon(QMessageBox.Critical)
msgBox.setWindowTitle("EPlusPlus-ERR")
msg = str(e)
msgBox.setText(msg)
msgBox.exec_()
##
    ## @brief This method is responsible for checking whether all tools are
    ##        installed on the current machine. If not, a message is
    ##        shown to the user and the installation starts. If, by any
    ##        means, a problem occurs, an error message appears on the
    ##        screen. If all goes well, a success message is shown.
##
## @param self Non static method
##
## @return This is a void method
##
def checkAndInstall(self):
msgBox = QMessageBox()
msgBox.setWindowTitle("EPlusPlus-INF")
msgBox.setWindowIcon(QIcon(self.pathToIcon))
msg = "O EPlusPlus irá agora instalar as ferramentas necessárias para"
msg += " o seu correto funcionamento!"
if not self.actorUser.checkTools():
try:
msgBox.setText(msg)
msgBox.setIcon(QMessageBox.Information)
msgBox.exec_()
self.actorUser.checkAndInstall()
msg = "Instalações feitas com sucesso!"
msgBox.setText(msg)
msgBox.exec_()
except InstallException as e:
msgBox = QMessageBox()
msgBox.setIcon(QMessageBox.Critical)
msgBox.setWindowTitle("EPlusPlus-ERR")
msg = str(e)
msgBox.setText(msg)
msgBox.exec_()
sys.exit()
##
    ## @brief This method sets the first lineText of the 2nd screen
    ##        with the path of the idf file chosen by the user
    ##        through the QFileDialog.
##
## @param self The object
## @param string String that will be show at the lineText.
##
## @return This is a void method.
##
def setLineIdfText(self, string):
self.lineIdf.setText(string)
##
## @brief This method sets the second lineText of the 2nd
    ##        screen with the path where the csv file is
    ##        saved, chosen by the user.
##
## @param self Non static method.
## @param string String that will be show at the lineText.
##
## @return This is a void method.
##
def setLineCsvText(self, string):
self.lineCsv.setText(string)
##
## @brief This method sets the third lineText of the 2nd
    ##        screen with the path where the new idf files
    ##        will be saved, chosen by the user.
##
## @param self Non static method.
## @param string String that will be show at the lineText.
##
## @return This is a void method.
##
def setLineFolderText(self, string):
self.lineFolder.setText(string)
##
## @brief This method sets the fourth lineText of the 2nd screen
## with the value equals to the string passed as arg.
##
## @param self Non static method
## @param string String that will be show at the lineCases
##
## @return This is a void method
##
def setLineCasesText(self, string):
self.lineCases.setText(string)
##
## @brief This method sets the second lineText of the 3rd screen
## with the value equals to the string passed as arg.
##
## @param self Non static method
## @param string String that will be show at the lineEpw
##
## @return This is a void method.
##
def setLineEpwText(self, string):
self.lineEpw.setText(string)
##
    ## @brief This method removes every component from the current window,
    ##        except for the layout. It also clears all lineText
    ##        attributes and the values of the radio buttons. The
    ##        "setExclusive(False)" and "setExclusive(True)" calls are
    ##        needed to clear the values of the radio button components.
##
## @param self Non static method.
##
## @return This is a void method.
##
def clearAll(self):
for component in reversed(range(self.gridLayout.count())):
self.gridLayout.itemAt(component).widget().setParent(None)
self.setLineIdfText("")
self.setLineCsvText("")
self.setLineFolderText("")
self.setLineCasesText("")
self.setLineEpwText("")
self.processingMessage.setText("")
self.group.setExclusive(False)
self.randomRB.setChecked(False)
self.lhsRB.setChecked(False)
self.group.setExclusive(True)
| 40.589255
| 101
| 0.608839
| 22,322
| 0.952588
| 0
| 0
| 0
| 0
| 0
| 0
| 10,293
| 0.439252
|
5907d7fbfcc198ea821785faf5ae482c8f858484
| 4,555
|
py
|
Python
|
CHAPTER 11 (search trees)/red_black_trees_class.py
|
ahammadshawki8/Data-Structures-Algorithms-in-Python-
|
fc18b54128cd5bc7639a14999d8f990190b524eb
|
[
"MIT"
] | null | null | null |
CHAPTER 11 (search trees)/red_black_trees_class.py
|
ahammadshawki8/Data-Structures-Algorithms-in-Python-
|
fc18b54128cd5bc7639a14999d8f990190b524eb
|
[
"MIT"
] | null | null | null |
CHAPTER 11 (search trees)/red_black_trees_class.py
|
ahammadshawki8/Data-Structures-Algorithms-in-Python-
|
fc18b54128cd5bc7639a14999d8f990190b524eb
|
[
"MIT"
] | null | null | null |
from tree_map_class import *
class RedBlackTreeMap(TreeMap):
"""Sorted map implementation using red-black tree."""
class _Node(TreeMap._Node):
"""Node class for red-black tree maintains bit that denotes color."""
__slots__ = "_red" # add additional data member to the Node class
def __init__(self,element,parent=None,left=None,right=None):
super().__init__(element,parent,left,right)
            self._red = True # new node is red by default
#-----------------positional based utility methods-----------------------------
# we consider a nonexisting child to be trivially black
def _set_red(self,p):
p._node._red = True
def _set_black(self,p):
p._node._red = False
def _set_color(self,p,make_red):
p._node._red = make_red
def _is_red(self,p):
return (p is not None) and p._node._red
def _is_red_leaf(self,p):
return self._is_red(p) and self._is_leaf(p)
def _get_red_child(self,p):
"""Return a red child of p (or None if no such child)."""
for child in (self.left(p),self.right(p)):
if self._is_red(child):
return child
return None
#-----------------------support for insertations------------------------------
def _rebalance_insert(self,p):
self._resolve_red(p) # new node is always red
    def _resolve_red(self, p):
if self.is_root(p):
self._set_black(p) # make root black
else:
parent = self.parent(p)
            if self._is_red(parent): # double red problem
                uncle = self.sibling(parent)
                if not self._is_red(uncle): # Case 1: misshapen 4-node
                    middle = self._restructure(p) # do trinode restructuring
self._set_black(middle) # and fix the colors
self._set_red(self.left(middle))
self._set_red(self.right(middle))
else: # Case 2: overfull 5-node
grand = self.parent(parent)
self._set_red(grand) # grandparent becomes red
                    self._set_black(self.left(grand)) # its children become black
                    self._set_black(self.right(grand))
self._resolve_red(grand) # continue recur at grandparent
    # the double restructure cases were handled previously in the restructure method
#-------------------------support for deletions--------------------------------
def _rebalance_delete(self,p):
if len(self) == 1:
self._set_black(self.root()) # special case ensure that the root is black
elif p is not None:
n = self.num_children(p)
            if n == 1: # deficit exists unless child is a red leaf
c = next(self.children(p))
if not self._is_red_leaf(c):
self._fix_deficit(p,c)
elif n == 2: # removed black node with red child
                if self._is_red_leaf(self.left(p)):
self._set_black(self.left(p))
else:
self._set_black(self.right(p))
def _fix_deficit(self,z,y):
"""Resolve black deficit at z, where y is the root of z's heavier subtree."""
if not self._is_red(y): # y is black; will apply case 1 or 2
x = self._get_red_child(y)
if x is not None: # Case 1: y is black and has red child x; do transfer
old_color = self._is_red(z)
                middle = self._restructure(x)
self._set_color(middle,old_color) # middle gets old color of z
self._set_black(self.left(middle)) # children becomes black
self._set_black(self.right(middle))
else: # case 2: y is black, but no red children; recolor as fusion
self._set_red(y)
                if self._is_red(z):
                    self._set_black(z) # this resolves the problem
                elif not self.is_root(z):
                    self._fix_deficit(self.parent(z), self.sibling(z)) # recur upward
else: # Case 3: y is red; rotate misalligned 3-node and repeat
            self._rotate(y)
            self._set_black(y)
            self._set_red(z)
if z == self.right(y):
self._fix_deficit(z,self.left(z))
else:
self._fix_deficit(z,self.right(z))
| 46.958763
| 88
| 0.528211
| 4,524
| 0.993194
| 0
| 0
| 0
| 0
| 0
| 0
| 1,360
| 0.298573
|
59083cdbd1613168bb0ded29e8cc254a35bff318
| 5,170
|
py
|
Python
|
my_diary_data_structures.py
|
koechkevin/myDiary
|
c5f48fa04a5f8c2bce9f1580c0f92f3f0d5f9bcb
|
[
"Apache-2.0"
] | null | null | null |
my_diary_data_structures.py
|
koechkevin/myDiary
|
c5f48fa04a5f8c2bce9f1580c0f92f3f0d5f9bcb
|
[
"Apache-2.0"
] | null | null | null |
my_diary_data_structures.py
|
koechkevin/myDiary
|
c5f48fa04a5f8c2bce9f1580c0f92f3f0d5f9bcb
|
[
"Apache-2.0"
] | 1
|
2018-11-04T09:48:46.000Z
|
2018-11-04T09:48:46.000Z
|
from functools import wraps
import datetime
from flask import jsonify, Flask, request, session
from my_class import ExternalFunctions
app = Flask(__name__)
app.config["SECRET_KEY"] = 'kkkoech'
user_details = dict()
diary_entries = dict()
@app.route("/api/v1", methods=['GET'])
def home():
return jsonify({"message":"welcome to my diary"})
@app.route("/api/v1/register", methods=['POST'])
def register():
try:
data = request.get_json()
fname = data["fname"]
lname = data["lname"]
email = data["email"]
username = data["username"]
password = data["password"]
cpassword = data["cpassword"]
if ExternalFunctions.valid_email(email):
if ExternalFunctions.password_verify(password, cpassword):
if fname.strip() == '' or lname.strip() == '' or \
username.strip() == '' or password.strip() == '':
return jsonify("fields cannot be empty"), 422
else:
if username not in user_details:
user_details.update({username:{"name":fname+" "+lname, \
"email":email, "password":password}})
else:
return jsonify({"message":"such user already exists"}), 409
else:
return jsonify({"message":"password and confirm password do not match"}), 403
else:
return jsonify("email is invalid"), 403
return jsonify({"message":"success ! you can now login to continue"}), 200
except KeyError:
return jsonify('fname, lname, email, username, password, cpassword should be provided'), 422
@app.route("/api/v1/login", methods=['POST'])
def login():
try:
username = request.get_json()["username"]
password = request.get_json()["password"]
if username in user_details:
if password == user_details[username]["password"]:
session['username'] = username
session['logged_in'] = True
return jsonify({"message":"you are successfully logged in "}), 200
return jsonify({"message":"wrong password, try again"}), 401
return jsonify({"message":"you are not a registered user"}), 403
except KeyError:
        return jsonify('username and password should be provided'), 422
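# Decorator: run the wrapped route only when a user session exists; otherwise ask the client to log in.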
def on_session(t):
@wraps(t)
def decorator(*a, **kw):
if "logged_in" in session:
return t(*a, **kw)
return jsonify({"message":"please login first"}), 401
return decorator
@app.route("/api/v1/create_entry", methods=['POST'])
@on_session
def create_entry():
try:
comment = request.get_json()["comment"]
username = session.get('username')
if username not in diary_entries:
diary_entries.update({username:{1:str(datetime.datetime.utcnow())+" "+comment}})
else:
diary_entries[username].update\
({len(diary_entries[username])+1:str(datetime.datetime.utcnow())+" "+comment})
return jsonify(diary_entries[username]), 200
except KeyError:
return jsonify('comment should be provided'), 422
@app.route("/api/v1/entries", methods=['GET'])
@on_session
def entries():
try:
username = session.get('username')
return jsonify(diary_entries[username]), 200
except KeyError:
return jsonify('ensure that you have made entries before'), 422
@app.route("/api/v1/view_entry/<int:entry_id>", methods=["GET"])
@on_session
def view_entry(entry_id):
username = session.get('username')
return jsonify({"entry "+str(entry_id):diary_entries[username][entry_id]}), 200
@app.route("/api/v1/delete_entry/<int:entry_id>", methods=["DELETE"])
@on_session
def delete_entry(entry_id):
try:
username = session.get('username')
del diary_entries[username][entry_id]
return jsonify({"message":"deleted successfully"}), 202
except KeyError:
return jsonify('you can only delete an \
entry you made. please confirm you have an entry of id \
'+str(entry_id)+' in http://127.0.0.1:5555/api/v1/entries'), 422
@app.route("/api/v1/modify_entry/<int:entry_id>", methods=["PUT"])
def modify_entry(entry_id):
try:
comment = request.get_json()["comment"]
username = session.get('username')
del diary_entries[username][entry_id]
diary_entries[username].update({entry_id:str(datetime.datetime.utcnow())+" "+comment})
return jsonify({"message":"successfully edited an entry"}), 200
except KeyError:
return jsonify('comment should be provided \
and you should have made more than'+str(entry_id)+' comments to modify an entry'), 422
@app.route("/api/v1/account", methods=['GET'])
def account():
username = session.get('username')
my_details = {"name":user_details[username]['name'], "email":user_details[username]['email']}
return jsonify(my_details), 200
@app.route("/api/v1/logout", methods=['GET'])
def logout():
session.clear()
return jsonify({"message":"successful"}), 200
if __name__ == '__main__':
app.run(port=5555, debug=True)
| 38.014706
| 100
| 0.624178
| 0
| 0
| 0
| 0
| 4,800
| 0.928433
| 0
| 0
| 1,454
| 0.281238
|
5909eb773cf91122abfbd155ab1ef7779d77f23a
| 26
|
py
|
Python
|
micro-benchmark-key-errs/snippets/parameters/imported_assigned_call/to_import.py
|
WenJinfeng/PyCG
|
b45e8e04fe697d8301cf27222a8f37646d69f168
|
[
"Apache-2.0"
] | 121
|
2020-12-16T20:31:37.000Z
|
2022-03-21T20:32:43.000Z
|
micro-benchmark-key-errs/snippets/parameters/imported_assigned_call/to_import.py
|
WenJinfeng/PyCG
|
b45e8e04fe697d8301cf27222a8f37646d69f168
|
[
"Apache-2.0"
] | 24
|
2021-03-13T00:04:00.000Z
|
2022-03-21T17:28:11.000Z
|
micro-benchmark-key-errs/snippets/parameters/imported_assigned_call/to_import.py
|
WenJinfeng/PyCG
|
b45e8e04fe697d8301cf27222a8f37646d69f168
|
[
"Apache-2.0"
] | 19
|
2021-03-23T10:58:47.000Z
|
2022-03-24T19:46:50.000Z
|
const1 = "a"
const2 = "b"
| 8.666667
| 12
| 0.538462
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 6
| 0.230769
|
5909f08bda2ad877f9982af2cd854a38d7dd516a
| 13,029
|
py
|
Python
|
intake_sdmx.py
|
dr-leo/intake_sdmx
|
dccd51e6ce4aa352fba0a0c25dfac82148acd1e3
|
[
"Apache-2.0"
] | null | null | null |
intake_sdmx.py
|
dr-leo/intake_sdmx
|
dccd51e6ce4aa352fba0a0c25dfac82148acd1e3
|
[
"Apache-2.0"
] | 3
|
2021-05-29T19:46:36.000Z
|
2022-01-15T14:15:22.000Z
|
intake_sdmx.py
|
dr-leo/intake_sdmx
|
dccd51e6ce4aa352fba0a0c25dfac82148acd1e3
|
[
"Apache-2.0"
] | 1
|
2021-05-28T13:14:53.000Z
|
2021-05-28T13:14:53.000Z
|
"""intake plugin for SDMX data sources"""
import intake
from intake.catalog import Catalog
from intake.catalog.utils import reload_on_change
from intake.catalog.local import LocalCatalogEntry, UserParameter
import pandasdmx as sdmx
from collections.abc import MutableMapping
from datetime import date
from itertools import chain
__version__ = "0.1.0"
NOT_SPECIFIED = "n/a"
class LazyDict(MutableMapping):
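    """Dict-like mapping that stores placeholders and computes each value on first access via the supplied function."""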
def __init__(self, func, *args, **kwargs):
super().__init__()
self._dict = dict(*args, **kwargs)
self._func = func
def update(self, *args, **kwargs):
return self._dict.update(*args, **kwargs)
def __getitem__(self, key):
if self._dict[key] is None:
self._dict[key] = self._func(key)
return self._dict[key]
def __setitem__(self, key, value):
return self._dict.__setitem__(key, value)
def __contains__(self, key):
return self._dict.__contains__(key)
def __len__(self):
return self._dict.__len__()
def __delitem__(self, key):
return self._dict.__delitem__(key)
def __iter__(self):
return self._dict.__iter__()
def __str__(self):
return "".join((self.__class__.__name__, "(", str(self._dict), ")"))
class SDMXSources(Catalog):
"""
catalog of SDMX data sources, a.k.a. agencies
supported by pandaSDMX
"""
name = "sdmx"
description = "SDMX sources supported by pandaSDMX"
version = __version__
container = "catalog"
def _load(self):
# exclude sources which do not support dataflows
# and datasets (eg json-based ABS and OECD)
excluded = ["ABS", "OECD", "IMF", "SGR", "STAT_EE"]
for source_id, source in sdmx.source.sources.items():
if source_id not in excluded:
descr = source.name
metadata = {"source_id": source_id}
e = LocalCatalogEntry(
source_id + "_SDMX_dataflows",
descr,
SDMXDataflows,
direct_access=True,
# set storage_options to {} if not set. This avoids TypeError
# when passing it to sdmx.Request() later
args={"storage_options": self.storage_options or {}},
cache=[],
parameters=[],
metadata=metadata,
catalog_dir="",
getenv=False,
getshell=False,
catalog=self,
)
self._entries[source_id] = e
class SDMXCodeParam(UserParameter):
def __init__(self, allowed=None, **kwargs):
super(SDMXCodeParam, self).__init__(**kwargs)
self.allowed = allowed
def validate(self, value):
# Convert short-form multiple selections to list, e.g. 'DE+FR'
if isinstance(value, str) and "+" in value:
value = value.split("+")
# Single code as str
if isinstance(value, str):
if not value in self.allowed:
raise ValueError(
"%s=%s is not one of the allowed values: %s"
% (self.name, value, ",".join(map(str, self.allowed)))
)
# So value must be an iterable of str, e.g. multiple selection
elif not all(c in self.allowed for c in value):
not_allowed = [c for c in value if not c in self.allowed]
raise ValueError(
"%s=%s is not one of the allowed values: %s"
% (self.name, not_allowed, ",".join(map(str, self.allowed)))
)
return value
class SDMXDataflows(Catalog):
"""
catalog of dataflows for a given SDMX source
"""
version = __version__
container = "catalog"
partition_access = False
def _make_entries_container(self):
return LazyDict(self._make_dataflow_entry)
def _load(self):
# read metadata on dataflows
self.name = self.metadata["source_id"] + "_SDMX_dataflows"
# Request dataflows from remote SDMX service
self.req = sdmx.Request(self.metadata["source_id"], **self.storage_options)
# get full list of dataflows
self._flows_msg = self.req.dataflow()
        # Map names to IDs for later back-translation.
        # We use this catalog to store 2 entries per dataflow: ID and human-readable name
self.name2id = {}
for dataflow in self._flows_msg.dataflow.values():
flow_id, flow_name = dataflow.id, str(dataflow.name)
# make 2 entries per dataflow using its ID and name
self._entries[flow_id] = None
self._entries[flow_name] = None
self.name2id[flow_name] = flow_id
def _make_dataflow_entry(self, flow_id):
# if flow_id is actually its name, get the real id
if flow_id in self.name2id:
flow_id = self.name2id[flow_id]
# Download metadata on specified dataflow
flow_msg = self.req.dataflow(flow_id)
flow = flow_msg.dataflow[flow_id]
dsd = flow.structure
descr = str(flow.name)
metadata = self.metadata.copy()
metadata["dataflow_id"] = flow_id
metadata["structure_id"] = dsd.id
# Make user params for coded dimensions
# Check for any content constraints to codelists
if hasattr(flow_msg, "constraint") and flow_msg.constraint:
constraint = (
next(iter(flow_msg.constraint.values())).data_content_region[0].member
)
else:
constraint = None
params = []
# params for coded dimensions
for dim in dsd.dimensions:
lr = dim.local_representation
# only dimensions with enumeration, i.e. where values are codes
if lr.enumerated:
ci = dim.concept_identity
# Get code ID and name as its description
if constraint and dim.id in constraint:
codes_iter = (
c
for c in lr.enumerated.items.values()
if c in constraint[dim.id]
)
else:
codes_iter = lr.enumerated.items.values()
codes = {*chain(*((c.id, str(c.name)) for c in codes_iter))}
# allow "" to indicate wild-carded dimension
codes.add(NOT_SPECIFIED)
                p = SDMXCodeParam(
name=dim.id,
description=str(ci.name),
type="str",
allowed=codes,
default=NOT_SPECIFIED,
)
params.append(p)
# Try to retrieve ID of time and freq dimensions for DataFrame index
dim_candidates = [d.id for d in dsd.dimensions if "TIME" in d.id]
try:
time_dim_id = dim_candidates[0]
except IndexError:
time_dim_id = NOT_SPECIFIED
        # Frequency for period index generation
dim_candidates = [p.name for p in params if "FREQ" in p.name]
try:
freq_dim_id = dim_candidates[0]
except IndexError:
freq_dim_id = NOT_SPECIFIED
# params for startPeriod and endPeriod
year = date.today().year
params.extend(
[
UserParameter(
name="startPeriod",
description="startPeriod",
type="datetime",
default=str(year - 1),
),
UserParameter(
name="endPeriod", description="endPeriod", type="datetime"
),
UserParameter(
name="dtype",
description="""data type for pandas.DataFrame. See pandas docs
for allowed values.
Default is '' which translates to 'float64'.""",
type="str",
),
UserParameter(
name="attributes",
description="""Include any attributes alongside observations
in the DataFrame. See pandasdmx docx for details.
Examples: 'osgd' for all attributes, or
'os': only attributes at observation and series level.""",
type="str",
),
UserParameter(
name="index_type",
description="""Type of pandas Series/DataFrame index""",
type="str",
allowed=["object", "datetime", "period"],
default="object",
),
UserParameter(
name="freq_dim",
description="""To generate PeriodIndex (index_type='period')
Default is set based on heuristics.""",
type="str",
default=freq_dim_id,
),
UserParameter(
name="time_dim",
description="""To generate datetime or period index.
Ignored if index_type='object'.""",
type="str",
default=time_dim_id,
),
]
)
args = {p.name: f"{{{{{p.name}}}}}" for p in params}
args["storage_options"] = self.storage_options
return LocalCatalogEntry(
name=flow_id,
description=descr,
driver=SDMXData,
direct_access=True,
cache=[],
parameters=params,
args=args,
metadata=metadata,
catalog_dir="",
getenv=False,
getshell=False,
catalog=self,
)
@reload_on_change
def search(self, text):
words = text.lower().split()
cat = SDMXDataflows(
name=self.name + "_search",
description=self.description,
ttl=self.ttl,
getenv=self.getenv,
getshell=self.getshell,
metadata=(self.metadata or {}).copy(),
storage_options=self.storage_options,
)
cat.metadata["search"] = {"text": text, "upstream": self.name}
cat.cat = self
cat._entries._dict.clear()
keys = [
*chain.from_iterable(
(self.name2id[k], k)
for k in self
if any(word in k.lower() for word in words)
)
]
cat._entries.update({k: None for k in keys})
return cat
def filter(self, func):
        raise NotImplementedError
class SDMXData(intake.source.base.DataSource):
"""
Driver for SDMX data sets of a given SDMX dataflow
"""
version = __version__
name = "sdmx_dataset"
container = "dataframe"
partition_access = True
def __init__(self, metadata=None, **kwargs):
super(SDMXData, self).__init__(metadata=metadata)
self.name = self.metadata["dataflow_id"]
self.req = sdmx.Request(self.metadata["source_id"], **self.storage_options)
self.kwargs = kwargs
def read(self):
# construct key
key_ids = (
p.name for p in self.entry._user_parameters if isinstance(p, SDMXCodeParam)
)
key = {i: self.kwargs[i] for i in key_ids if self.kwargs[i]}
# params for request. Currently, only start- and endPeriod are supported
params = {k: str(self.kwargs[k].year) for k in ["startPeriod", "endPeriod"]}
# remove endPeriod if it is prior to startPeriod (which is the default)
if params["endPeriod"] < params["startPeriod"]:
del params["endPeriod"]
# Now request the data via HTTP
# TODO: handle Request.get kwargs eg. fromfile, timeout.
data_msg = self.req.data(self.metadata["dataflow_id"], key=key, params=params)
# get writer config.
# Capture only non-empty values as these will be filled by the writer
writer_config = {
k: self.kwargs[k] for k in ["dtype", "attributes"] if self.kwargs[k]
}
# construct args to conform to writer API
index_type = self.kwargs["index_type"]
freq_dim = self.kwargs["freq_dim"]
time_dim = self.kwargs["time_dim"]
if index_type == "datetime":
writer_config["datetime"] = True if freq_dim == NOT_SPECIFIED else freq_dim
elif index_type == "period":
datetime = {}
datetime["freq"] = True if freq_dim == NOT_SPECIFIED else freq_dim
datetime["dim"] = True if time_dim == NOT_SPECIFIED else time_dim
writer_config["datetime"] = datetime
# generate the Series or dataframe
self._dataframe = data_msg.to_pandas(**writer_config)
return self._dataframe
def _close(self):
self._dataframe = None
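# --- Hedged usage sketch (editor's addition, not part of the original driver) ---
# SDMXData.read() boils down to: build a key and params dict, fetch a data message
# with pandasdmx, and convert it with the pandas writer. The standalone sketch below
# mirrors those same calls; the provider id "ECB" and dataflow id "EXR" are
# illustrative assumptions, not values taken from this module.
if __name__ == "__main__":
    demo_req = sdmx.Request("ECB")  # the module already imports pandasdmx as `sdmx`
    demo_msg = demo_req.data("EXR", key={}, params={"startPeriod": "2020"})
    demo_df = demo_msg.to_pandas()  # same writer call used in SDMXData.read()
    print(demo_df.head())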
| 36.805085
| 90
| 0.54939
| 12,636
| 0.969837
| 0
| 0
| 801
| 0.061478
| 0
| 0
| 3,309
| 0.253972
|
5910779f16295dd8d8929f180e23470f2321f629
| 1,388
|
py
|
Python
|
apps/exp/afe/afe_bfcc.py
|
yt7589/mgs
|
2faae1b69e6d4cde63afb9b2432b1bf49ebdd770
|
[
"Apache-2.0"
] | null | null | null |
apps/exp/afe/afe_bfcc.py
|
yt7589/mgs
|
2faae1b69e6d4cde63afb9b2432b1bf49ebdd770
|
[
"Apache-2.0"
] | null | null | null |
apps/exp/afe/afe_bfcc.py
|
yt7589/mgs
|
2faae1b69e6d4cde63afb9b2432b1bf49ebdd770
|
[
"Apache-2.0"
] | null | null | null |
#
#import scipy
#from scipy import io as sio
import scipy.io.wavfile
from ext.spafe.utils import vis
from ext.spafe.features.bfcc import bfcc
class AfeBfcc:
@staticmethod
def extract_bfcc(wav_file):
        print('Extracting BFCC features')
num_ceps = 13
low_freq = 0
high_freq = 2000
nfilts = 24
nfft = 512
        dct_type = 2
        use_energy = False
lifter = 5
normalize = False
# read wav
fs, sig_raw = scipy.io.wavfile.read(wav_file)
sig = sig_raw #[:, :1] #.reshape((sig_raw.shape[0],))
print('fs: {0}\r\n{1}\r\n***********'.format(type(fs), fs))
print('sig: {0}\r\n{1}\r\n******************'.format(sig.shape, sig))
# compute features
bfccs = bfcc(sig=sig,
fs=fs,
num_ceps=num_ceps,
nfilts=nfilts,
nfft=nfft,
low_freq=low_freq,
high_freq=high_freq,
dct_type=dct_type,
use_energy=use_energy,
lifter=lifter,
normalize=normalize)
print('step 1')
# visualize spectogram
vis.spectogram(sig, fs)
print('step 2')
# visualize features
vis.visualize_features(bfccs, 'BFCC Index', 'Frame Index')
print('step 3')
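# --- Hedged usage sketch (editor's addition) ---
# extract_bfcc only needs a path to a WAV file; every other parameter is hard-coded
# above. The path below is a placeholder, not a file shipped with this repository.
if __name__ == '__main__':
    AfeBfcc.extract_bfcc('./data/example.wav')  # hypothetical WAV path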
| 30.844444
| 77
| 0.50072
| 1,253
| 0.897564
| 0
| 0
| 1,234
| 0.883954
| 0
| 0
| 289
| 0.20702
|
591250889987fba85313b14511149afd485e1e41
| 268
|
py
|
Python
|
rustplus/exceptions/exceptions.py
|
fieu/rustplus
|
d1e82a7a32988d48ce2c3fd386f464bd48f50385
|
[
"MIT"
] | 1
|
2021-08-10T12:59:42.000Z
|
2021-08-10T12:59:42.000Z
|
rustplus/exceptions/exceptions.py
|
fieu/rustplus
|
d1e82a7a32988d48ce2c3fd386f464bd48f50385
|
[
"MIT"
] | null | null | null |
rustplus/exceptions/exceptions.py
|
fieu/rustplus
|
d1e82a7a32988d48ce2c3fd386f464bd48f50385
|
[
"MIT"
] | null | null | null |
class Error(Exception):
"""Base class for other exceptions"""
pass
class ClientError(Error):
"""Raised when the client details are not valid"""
pass
class ImageError(Error):
"""Raised when the Image Returned is not valid"""
pass
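# --- Hedged usage sketch (editor's addition) ---
# The exception classes above are plain markers; callers raise and catch them like
# any other exception. The validation helper below is illustrative only and is not
# part of the rustplus API.
def _demo_validate_client(ip: str, port: int) -> None:
    if not ip or port <= 0:
        raise ClientError("client details are not valid")


if __name__ == "__main__":
    try:
        _demo_validate_client("", 0)
    except ClientError as error:
        print(f"caught: {error}")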
| 20.615385
| 55
| 0.641791
| 256
| 0.955224
| 0
| 0
| 0
| 0
| 0
| 0
| 136
| 0.507463
|
591491ff550ba32d4e2ae2cbc52705d6ad0c7c72
| 4,673
|
py
|
Python
|
notifier_bot.py
|
maticardenas/football_api_notif
|
81f9e265d4effb7545e3d9ad80ee1109cd9b8edf
|
[
"MIT"
] | null | null | null |
notifier_bot.py
|
maticardenas/football_api_notif
|
81f9e265d4effb7545e3d9ad80ee1109cd9b8edf
|
[
"MIT"
] | null | null | null |
notifier_bot.py
|
maticardenas/football_api_notif
|
81f9e265d4effb7545e3d9ad80ee1109cd9b8edf
|
[
"MIT"
] | null | null | null |
import logging
from datetime import date
from telegram import Update
from telegram.ext import ApplicationBuilder, CommandHandler
from config.notif_config import NotifConfig
from src.emojis import Emojis
from src.team_fixtures_manager import TeamFixturesManager
from src.telegram_bot.bot_commands_handler import NextAndLastMatchCommandHandler, NotifierBotCommandsHandler
logging.basicConfig(
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", level=logging.INFO
)
async def start(update: Update, context):
await context.bot.send_photo(
chat_id=update.effective_chat.id,
photo="https://media.api-sports.io/football/players/154.png",
caption=f"{Emojis.WAVING_HAND.value} Hola {update.effective_user.first_name}, soy FootballNotifier bot!\n\n"
f"{Emojis.JOYSTICK.value} /help - Chequeá mis comandos disponibles ;) \n\n"
f"{Emojis.GOAT.value} {Emojis.ARGENTINA.value} Vamos Messi!",
parse_mode="HTML",
)
async def help(update: Update, context):
text = (
f"{Emojis.WAVING_HAND.value}Hola {update.effective_user.first_name}!\n\n"
f" {Emojis.JOYSTICK.value} Estos son mis comandos disponibles (por ahora):\n\n"
f"• /next_match <team>: próximo partido del equipo.\n"
f"• /last_match <team>: último partido jugado del equipo.\n"
f"• /available_teams: equipos disponibles."
)
await context.bot.send_message(chat_id=update.effective_chat.id, text=text)
async def available_teams(update: Update, context):
notifier_commands_handler = NotifierBotCommandsHandler()
text = (
f"{Emojis.WAVING_HAND.value}Hola {update.effective_user.first_name}!\n\n"
f" {Emojis.TELEVISION.value} Estos son los equipos disponibles:\n\n"
f"{notifier_commands_handler.available_teams_text()}"
)
await context.bot.send_message(chat_id=update.effective_chat.id, text=text)
async def next_match(update: Update, context):
command_handler = NextAndLastMatchCommandHandler(context.args)
validated_input = command_handler.validate_command_input()
if validated_input:
await context.bot.send_message(
chat_id=update.effective_chat.id, text=validated_input
)
else:
team = command_handler.get_managed_team(context.args[0])
current_season = date.today().year
team_fixtures_manager = TeamFixturesManager(current_season, team.id)
text, photo = team_fixtures_manager.get_next_team_fixture_text(
update.effective_user.first_name
)
if photo:
await context.bot.send_photo(
chat_id=update.effective_chat.id,
photo=photo,
caption=text,
parse_mode="HTML",
)
else:
            await context.bot.send_message(
chat_id=update.effective_chat.id,
text=text,
parse_mode="HTML",
)
async def last_match(update: Update, context):
command_handler = NextAndLastMatchCommandHandler(context.args)
validated_input = command_handler.validate_command_input()
if validated_input:
await context.bot.send_message(
chat_id=update.effective_chat.id, text=validated_input
)
else:
team = command_handler.get_managed_team(context.args[0])
current_season = date.today().year
team_fixtures_manager = TeamFixturesManager(current_season, team.id)
text, photo = team_fixtures_manager.get_last_team_fixture_text(
update.effective_user.first_name
)
if photo:
await context.bot.send_photo(
chat_id=update.effective_chat.id,
photo=photo,
caption=text,
parse_mode="HTML",
)
else:
            await context.bot.send_message(
chat_id=update.effective_chat.id,
text=text,
parse_mode="HTML",
)
if __name__ == "__main__":
application = ApplicationBuilder().token(NotifConfig.TELEGRAM_TOKEN).build()
start_handler = CommandHandler("start", start)
next_match_handler = CommandHandler("next_match", next_match)
last_match_handler = CommandHandler("last_match", last_match)
available_teams_handler = CommandHandler("available_teams", available_teams)
help_handler = CommandHandler("help", help)
application.add_handler(start_handler)
application.add_handler(next_match_handler)
application.add_handler(last_match_handler)
application.add_handler(help_handler)
application.add_handler(available_teams_handler)
application.run_polling()
| 37.685484
| 116
| 0.684571
| 0
| 0
| 0
| 0
| 0
| 0
| 3,497
| 0.746903
| 949
| 0.202691
|
59166d910964300c77243a402c6b75bc3f352d74
| 1,517
|
py
|
Python
|
homeassistant_cli/plugins/event.py
|
dotlambda/home-assistant-cli
|
e8c5a493ca902a739a357d3053a2f09d589e9be1
|
[
"Apache-2.0"
] | null | null | null |
homeassistant_cli/plugins/event.py
|
dotlambda/home-assistant-cli
|
e8c5a493ca902a739a357d3053a2f09d589e9be1
|
[
"Apache-2.0"
] | null | null | null |
homeassistant_cli/plugins/event.py
|
dotlambda/home-assistant-cli
|
e8c5a493ca902a739a357d3053a2f09d589e9be1
|
[
"Apache-2.0"
] | null | null | null |
"""Edit plugin for Home Assistant CLI (hass-cli)."""
import json as json_
import logging
import click
import homeassistant_cli.autocompletion as autocompletion
from homeassistant_cli.cli import pass_context
from homeassistant_cli.config import Configuration
from homeassistant_cli.helper import raw_format_output, req_raw
import yaml
_LOGGING = logging.getLogger(__name__)
@click.group('event')
@pass_context
def cli(ctx):
"""Interact with events."""
@cli.command()
@click.argument( # type: ignore
'event', required=True, autocompletion=autocompletion.events
)
@click.option(
'--json',
help="Raw JSON state to use for event. Overrides any other state"
"values provided.",
)
@pass_context
def fire(ctx: Configuration, event, json):
"""Fire event in Home Assistant."""
if json:
click.echo("Fire {}".format(event))
response = req_raw(ctx, 'post', 'events/{}'.format(event), json)
response.raise_for_status()
else:
existing = raw_format_output(ctx.output, {})
new = click.edit(existing, extension='.{}'.format(ctx.output))
if new is not None:
click.echo("Fire {}".format(event))
if ctx.output == 'yaml':
                new = json_.dumps(yaml.safe_load(new))
response = req_raw(ctx, 'post', 'events/{}'.format(event), new)
response.raise_for_status()
else:
click.echo("No edits/changes.")
return
ctx.echo(raw_format_output(ctx.output, response.json()))
| 29.173077
| 75
| 0.661173
| 0
| 0
| 0
| 0
| 1,136
| 0.748846
| 0
| 0
| 310
| 0.204351
|
59177fedfb201ef7cf401094e43b1d49ac1b2c09
| 8,576
|
py
|
Python
|
events/models.py
|
Strategy-Tap/Novizi-BackEnd
|
536edde68dc79ad5467f2dbb0931a56930a4edea
|
[
"MIT"
] | null | null | null |
events/models.py
|
Strategy-Tap/Novizi-BackEnd
|
536edde68dc79ad5467f2dbb0931a56930a4edea
|
[
"MIT"
] | 4
|
2021-04-08T21:23:49.000Z
|
2022-03-12T00:44:54.000Z
|
events/models.py
|
Strategy-Tap/Novizi-BackEnd
|
536edde68dc79ad5467f2dbb0931a56930a4edea
|
[
"MIT"
] | 1
|
2020-06-12T16:08:46.000Z
|
2020-06-12T16:08:46.000Z
|
"""Collection of model."""
from typing import Any
from django.conf import settings
from django.db import models
from django.db.models.signals import pre_save
from django.dispatch import receiver
from django.utils.translation import gettext_lazy as _
from djgeojson.fields import PointField
from .utils import get_read_time, unique_slug
def event_upload_to(instance: "Event", filename: str) -> str:
"""A help Function to change the image upload path.
Args:
instance: django model
filename: the uploaded file name
Returns:
path in string format
"""
return f"images/events/cover/{instance.title}/{filename}"
class Tag(models.Model):
"""Reference tag model."""
name = models.CharField(verbose_name=_("name"), max_length=200, unique=True)
class Meta:
"""Meta data."""
verbose_name = _("tag")
verbose_name_plural = _("tags")
def __str__(self: "Tag") -> str:
"""It return readable name for the model."""
return f"{self.name}"
def total_events(self: "Tag") -> int:
"""Getting total of events for the tag."""
return self.events.count()
total_events.short_description = _("Events")
total_events.int = 0
class Event(models.Model):
"""Reference event model."""
title = models.CharField(verbose_name=_("title"), max_length=400)
description = models.TextField(verbose_name=_("description"))
read_time = models.IntegerField(default=0, verbose_name=_("read time"))
slug = models.SlugField(verbose_name=_("slug"), unique=True, blank=True)
event_date = models.DateTimeField(verbose_name=_("event date"))
total_guest = models.PositiveIntegerField(
verbose_name=_("total of guest"), default=1
)
hosted_by = models.ForeignKey(
settings.AUTH_USER_MODEL,
verbose_name=_("hosted by"),
on_delete=models.CASCADE,
related_name="events",
db_index=True,
)
cover = models.ImageField(
verbose_name=_("cover"), blank=True, null=True, upload_to=event_upload_to
)
tags = models.ManyToManyField(
to=Tag, verbose_name=_("tags"), related_name="events", blank=True
)
organizers = models.ManyToManyField(
to=settings.AUTH_USER_MODEL,
verbose_name=_("organizers"),
related_name="events_organizers",
blank=True,
)
created_at = models.DateTimeField(verbose_name=_("created at"), auto_now_add=True)
updated_at = models.DateTimeField(verbose_name=_("updated at"), auto_now=True)
geom = PointField(verbose_name=_("geo location"))
class Meta:
"""Meta data."""
verbose_name = _("event")
verbose_name_plural = _("events")
def __str__(self: "Event") -> str:
"""It return readable name for the model."""
return f"{self.title}"
def total_attendees(self: "Event") -> int:
"""Getting total of attendees for the event."""
return self.attendees.count()
def available_place(self: "Event") -> int:
"""Getting total of available place for the event."""
return self.total_guest - self.attendees.count()
def total_attended(self: "Event") -> int:
"""Getting total of people who actual attended for the event."""
return self.attendees.filter(has_attended=True).count()
def total_not_attended(self: "Event") -> int:
"""Getting total of people who didn't attended for the event."""
return self.attendees.filter(has_attended=False).count()
def total_sessions(self: "Event") -> int:
"""Getting total of sessions in event."""
return self.sessions.count()
def total_draft_sessions(self: "Event") -> int:
"""Getting total of draft sessions in event."""
return self.sessions.filter(status="Draft").count()
def total_accepted_sessions(self: "Event") -> int:
"""Getting total of accepted sessions in event."""
return self.sessions.filter(status="Accepted").count()
def total_denied_sessions(self: "Event") -> int:
"""Getting total of denied sessions in event."""
return self.sessions.filter(status="Denied").count()
def total_talk(self: "Event") -> int:
"""Getting total of talk in event."""
return self.sessions.filter(session_type="Talk", status="Accepted").count()
def total_lighting_talk(self: "Event") -> int:
"""Getting total of lighting talk in event."""
return self.sessions.filter(
session_type="Lighting Talk", status="Accepted"
).count()
def total_workshop(self: "Event") -> int:
"""Getting total of workshop in event."""
return self.sessions.filter(session_type="WorkShop", status="Accepted").count()
total_sessions.short_description = _("Sessions")
total_draft_sessions.short_description = _("Draft Sessions")
total_accepted_sessions.short_description = _("Accepted Sessions")
total_denied_sessions.short_description = _("Denied Sessions")
total_talk.short_description = _("Talk")
total_lighting_talk.short_description = _("Lighting Talk")
total_workshop.short_description = _("Workshop")
total_attendees.short_description = _("Attendees")
total_attended.short_description = _("Has Attended")
total_not_attended.short_description = _("Has Not Attended")
available_place.short_description = _("Available Place")
class Attendee(models.Model):
"""Reference attendee model."""
user = models.ForeignKey(
settings.AUTH_USER_MODEL,
verbose_name=_("user"),
on_delete=models.CASCADE,
related_name="attendees",
db_index=True,
)
events = models.ForeignKey(
Event,
verbose_name=_("events"),
on_delete=models.CASCADE,
related_name="attendees",
db_index=True,
)
has_attended = models.BooleanField(
verbose_name=_("has attended"), blank=True, null=True
)
created_at = models.DateTimeField(verbose_name=_("created at"), auto_now_add=True)
updated_at = models.DateTimeField(verbose_name=_("updated at"), auto_now=True)
class Meta:
"""Meta data."""
verbose_name = _("attendee")
verbose_name_plural = _("attendees")
def __str__(self: "Attendee") -> str:
"""It return readable name for the model."""
return f"{self.user}"
class Session(models.Model):
"""Reference session model."""
choose_category = (
("Talk", _("Talk")),
("Lighting Talk", _("Lighting Talk")),
("WorkShop", _("WorkShop")),
)
choose_status = (
("Draft", _("Draft")),
("Accepted", _("Accepted")),
("Denied", _("Denied")),
)
title = models.CharField(verbose_name=_("title"), max_length=400)
description = models.TextField(verbose_name=_("description"))
session_type = models.CharField(
max_length=100, choices=choose_category, verbose_name=_("session type")
)
slug = models.SlugField(verbose_name=_("slug"), unique=True, blank=True)
events = models.ForeignKey(
Event,
verbose_name=_("events"),
on_delete=models.CASCADE,
related_name="sessions",
db_index=True,
)
status = models.CharField(
verbose_name=_("status"), max_length=10, choices=choose_status, default="Draft"
)
proposed_by = models.ForeignKey(
settings.AUTH_USER_MODEL,
verbose_name=_("proposed by"),
on_delete=models.CASCADE,
related_name="sessions",
db_index=True,
)
created_at = models.DateTimeField(verbose_name=_("created at"), auto_now_add=True)
updated_at = models.DateTimeField(verbose_name=_("updated at"), auto_now=True)
class Meta:
"""Meta data."""
verbose_name = _("session")
verbose_name_plural = _("sessions")
def __str__(self: "Session") -> str:
"""It return readable name for the model."""
return f"{self.title}"
@receiver(pre_save, sender=Session)
def session_slug_creator(sender: Session, instance: Session, **kwargs: Any) -> None:
"""Single for Session."""
if not instance.slug:
instance.slug = unique_slug(title=instance.title)
@receiver(pre_save, sender=Event)
def event_creator(sender: Event, instance: Event, **kwargs: Any) -> None:
"""Single for Event."""
if not instance.slug:
instance.slug = unique_slug(title=instance.title)
if instance.description:
instance.read_time = get_read_time(words=instance.description)
| 30.519573
| 87
| 0.653451
| 7,349
| 0.856926
| 0
| 0
| 554
| 0.064599
| 0
| 0
| 2,222
| 0.259095
|
5917ad709fbc60f4121dfd8d315e221b94423156
| 1,938
|
py
|
Python
|
src/nlp/classification/tf1/traditional_cls/bert_embedding.py
|
wu-uw/OpenCompetition
|
9aa9d7a50ada1deb653d295dd8a7fe46321b9094
|
[
"Apache-2.0"
] | 15
|
2019-12-22T14:26:47.000Z
|
2020-11-02T10:57:37.000Z
|
src/nlp/classification/tf1/traditional_cls/bert_embedding.py
|
GT-JLU/OpenCompetition
|
5262fc5fa7efd7b483c1dc09cb7747dd75e37175
|
[
"Apache-2.0"
] | 2
|
2020-02-03T07:10:11.000Z
|
2020-02-11T16:38:56.000Z
|
src/nlp/classification/tf1/traditional_cls/bert_embedding.py
|
GT-JLU/OpenCompetition
|
5262fc5fa7efd7b483c1dc09cb7747dd75e37175
|
[
"Apache-2.0"
] | 12
|
2020-01-06T14:16:52.000Z
|
2020-05-23T14:12:30.000Z
|
# coding = utf-8
import copy
import json
import logging
import math
import os
import shutil
import tarfile
import tempfile
import sys
from io import open
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from activation_function import gelu, swish, ACT2FN
logger = logging.getLogger(__name__)
from bert_layernorm import BertLayerNorm
class BertEmbeddings(nn.Module):
"""Construct the embeddings from word, position and token_type embeddings.
"""
def __init__(self, config):
super(BertEmbeddings, self).__init__()
self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=0)
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size, padding_idx=0)
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size, padding_idx=0)
# self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
# any TensorFlow checkpoint file
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, input_ids, token_type_ids=None):
seq_length = input_ids.size(1)
position_ids = torch.arange(seq_length, dtype=torch.long, device=input_ids.device)
position_ids = position_ids.unsqueeze(0).expand_as(input_ids)
if token_type_ids is None:
token_type_ids = torch.zeros_like(input_ids)
words_embeddings = self.word_embeddings(input_ids)
position_embeddings = self.position_embeddings(position_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = words_embeddings + position_embeddings + token_type_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
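# --- Hedged usage sketch (editor's addition) ---
# BertEmbeddings only needs a config object exposing the five attributes read in
# __init__. The tiny config below is an assumption for illustration; real BERT
# configs use far larger values (e.g. vocab_size=30522, hidden_size=768).
if __name__ == "__main__":
    from types import SimpleNamespace

    demo_config = SimpleNamespace(
        vocab_size=100,
        hidden_size=16,
        max_position_embeddings=32,
        type_vocab_size=2,
        hidden_dropout_prob=0.1,
    )
    demo_embeddings = BertEmbeddings(demo_config)
    demo_input_ids = torch.randint(0, 100, (2, 8))  # batch of 2 sequences, length 8
    demo_output = demo_embeddings(demo_input_ids)   # token_type_ids default to zeros
    print(demo_output.shape)                        # expected: torch.Size([2, 8, 16])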
| 35.888889
| 114
| 0.750774
| 1,564
| 0.807018
| 0
| 0
| 0
| 0
| 0
| 0
| 230
| 0.118679
|
5918b94351e68baf0dc788cb62fb44c5a012741d
| 2,276
|
py
|
Python
|
raster_compare/base/raster_data_difference.py
|
jomey/raster_compare
|
5199005d01f569e187e944d62af0ea70c383d16a
|
[
"MIT"
] | 1
|
2021-11-13T12:59:53.000Z
|
2021-11-13T12:59:53.000Z
|
raster_compare/base/raster_data_difference.py
|
jomey/raster_compare
|
5199005d01f569e187e944d62af0ea70c383d16a
|
[
"MIT"
] | null | null | null |
raster_compare/base/raster_data_difference.py
|
jomey/raster_compare
|
5199005d01f569e187e944d62af0ea70c383d16a
|
[
"MIT"
] | null | null | null |
import numpy as np
from osgeo import gdal
from .median_absolute_deviation import MedianAbsoluteDeviation
from .raster_file import RasterFile
class RasterDataDifference(object):
GDAL_DRIVER = gdal.GetDriverByName('GTiff')
def __init__(self, lidar, sfm, band_number):
self.lidar = RasterFile(lidar, band_number)
self.sfm = RasterFile(sfm, band_number)
self._aspect = None
self.band_values = self.sfm.band_values() - self.lidar.band_values()
self.band_mask = self.band_values.mask
self.mad = MedianAbsoluteDeviation(self.band_values.compressed())
self._slope = None
@property
def band_values(self):
return self._band_values
@band_values.setter
def band_values(self, value):
self._band_values = value
@property
def band_mask(self):
return self._band_mask
@band_mask.setter
def band_mask(self, value):
self._band_mask = np.copy(value)
def band_outlier_max(self):
return self.mad.data_median + self.mad.standard_deviation(2)
def band_outlier_min(self):
return self.mad.data_median - self.mad.standard_deviation(2)
@property
def band_filtered(self):
self.band_values.mask = np.ma.mask_or(
self.band_mask,
np.ma.masked_outside(
self.band_unfiltered,
self.band_outlier_min(),
self.band_outlier_max()
).mask
)
return self.band_values
@property
def band_unfiltered(self):
self.band_values.mask = self.band_mask
return self.band_values
@property
def band_outliers(self):
self.band_values.mask = np.ma.mask_or(
self.band_mask,
np.ma.masked_inside(
self.band_unfiltered,
self.band_outlier_min(),
self.band_outlier_max()
).mask
)
return self.band_values
@property
def aspect(self):
if self._aspect is None:
self._aspect = self.sfm.aspect - self.lidar.aspect
return self._aspect
@property
def slope(self):
if self._slope is None:
self._slope = self.sfm.slope - self.lidar.slope
return self._slope
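# --- Hedged usage sketch (editor's addition) ---
# The class expects two rasters covering the same extent; the file names below are
# placeholders, not files shipped with this repository. band_filtered masks values
# outside +/- 2 MAD-based standard deviations around the median, as implemented above.
if __name__ == "__main__":
    demo_diff = RasterDataDifference("lidar.tif", "sfm.tif", band_number=1)  # hypothetical inputs
    print("difference median:", demo_diff.mad.data_median)
    print("cells kept after filtering:", demo_diff.band_filtered.count())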
| 27.756098
| 76
| 0.629174
| 2,131
| 0.936292
| 0
| 0
| 1,387
| 0.609402
| 0
| 0
| 7
| 0.003076
|
591c39e1d0a64ea2423fa974b75251f4ec29ed0a
| 3,386
|
py
|
Python
|
dbcontext/Module/construct.py
|
jimmg35/Sensor_Crawling_v2
|
5154885cad5173127539487a2fcf2140a4409b8b
|
[
"MIT"
] | null | null | null |
dbcontext/Module/construct.py
|
jimmg35/Sensor_Crawling_v2
|
5154885cad5173127539487a2fcf2140a4409b8b
|
[
"MIT"
] | null | null | null |
dbcontext/Module/construct.py
|
jimmg35/Sensor_Crawling_v2
|
5154885cad5173127539487a2fcf2140a4409b8b
|
[
"MIT"
] | null | null | null |
import time
import hmac
import base64
import datetime
import schedule
import psycopg2
from time import mktime
from hashlib import sha1
from pprint import pprint
from requests import request
from datetime import datetime
from wsgiref.handlers import format_date_time
from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT
class Constructor():
def __init__(self, DB_list, DB_detail, PGSQLDetail):
# Databases need to be constructed
self.DB_list = DB_list
# Information of Databases and Tables
self.DB_detail = DB_detail
# PostgreSQL server variable
self.user = PGSQLDetail['user']
self.password = PGSQLDetail['password']
self.host = PGSQLDetail['host']
self.port = PGSQLDetail['port']
# Connect to PostgreSQL
self.cursor = self.ConnectToPGSQL(self.user, self.password, self.host, self.port)
# Kill query
self.Kill = '''SELECT pg_terminate_backend(pid) FROM pg_stat_activity WHERE pid <> pg_backend_pid() AND datname = 'template1';'''
def ConnectToPGSQL(self, user, password, host, port):
'''Connect to PostgreSQL'''
conn = psycopg2.connect(user=user,password=password,host=host,port=port)
conn.autocommit = True
conn.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)
print(f'Successfully connected | User:{user}')
cursor = conn.cursor()
return cursor
def ConnectToDatabase(self, database, user, password, host, port):
'''Connect to Database'''
conn = psycopg2.connect(database = database, user=user,password=password,host=host,port=port)
conn.autocommit = True
conn.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)
print(f'Successfully connected | User:{user}')
cursor = conn.cursor()
return cursor
def constructDatabases(self):
'''Create Databases'''
print("Initializing Databases...")
self.cursor.execute(self.Kill)
for DB in self.DB_list:
self.cursor.execute("SELECT 1 FROM pg_catalog.pg_database WHERE datname = \'{}\'".format(DB))
exists = self.cursor.fetchone()
if exists == None:
# Create Database
self.cursor.execute("CREATE DATABASE {}".format(DB))
print("Database {} has been created!".format(DB))
else:
print("Database {} is already existed!".format(DB))
def constructTables(self):
'''Iterate through each database and create tables'''
for DB in self.DB_detail.keys():
temp_cursor = self.ConnectToDatabase(DB,self.user,self.password,self.host,self.port)
for table in self.DB_detail[DB].keys():
query = self.TableBuilder(DB, table)
temp_cursor.execute(query)
print("Table {} has been created in {}".format(table, DB))
def TableBuilder(self, DB, table):
'''Helper function of constructTable function'''
query_head = '''CREATE TABLE {} '''.format(table)
query_head += self.DB_detail[DB][table]
#print(query_head)
return query_head
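# --- Hedged usage sketch (editor's addition) ---
# The dictionaries below illustrate the shapes Constructor expects: a list of
# database names, a nested mapping of database -> table -> column definition (the
# string is appended verbatim after "CREATE TABLE <name> "), and the PostgreSQL
# connection details. All values are placeholders for a local server.
if __name__ == '__main__':
    demo_db_list = ['sensordata']
    demo_db_detail = {
        'sensordata': {
            'readings': '(id SERIAL PRIMARY KEY, value REAL, recorded_at TIMESTAMP)'
        }
    }
    demo_pgsql = {'user': 'postgres', 'password': 'secret', 'host': '127.0.0.1', 'port': '5432'}
    demo_constructor = Constructor(demo_db_list, demo_db_detail, demo_pgsql)
    demo_constructor.constructDatabases()
    demo_constructor.constructTables()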
| 33.86
| 137
| 0.60632
| 3,005
| 0.887478
| 0
| 0
| 0
| 0
| 0
| 0
| 794
| 0.234495
|
591d8ee660b8804dd218cc4cd1c5374e204e9abe
| 1,540
|
py
|
Python
|
ppqq/src/qq/main.py
|
xiaomin0322/mypqq
|
8bdd5d1dafb5fa53d65cb732b7372fbcfe9c7c2c
|
[
"Apache-2.0"
] | 1
|
2018-11-11T14:34:53.000Z
|
2018-11-11T14:34:53.000Z
|
ppqq/src/qq/main.py
|
xiaomin0322/mypqq
|
8bdd5d1dafb5fa53d65cb732b7372fbcfe9c7c2c
|
[
"Apache-2.0"
] | null | null | null |
ppqq/src/qq/main.py
|
xiaomin0322/mypqq
|
8bdd5d1dafb5fa53d65cb732b7372fbcfe9c7c2c
|
[
"Apache-2.0"
] | 1
|
2021-02-04T08:46:09.000Z
|
2021-02-04T08:46:09.000Z
|
# -*- coding: utf8 -*-
# Author: Light.紫.星
# QQ:1311817771
import AndroidQQ,threading,time
from AndroidQQ import Android
class Main:
def __init__(self):
pass
def login(self,qqnum,qqpass):
print 15*"-",("开始登陆QQ:"),qqnum,15*"-"
self.sdk = Android()
self.state = 0
self.code = ""
self.sdk.init(qqnum,qqpass)
#self.sdk.QQ_loadTokenData()
#self.state = self.sdk.Fun_again_Login ()
#print "self.state",self.state
self.state = self.sdk.Fun_Login()
self.do_login()
def do_login(self):
if(self.state == self.sdk.login_state_success):
            self.sdk.QQ_online( ); # go online
            print "Login succeeded"
            self.sdk.QQ_sendfriendmsg(635674608,"1234test测试") # send a friend message
            self.sdk.QQ_sendgroupmsg(189884897," 测试 test 43 2 1 ") # send a group message
            #self.sdk.QQ_offline( ); # go offline
            # sdk.qq_savetokendata()# save token data for a second login
            # start a thread (&loop to process messages, , thread_hwnd)
return
elif(self.state == self.sdk.login_state_veriy):
print "需要验证码"
open("vpic.jpg","wb").write(self.sdk.getViery())
code = raw_input("请输入验证码(vpic.jpg):")
self.state = self.sdk.Fun_SendCode(code)
# self.state = self.sdk.Fun_SendCode("abcd")
print "验证码返回",self.state
self.do_login()
else:
print "登陆失败:",self.sdk.getLastError()
if __name__ == "__main__":
qq = Main()
qq.login("634545399","")
| 35
| 75
| 0.546104
| 1,468
| 0.872771
| 0
| 0
| 0
| 0
| 0
| 0
| 588
| 0.349584
|
591f579f62bec7c986797fa9d6cc59de7656817e
| 527
|
py
|
Python
|
util/logger.py
|
code4hk/NewsdiffHK-Backend
|
76ffd933fe9900a0bd2191597a210ddf86d2a8cd
|
[
"MIT"
] | 5
|
2015-03-29T19:19:16.000Z
|
2015-06-20T09:37:39.000Z
|
util/logger.py
|
code4hk/NewsdiffHK-Backend
|
76ffd933fe9900a0bd2191597a210ddf86d2a8cd
|
[
"MIT"
] | 28
|
2015-04-07T13:34:57.000Z
|
2015-05-25T13:30:36.000Z
|
util/logger.py
|
code4hk/NewsdiffHK-Backend
|
76ffd933fe9900a0bd2191597a210ddf86d2a8cd
|
[
"MIT"
] | null | null | null |
from util.env import log_dir
import logging
class MyFormatter(logging.Formatter):
def formatTime(self, record, datefmt=None):
return logging.Formatter.formatTime(self, record, datefmt).replace(',', '.')
def get(name):
log = logging.getLogger(name)
log.setLevel(logging.DEBUG)
formatter = MyFormatter('%(asctime)s:%(levelname)s:%(message)s')
ch = logging.FileHandler(log_dir() + '/news_diff.log')
ch.setLevel(logging.DEBUG)
ch.setFormatter(formatter)
log.addHandler(ch)
return log
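# --- Hedged usage sketch (editor's addition) ---
# Callers only pass a logger name; records are appended to <log_dir()>/news_diff.log.
if __name__ == '__main__':
    demo_log = get('demo')  # any module name works here
    demo_log.info('logger configured')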
| 29.277778
| 84
| 0.70019
| 170
| 0.322581
| 0
| 0
| 0
| 0
| 0
| 0
| 61
| 0.11575
|
59203bba648641256e52153b47303c1f888cf09a
| 1,347
|
py
|
Python
|
src/core/migrations/0096_auto_20191023_1441.py
|
metabolism-of-cities/ARCHIVED-metabolism-of-cities-platform-v3
|
c754d3b1b401906a21640b8eacb6b724a448b31c
|
[
"MIT"
] | null | null | null |
src/core/migrations/0096_auto_20191023_1441.py
|
metabolism-of-cities/ARCHIVED-metabolism-of-cities-platform-v3
|
c754d3b1b401906a21640b8eacb6b724a448b31c
|
[
"MIT"
] | null | null | null |
src/core/migrations/0096_auto_20191023_1441.py
|
metabolism-of-cities/ARCHIVED-metabolism-of-cities-platform-v3
|
c754d3b1b401906a21640b8eacb6b724a448b31c
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.2.2 on 2019-10-23 14:41
from django.db import migrations, models
import tinymce.models
class Migration(migrations.Migration):
dependencies = [
('core', '0095_auto_20191022_1602'),
]
operations = [
migrations.AlterModelOptions(
name='casestudy',
options={'verbose_name_plural': 'case studies'},
),
migrations.RemoveField(
model_name='tag',
name='gps',
),
migrations.AlterField(
model_name='tag',
name='description',
field=tinymce.models.HTMLField(blank=True, null=True, verbose_name='description'),
),
migrations.AlterField(
model_name='tag',
name='hidden',
field=models.BooleanField(db_index=True, default=False, help_text='Mark if tag is superseded/not yet approved/deactivated'),
),
migrations.AlterField(
model_name='tag',
name='parent',
field=models.CharField(blank=True, choices=[(1, 'Publication Types'), (2, 'Metabolism Studies'), (3, 'Countries'), (4, 'Cities'), (5, 'Scales'), (6, 'Flows'), (7, 'Time Horizon'), (9, 'Methodologies'), (10, 'Other')], help_text='This was a previous classification - can be left empty', max_length=2, null=True),
),
]
| 35.447368
| 323
| 0.590943
| 1,232
| 0.914625
| 0
| 0
| 0
| 0
| 0
| 0
| 412
| 0.305865
|
592176ee7d34af8c375b741cef8c2df674d9c4b5
| 2,243
|
py
|
Python
|
piservicebusclient.py
|
nikkh/pi
|
237c0c0effcf69c15c6fb2791c7fd49eb1e254aa
|
[
"Unlicense"
] | null | null | null |
piservicebusclient.py
|
nikkh/pi
|
237c0c0effcf69c15c6fb2791c7fd49eb1e254aa
|
[
"Unlicense"
] | null | null | null |
piservicebusclient.py
|
nikkh/pi
|
237c0c0effcf69c15c6fb2791c7fd49eb1e254aa
|
[
"Unlicense"
] | null | null | null |
#!/usr/bin/python
import colorsys
from azure.servicebus import ServiceBusService
from azure.servicebus import Message
from blinkt import set_pixel, set_brightness, show, clear
import time
import json
class Payload(object):
def __init__(self, j):
self.__dict__ = json.loads(j)
def snake( r, g, b ):
"This creates a snake effect on the blinkt using the specified colour"
clear()
for count in range(1,20):
print(count)
for i in range(8):
clear()
set_pixel(i, r, g, b)
show()
time.sleep(0.05)
clear()
return;
def rainbow():
clear()
spacing = 360.0 / 16.0
hue = 0
set_brightness(0.1)
for count in range(1,160):
print(count)
hue = int(time.time() * 100) % 360
for x in range(8):
offset = x * spacing
h = ((hue + offset) % 360) / 360.0
r, g, b = [int(c * 255) for c in colorsys.hsv_to_rgb(h, 1.0, 1.0)]
set_pixel(x, r, g, b)
show()
time.sleep(0.001)
return;
set_brightness(0.1)
print('Nicks Raspberry Pi Python Service Bus Client version 0.1')
service_namespace='nixpitest'
key_name = 'RootManageSharedAccessKey' # SharedAccessKeyName from Azure portal
with open('private/keys.txt', 'r') as myfile:
keyval=myfile.read().replace('\n', '')
key_value = keyval # SharedAccessKey from Azure portal
sbs = ServiceBusService(service_namespace,
shared_access_key_name=key_name,
shared_access_key_value=key_value)
sbs.create_queue('testpythonqueue1')
while True:
newmsg = None
newmsg = sbs.receive_queue_message('testpythonqueue1', peek_lock=False)
if newmsg.body is not None:
print ("message: ", newmsg.body, "\n")
p = Payload(newmsg.body)
if p.device: print(p.device)
if p.effect: print(p.effect)
if p.led: print(p.led)
if p.colour: print(p.colour)
if p.state: print(p.state)
if p.effect == 'snake':
if p.colour == 'red':
snake(255,0,0)
elif p.colour == 'green':
snake(0,255,0)
elif p.colour == 'blue':
snake(0,0,255)
if p.effect == 'rainbow':
rainbow()
clear()
time.sleep(1)
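# --- Hedged usage sketch (editor's addition) ---
# The receive loop above expects a JSON body exposing the fields read from Payload
# (device, effect, led, colour, state). A message like the one below, sent from a
# separate process with the same legacy ServiceBusService API, would trigger the
# red snake animation. The queue name matches the one above; the field values are
# illustrative only.
#
#   sender = ServiceBusService(service_namespace,
#                              shared_access_key_name=key_name,
#                              shared_access_key_value=key_value)
#   body = json.dumps({"device": "pi-blinkt", "effect": "snake", "led": 0,
#                      "colour": "red", "state": "on"})
#   sender.send_queue_message('testpythonqueue1', Message(body))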
| 28.392405
| 78
| 0.602764
| 89
| 0.039679
| 0
| 0
| 0
| 0
| 0
| 0
| 369
| 0.164512
|
59245ad69bc6ea799437abf97159920fe65df34f
| 149
|
py
|
Python
|
exercicio-mundo-1/ex010.py
|
amosxrl/python
|
8399a9c42fdb49184fcfe906f8bce82d1a671667
|
[
"MIT"
] | 2
|
2020-05-06T23:49:20.000Z
|
2020-05-06T23:49:25.000Z
|
exercicio-mundo-1/ex010.py
|
amosxrl/python
|
8399a9c42fdb49184fcfe906f8bce82d1a671667
|
[
"MIT"
] | null | null | null |
exercicio-mundo-1/ex010.py
|
amosxrl/python
|
8399a9c42fdb49184fcfe906f8bce82d1a671667
|
[
"MIT"
] | null | null | null |
print('How much money do you have in your wallet?')
print('-'*20)
re = float(input('R$ '))
dol = re * 0.1874
print('-'*20)
print('US$ {:.2f}'.format(dol))
| 21.285714
| 45
| 0.610738
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 61
| 0.409396
|
59253833cbcb18241d731edddc82a7004e814b3e
| 3,163
|
py
|
Python
|
catkin_ws/build/baxter_core_msgs/cmake/baxter_core_msgs-genmsg-context.py
|
roop-pal/robotic-folding
|
a0e062ac6d23cd07fe10e3f45abc4ba50e533141
|
[
"RSA-MD"
] | null | null | null |
catkin_ws/build/baxter_core_msgs/cmake/baxter_core_msgs-genmsg-context.py
|
roop-pal/robotic-folding
|
a0e062ac6d23cd07fe10e3f45abc4ba50e533141
|
[
"RSA-MD"
] | null | null | null |
catkin_ws/build/baxter_core_msgs/cmake/baxter_core_msgs-genmsg-context.py
|
roop-pal/robotic-folding
|
a0e062ac6d23cd07fe10e3f45abc4ba50e533141
|
[
"RSA-MD"
] | null | null | null |
# generated from genmsg/cmake/pkg-genmsg.context.in
messages_str = "/home/parallels/catkin_ws/src/baxter_common/baxter_core_msgs/msg/AnalogIOState.msg;/home/parallels/catkin_ws/src/baxter_common/baxter_core_msgs/msg/AnalogIOStates.msg;/home/parallels/catkin_ws/src/baxter_common/baxter_core_msgs/msg/AnalogOutputCommand.msg;/home/parallels/catkin_ws/src/baxter_common/baxter_core_msgs/msg/AssemblyState.msg;/home/parallels/catkin_ws/src/baxter_common/baxter_core_msgs/msg/AssemblyStates.msg;/home/parallels/catkin_ws/src/baxter_common/baxter_core_msgs/msg/CameraControl.msg;/home/parallels/catkin_ws/src/baxter_common/baxter_core_msgs/msg/CameraSettings.msg;/home/parallels/catkin_ws/src/baxter_common/baxter_core_msgs/msg/CollisionAvoidanceState.msg;/home/parallels/catkin_ws/src/baxter_common/baxter_core_msgs/msg/CollisionDetectionState.msg;/home/parallels/catkin_ws/src/baxter_common/baxter_core_msgs/msg/DigitalIOState.msg;/home/parallels/catkin_ws/src/baxter_common/baxter_core_msgs/msg/DigitalIOStates.msg;/home/parallels/catkin_ws/src/baxter_common/baxter_core_msgs/msg/DigitalOutputCommand.msg;/home/parallels/catkin_ws/src/baxter_common/baxter_core_msgs/msg/EndEffectorCommand.msg;/home/parallels/catkin_ws/src/baxter_common/baxter_core_msgs/msg/EndEffectorProperties.msg;/home/parallels/catkin_ws/src/baxter_common/baxter_core_msgs/msg/EndEffectorState.msg;/home/parallels/catkin_ws/src/baxter_common/baxter_core_msgs/msg/EndpointState.msg;/home/parallels/catkin_ws/src/baxter_common/baxter_core_msgs/msg/EndpointStates.msg;/home/parallels/catkin_ws/src/baxter_common/baxter_core_msgs/msg/HeadPanCommand.msg;/home/parallels/catkin_ws/src/baxter_common/baxter_core_msgs/msg/HeadState.msg;/home/parallels/catkin_ws/src/baxter_common/baxter_core_msgs/msg/JointCommand.msg;/home/parallels/catkin_ws/src/baxter_common/baxter_core_msgs/msg/NavigatorState.msg;/home/parallels/catkin_ws/src/baxter_common/baxter_core_msgs/msg/NavigatorStates.msg;/home/parallels/catkin_ws/src/baxter_common/baxter_core_msgs/msg/RobustControllerStatus.msg;/home/parallels/catkin_ws/src/baxter_common/baxter_core_msgs/msg/SEAJointState.msg;/home/parallels/catkin_ws/src/baxter_common/baxter_core_msgs/msg/URDFConfiguration.msg"
services_str = "/home/parallels/catkin_ws/src/baxter_common/baxter_core_msgs/srv/CloseCamera.srv;/home/parallels/catkin_ws/src/baxter_common/baxter_core_msgs/srv/ListCameras.srv;/home/parallels/catkin_ws/src/baxter_common/baxter_core_msgs/srv/OpenCamera.srv;/home/parallels/catkin_ws/src/baxter_common/baxter_core_msgs/srv/SolvePositionIK.srv"
pkg_name = "baxter_core_msgs"
dependencies_str = "geometry_msgs;sensor_msgs;std_msgs"
langs = "gencpp;geneus;genlisp;gennodejs;genpy"
dep_include_paths_str = "baxter_core_msgs;/home/parallels/catkin_ws/src/baxter_common/baxter_core_msgs/msg;geometry_msgs;/opt/ros/kinetic/share/geometry_msgs/cmake/../msg;sensor_msgs;/opt/ros/kinetic/share/sensor_msgs/cmake/../msg;std_msgs;/opt/ros/kinetic/share/std_msgs/cmake/../msg"
PYTHON_EXECUTABLE = "/usr/bin/python"
package_has_static_sources = '' == 'TRUE'
genmsg_check_deps_script = "/opt/ros/kinetic/share/genmsg/cmake/../../../lib/genmsg/genmsg_check_deps.py"
| 263.583333
| 2,159
| 0.863737
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,980
| 0.942144
|
59269ff1d7149784a5bf3e067f0e6975db562830
| 14,031
|
py
|
Python
|
apps/part_interpolation&replacement/part_replacement.py
|
GuillaumeDufau/3D-point-capsule-networks
|
369206df643edb263d43cf2d05923cf0a26841e5
|
[
"MIT"
] | 283
|
2019-04-14T12:58:54.000Z
|
2022-03-30T11:49:38.000Z
|
apps/part_interpolation&replacement/part_replacement.py
|
LONG-9621/3D-Point-Capsule-Networks
|
161ac9042ca9c048f4b531ae26fe94a29b13e777
|
[
"MIT"
] | 20
|
2019-05-01T05:40:02.000Z
|
2021-11-20T11:15:17.000Z
|
apps/part_interpolation&replacement/part_replacement.py
|
LONG-9621/3D-Point-Capsule-Networks
|
161ac9042ca9c048f4b531ae26fe94a29b13e777
|
[
"MIT"
] | 55
|
2019-04-22T12:14:42.000Z
|
2022-03-25T06:26:36.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 12 17:45:51 2018
@author: zhao
"""
import argparse
import torch
import torch.nn.parallel
from torch.autograd import Variable
import torch.optim as optim
import torch.nn.functional as F
import sys
import os
import numpy as np
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.abspath(os.path.join(BASE_DIR, '../../models')))
sys.path.append(os.path.abspath(os.path.join(BASE_DIR, '../../dataloaders')))
import shapenet_part_loader
import matplotlib.pyplot as plt
from pointcapsnet_ae import PointCapsNet,PointCapsNetDecoder
from capsule_seg_net import CapsSegNet
import json
from open3d import *
def main():
blue = lambda x:'\033[94m' + x + '\033[0m'
cat_no={'Airplane': 0, 'Bag': 1, 'Cap': 2, 'Car': 3, 'Chair': 4, 'Earphone': 5,
'Guitar': 6, 'Knife': 7, 'Lamp': 8, 'Laptop': 9, 'Motorbike': 10,
'Mug': 11, 'Pistol': 12, 'Rocket': 13, 'Skateboard': 14, 'Table': 15}
    # generate part label one-hot correspondence from the category:
dataset_main_path=os.path.abspath(os.path.join(BASE_DIR, '../../dataset'))
oid2cpid_file_name=os.path.join(dataset_main_path, opt.dataset,'shapenetcore_partanno_segmentation_benchmark_v0/shapenet_part_overallid_to_catid_partid.json')
oid2cpid = json.load(open(oid2cpid_file_name, 'r'))
object2setofoid = {}
for idx in range(len(oid2cpid)):
objid, pid = oid2cpid[idx]
if not objid in object2setofoid.keys():
object2setofoid[objid] = []
object2setofoid[objid].append(idx)
all_obj_cat_file = os.path.join(dataset_main_path, opt.dataset, 'shapenetcore_partanno_segmentation_benchmark_v0/synsetoffset2category.txt')
fin = open(all_obj_cat_file, 'r')
lines = [line.rstrip() for line in fin.readlines()]
objcats = [line.split()[1] for line in lines]
# objnames = [line.split()[0] for line in lines]
# on2oid = {objcats[i]:i for i in range(len(objcats))}
fin.close()
colors = plt.cm.tab10((np.arange(10)).astype(int))
blue = lambda x:'\033[94m' + x + '\033[0m'
    # load the model for the point caps auto encoder
capsule_net = PointCapsNet(opt.prim_caps_size, opt.prim_vec_size, opt.latent_caps_size, opt.latent_vec_size, opt.num_points)
if opt.model != '':
capsule_net.load_state_dict(torch.load(opt.model))
if USE_CUDA:
capsule_net = torch.nn.DataParallel(capsule_net).cuda()
capsule_net=capsule_net.eval()
# load the model for only decoding
capsule_net_decoder = PointCapsNetDecoder(opt.prim_caps_size, opt.prim_vec_size, opt.latent_caps_size, opt.latent_vec_size, opt.num_points)
if opt.model != '':
capsule_net_decoder.load_state_dict(torch.load(opt.model),strict=False)
if USE_CUDA:
capsule_net_decoder = capsule_net_decoder.cuda()
capsule_net_decoder=capsule_net_decoder.eval()
# load the model for capsule wised part segmentation
caps_seg_net = CapsSegNet(latent_caps_size=opt.latent_caps_size, latent_vec_size=opt.latent_vec_size , num_classes=opt.n_classes)
if opt.part_model != '':
caps_seg_net.load_state_dict(torch.load(opt.part_model))
if USE_CUDA:
caps_seg_net = caps_seg_net.cuda()
caps_seg_net = caps_seg_net.eval()
train_dataset = shapenet_part_loader.PartDataset(classification=False, class_choice=opt.class_choice, npoints=opt.num_points, split='test')
train_dataloader = torch.utils.data.DataLoader(train_dataset, batch_size=opt.batch_size, shuffle=True, num_workers=4)
# container for ground truth
pcd_gt_source=[]
for i in range(2):
pcd = PointCloud()
pcd_gt_source.append(pcd)
pcd_gt_target=[]
for i in range(2):
pcd = PointCloud()
pcd_gt_target.append(pcd)
# container for ground truth cut and paste
pcd_gt_replace_source=[]
for i in range(2):
pcd = PointCloud()
pcd_gt_replace_source.append(pcd)
pcd_gt_replace_target=[]
for i in range(2):
pcd = PointCloud()
pcd_gt_replace_target.append(pcd)
# container for capsule based part replacement
pcd_caps_replace_source=[]
for i in range(opt.latent_caps_size):
pcd = PointCloud()
pcd_caps_replace_source.append(pcd)
pcd_caps_replace_target=[]
for i in range(opt.latent_caps_size):
pcd = PointCloud()
pcd_caps_replace_target.append(pcd)
# apply a transformation in order to get a better view point
##airplane
rotation_angle=np.pi/2
cosval = np.cos(rotation_angle)
sinval = np.sin(rotation_angle)
flip_transforms = [[1, 0, 0,-2],[0,cosval, -sinval,1.5],[0,sinval, cosval, 0],[0, 0, 0, 1]]
flip_transforms_r = [[1, 0, 0,2],[0, 1, 0,-1.5],[0, 0, 1,0],[0, 0, 0, 1]]
flip_transform_gt_s = [[1, 0, 0,-3],[0,cosval, -sinval,-1],[0,sinval, cosval, 0],[0, 0, 0, 1]]
flip_transform_gt_t = [[1, 0, 0,-3],[0,cosval, -sinval,1],[0,sinval, cosval, 0],[0, 0, 0, 1]]
flip_transform_gt_re_s = [[1, 0, 0,0],[0,cosval, -sinval,-1],[0,sinval, cosval, 0],[0, 0, 0, 1]]
flip_transform_gt_re_t = [[1, 0, 0,0],[0,cosval, -sinval,1],[0,sinval, cosval, 0],[0, 0, 0, 1]]
flip_transform_caps_re_s = [[1, 0, 0,3],[0,cosval, -sinval,-1],[0,sinval, cosval, 0],[0, 0, 0, 1]]
flip_transform_caps_re_t = [[1, 0, 0,3],[0,cosval, -sinval,1],[0,sinval, cosval, 0],[0, 0, 0, 1]]
colors = plt.cm.tab20((np.arange(20)).astype(int))
part_replace_no=1 # the part that is replaced
for batch_id, data in enumerate(train_dataloader):
points, part_label, cls_label= data
if not (opt.class_choice==None ):
cls_label[:]= cat_no[opt.class_choice]
if(points.size(0)<opt.batch_size):
break
all_model_pcd=PointCloud()
gt_source_list0=[]
gt_source_list1=[]
gt_target_list0=[]
gt_target_list1=[]
for point_id in range(opt.num_points):
if(part_label[0,point_id]==part_replace_no ):
gt_source_list0.append(points[0,point_id,:])
else:
gt_source_list1.append(points[0,point_id,:])
if( part_label[1,point_id]==part_replace_no):
gt_target_list0.append(points[1,point_id,:])
else:
gt_target_list1.append(points[1,point_id,:])
# viz GT with colored part
pcd_gt_source[0].points=Vector3dVector(gt_source_list0)
pcd_gt_source[0].paint_uniform_color([colors[5,0], colors[5,1], colors[5,2]])
pcd_gt_source[0].transform(flip_transform_gt_s)
all_model_pcd+=pcd_gt_source[0]
pcd_gt_source[1].points=Vector3dVector(gt_source_list1)
pcd_gt_source[1].paint_uniform_color([0.8,0.8,0.8])
pcd_gt_source[1].transform(flip_transform_gt_s)
all_model_pcd+=pcd_gt_source[1]
pcd_gt_target[0].points=Vector3dVector(gt_target_list0)
pcd_gt_target[0].paint_uniform_color([colors[6,0], colors[6,1], colors[6,2]])
pcd_gt_target[0].transform(flip_transform_gt_t)
all_model_pcd+=pcd_gt_target[0]
pcd_gt_target[1].points=Vector3dVector(gt_target_list1)
pcd_gt_target[1].paint_uniform_color([0.8,0.8,0.8])
pcd_gt_target[1].transform(flip_transform_gt_t)
all_model_pcd+=pcd_gt_target[1]
# viz replaced GT colored parts
pcd_gt_replace_source[0].points=Vector3dVector(gt_target_list0)
pcd_gt_replace_source[0].paint_uniform_color([colors[6,0], colors[6,1], colors[6,2]])
pcd_gt_replace_source[0].transform(flip_transform_gt_re_s)
all_model_pcd+=pcd_gt_replace_source[0]
pcd_gt_replace_source[1].points=Vector3dVector(gt_source_list1)
pcd_gt_replace_source[1].paint_uniform_color([0.8,0.8,0.8])
pcd_gt_replace_source[1].transform(flip_transform_gt_re_s)
all_model_pcd+=pcd_gt_replace_source[1]
pcd_gt_replace_target[0].points=Vector3dVector(gt_source_list0)
pcd_gt_replace_target[0].paint_uniform_color([colors[5,0], colors[5,1], colors[5,2]])
pcd_gt_replace_target[0].transform(flip_transform_gt_re_t)
all_model_pcd+=pcd_gt_replace_target[0]
pcd_gt_replace_target[1].points=Vector3dVector(gt_target_list1)
pcd_gt_replace_target[1].paint_uniform_color([0.8,0.8,0.8])
pcd_gt_replace_target[1].transform(flip_transform_gt_re_t)
all_model_pcd+=pcd_gt_replace_target[1]
#capsule based replacement
points_ = Variable(points)
points_ = points_.transpose(2, 1)
if USE_CUDA:
points_ = points_.cuda()
latent_caps, reconstructions= capsule_net(points_)
reconstructions=reconstructions.transpose(1,2).data.cpu()
cur_label_one_hot = np.zeros((2, 16), dtype=np.float32)
for i in range(2):
cur_label_one_hot[i, cls_label[i]] = 1
cur_label_one_hot=torch.from_numpy(cur_label_one_hot).float()
expand =cur_label_one_hot.unsqueeze(2).expand(2,16,opt.latent_caps_size).transpose(1,2)
latent_caps, expand = Variable(latent_caps), Variable(expand)
latent_caps,expand = latent_caps.cuda(), expand.cuda()
# predidt the part label of each capsule
latent_caps_with_one_hot=torch.cat((latent_caps,expand),2)
latent_caps_with_one_hot,expand=Variable(latent_caps_with_one_hot),Variable(expand)
latent_caps_with_one_hot,expand=latent_caps_with_one_hot.cuda(),expand.cuda()
latent_caps_with_one_hot=latent_caps_with_one_hot.transpose(2, 1)
output_digit=caps_seg_net(latent_caps_with_one_hot)
for i in range (2):
iou_oids = object2setofoid[objcats[cls_label[i]]]
non_cat_labels = list(set(np.arange(50)).difference(set(iou_oids)))
mini = torch.min(output_digit[i,:,:])
output_digit[i,:, non_cat_labels] = mini - 1000
pred_choice = output_digit.data.cpu().max(2)[1]
#
# saved the index of capsules which are assigned to current part
part_no=iou_oids[part_replace_no]
part_viz=[]
for caps_no in range (opt.latent_caps_size):
if(pred_choice[0,caps_no]==part_no and pred_choice[1,caps_no]==part_no):
part_viz.append(caps_no)
#replace the capsules
latent_caps_replace=latent_caps.clone()
latent_caps_replace= Variable(latent_caps_replace)
latent_caps_replace = latent_caps_replace.cuda()
for j in range (len(part_viz)):
latent_caps_replace[0,part_viz[j],]=latent_caps[1,part_viz[j],]
latent_caps_replace[1,part_viz[j],]=latent_caps[0,part_viz[j],]
reconstructions_replace = capsule_net_decoder(latent_caps_replace)
reconstructions_replace=reconstructions_replace.transpose(1,2).data.cpu()
for j in range(opt.latent_caps_size):
current_patch_s=torch.zeros(int(opt.num_points/opt.latent_caps_size),3)
current_patch_t=torch.zeros(int(opt.num_points/opt.latent_caps_size),3)
for m in range(int(opt.num_points/opt.latent_caps_size)):
current_patch_s[m,]=reconstructions_replace[0][opt.latent_caps_size*m+j,]
current_patch_t[m,]=reconstructions_replace[1][opt.latent_caps_size*m+j,]
pcd_caps_replace_source[j].points = Vector3dVector(current_patch_s)
pcd_caps_replace_target[j].points = Vector3dVector(current_patch_t)
part_no=iou_oids[part_replace_no]
if(j in part_viz):
pcd_caps_replace_source[j].paint_uniform_color([colors[6,0], colors[6,1], colors[6,2]])
pcd_caps_replace_target[j].paint_uniform_color([colors[5,0], colors[5,1], colors[5,2]])
else:
pcd_caps_replace_source[j].paint_uniform_color([0.8,0.8,0.8])
pcd_caps_replace_target[j].paint_uniform_color([0.8,0.8,0.8])
pcd_caps_replace_source[j].transform(flip_transform_caps_re_s)
pcd_caps_replace_target[j].transform(flip_transform_caps_re_t)
all_model_pcd+=pcd_caps_replace_source[j]
all_model_pcd+=pcd_caps_replace_target[j]
draw_geometries([all_model_pcd])
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--batch_size', type=int, default=2, help='input batch size')
parser.add_argument('--prim_caps_size', type=int, default=1024, help='number of primary point caps')
parser.add_argument('--prim_vec_size', type=int, default=16, help='scale of primary point caps')
parser.add_argument('--latent_caps_size', type=int, default=64, help='number of latent caps')
parser.add_argument('--latent_vec_size', type=int, default=64, help='scale of latent caps')
parser.add_argument('--num_points', type=int, default=2048, help='input point set size')
parser.add_argument('--part_model', type=str, default='../../checkpoints/part_seg_100percent.pth', help='model path for the pre-trained part segmentation network')
parser.add_argument('--model', type=str, default='../../checkpoints/shapenet_part_dataset_ae_200.pth', help='model path')
    parser.add_argument('--dataset', type=str, default='shapenet_part', help='dataset: shapenet_part, shapenet_core13, shapenet_core55, modelnet40')
parser.add_argument('--n_classes', type=int, default=50, help='part classes in all the catagories')
    parser.add_argument('--class_choice', type=str, default='Airplane', help='choose the class to evaluate')
opt = parser.parse_args()
print(opt)
USE_CUDA = True
main()
| 44.26183
| 170
| 0.662818
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,889
| 0.13463
|
592749e0c27abaef8d986702717878c311749a54
| 6,839
|
py
|
Python
|
src/Grid.py
|
RavinSG/aaivu-ride-hailing-simulation
|
eb7bc7cc6a5830d40509ce22fe4fa2eb013e6767
|
[
"Apache-2.0"
] | 8
|
2021-02-18T19:02:59.000Z
|
2022-02-19T13:38:48.000Z
|
src/Grid.py
|
Programmer-RD-AI/aaivu-ride-hailing-simulation
|
f315661c94c9e3f26bab1d8bb9c35d21b1a60479
|
[
"Apache-2.0"
] | null | null | null |
src/Grid.py
|
Programmer-RD-AI/aaivu-ride-hailing-simulation
|
f315661c94c9e3f26bab1d8bb9c35d21b1a60479
|
[
"Apache-2.0"
] | 2
|
2021-02-14T03:28:51.000Z
|
2022-02-19T13:38:51.000Z
|
import simpy
import itertools
import numpy as np
from RideSimulator.Driver import Driver
from RideSimulator.HexGrid import HexGrid
def get_spot_locations(width: int, height: int, interval: int) -> np.ndarray:
"""
:param width: width of the grid
:param height: height of the grid
:param interval: distance between two spots
:return: an array of all the spot locations
"""
x_points = np.arange(0, width, interval)
y_points = np.arange(0, height, interval)
    # If the distance from the corner to the nearest taxi spot is greater than the minimum search radius, additional
    # spots are added along the edges of the map.
if (width - x_points[-1]) > (interval / np.sqrt(2)):
x_points = np.append(x_points, width)
if (height - y_points[-1]) > (interval / np.sqrt(2)):
y_points = np.append(y_points, height)
spots = np.array([list(i) for i in itertools.product(x_points, y_points)])
return np.array([spots, len(y_points), len(x_points)], dtype=object)
class Grid(object):
"""
Handles all the information and processes related to the grid. The distances between grid units can be translated to
real world distances using the units_per_km attribute.
Taxi spots are used to anchor drivers into locations in the map to make it easier to find the closest driver for a
given trip.
    A hexagon overlay is used to cluster grid locations into regions; hotspots, traffic and other features are
    calculated per hexagon region.
"""
def __init__(self, env: simpy.Environment, width: int, height: int, interval: int, num_drivers: int,
hex_area: float, units_per_km: int = 1, seed: int = None):
"""
:param env: simpy environment
:param width: width of the grid
:param height: height of the grid
:param interval: distance between two spots
:param num_drivers: number of drivers in the grid
:param hex_area: area size of a single hex tile
:param units_per_km: number of grid units per km
"""
if seed is not None:
np.random.seed(seed)
self.width = width
self.height = height
self.interval = interval
self.hex_overlay = HexGrid(hex_area=hex_area, width=width, height=height, units_per_km=units_per_km)
self.taxi_spots, self.spot_height, self.spot_width = get_spot_locations(width=width, height=height,
interval=interval)
self.driver_pools = simpy.FilterStore(env, capacity=num_drivers)
def get_random_location(self) -> np.ndarray:
x = np.random.randint(0, self.width)
y = np.random.randint(0, self.height)
return np.array([x, y])
# Temp function to get location id until hexagons are implemented
def get_location_id(self, location):
grid_width = 10 # no. of cells in one axis (create 10x10 grid)
x = np.floor((location[0] - 0) * grid_width / self.width)
y = np.floor((location[1] - 0) * grid_width / self.height)
return x * grid_width + y
@staticmethod
def get_distance(loc1: np.ndarray, loc2: np.ndarray) -> float:
distance = np.linalg.norm(loc1 - loc2)
return np.round(distance, 1)
def get_spot_id(self, location):
return int(np.round(location[0]) * self.spot_height + np.round(location[1]))
def get_nearest_spot(self, location: np.ndarray, search_radius=1) -> list:
"""
Find the nearest driver spot for a given location.
Initially it'll only return the nearest spot to the driver. When search_radius = 2, the 4 taxi spots surrounding
the rider are returned. Afterwards, with each increment to the search_radius, all taxi spots inside a square
centered on the driver location with a side length of search_radius are returned.
:param location: x,y coords of the location
        :param search_radius: number of breadths the search will carry out on
:return: a list of the closest taxi spots
"""
x_spot = location[0] / self.interval
y_spot = location[1] / self.interval
closet_spot = [np.round(x_spot), np.round(y_spot)]
if search_radius == 1:
spot_no = [self.get_spot_id(closet_spot)]
elif search_radius == 2:
spot_no = []
x_points = {np.floor(x_spot), np.ceil(x_spot)}
y_points = {np.floor(y_spot), np.ceil(y_spot)}
spots = np.array([list(i) for i in itertools.product(x_points, y_points)])
for spot in spots:
spot_no.append(self.get_spot_id(spot))
else:
spot_no = []
x_points = [closet_spot[0]]
y_points = [closet_spot[1]]
for i in range(1, search_radius - 1):
x_points.append(max(0, closet_spot[0] - i))
x_points.append(min(self.spot_width - 1, closet_spot[0] + i))
y_points.append(max(0, closet_spot[1] - i))
y_points.append(min(self.spot_height - 1, closet_spot[1] + i))
x_points = set(x_points)
y_points = set(y_points)
spots = np.array([list(i) for i in itertools.product(x_points, y_points)])
for spot in spots:
spot_no.append(self.get_spot_id(spot))
return spot_no
def get_closest_drivers(self, location: np.ndarray, search_radius: int) -> list:
"""
A more accurate closest driver search using driver distances of all the drivers in the closest taxi spots.
Since this is more computationally expensive and the increment in accuracy does not outweigh the cost, this is
not used at the moment.
:param location: location the distances should be calculated from
        :param search_radius: number of breadths the search will carry out on
:return: a list of driver ids sorted in the ascending order according to their distances to the location
"""
spots = self.get_nearest_spot(location, search_radius=search_radius)
driver_ids = []
distances = []
for driver in self.driver_pools.items:
if driver.spot_id in spots:
driver_ids.append(driver.id)
distances.append(self.get_distance(location, driver.location))
if len(driver_ids) > 0:
_, driver_ids = zip(*sorted(zip(distances, driver_ids)))
return driver_ids
def assign_spot(self, driver: Driver):
"""
Assign the driver to his nearest driver pool.
:param driver: driver object
"""
driver_loc = driver.location
spot_id = self.get_nearest_spot(driver_loc)[0]
driver.spot_id = spot_id
self.driver_pools.put(driver)
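# --- Hedged usage sketch (editor's addition) ---
# A minimal grid with taxi spots every 5 units; it assumes the RideSimulator package
# is importable. Driver construction is omitted because Driver's constructor lives
# elsewhere in the package; the numbers below are placeholders chosen only to
# exercise the spot helpers.
if __name__ == "__main__":
    demo_env = simpy.Environment()
    demo_grid = Grid(demo_env, width=50, height=50, interval=5, num_drivers=10, hex_area=2.0, seed=42)
    rider_location = demo_grid.get_random_location()
    print("nearest spot ids:", demo_grid.get_nearest_spot(rider_location, search_radius=2))
    print("distance to origin:", Grid.get_distance(rider_location, np.array([0, 0])))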
| 41.70122
| 120
| 0.640445
| 5,819
| 0.850855
| 0
| 0
| 164
| 0.02398
| 0
| 0
| 2,525
| 0.369206
|
592751375bcd4d68f888638835a70c28a75cc554
| 427
|
py
|
Python
|
template.py
|
imsofi/advent-of-code
|
8ac1406cfee689d9da0302363eaee7c8fea5c722
|
[
"0BSD"
] | null | null | null |
template.py
|
imsofi/advent-of-code
|
8ac1406cfee689d9da0302363eaee7c8fea5c722
|
[
"0BSD"
] | null | null | null |
template.py
|
imsofi/advent-of-code
|
8ac1406cfee689d9da0302363eaee7c8fea5c722
|
[
"0BSD"
] | null | null | null |
"""
Day $:
https://adventofcode.com/2021/day/$
"""
Data = list
def parse_file(path: str) -> Data:
with open(path) as f:
raw = f.read().splitlines()
data = raw
return data
def part_1(data: Data):
...
def part_2(data: Data):
...
def main():
data = parse_file("example.txt")
print("Part 1", part_1(data))
print("Part 2", part_2(data))
if __name__ == "__main__":
main()
| 12.558824
| 36
| 0.559719
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 90
| 0.210773
|
59276445280313e61b8b7bc4ae85576dc76c9f96
| 3,174
|
py
|
Python
|
src/ReichardtDS8.py
|
smittal6/i3d
|
e347b5415f5665a6f25b644a3dda5dd32f01dbbb
|
[
"MIT"
] | null | null | null |
src/ReichardtDS8.py
|
smittal6/i3d
|
e347b5415f5665a6f25b644a3dda5dd32f01dbbb
|
[
"MIT"
] | null | null | null |
src/ReichardtDS8.py
|
smittal6/i3d
|
e347b5415f5665a6f25b644a3dda5dd32f01dbbb
|
[
"MIT"
] | null | null | null |
import numpy as np
def Reichardt8(video, dirs=[0,1,2,3,4,5,6,7]):
'''
Returns a list of Reichardt-Hassenstein correlator responses for the requested directions (all 8 by default)
args:
video: Shape ~ [TimeSteps, H, W, 1]
'''
vp1, vm1 = Reichardt_vertical_2channels_Vectorized(video) #Directions 1, -1
vp3, vm3 = Reichardt_horizontal_2channels_Vectorized(video) #Directions 3, -3
vp2, vm2 = Reichardt_diagonal1_2channels_Vectorized(video) #Directions 2, -2
vp4, vm4 = Reichardt_diagonal2_2channels_Vectorized(video) #Directions 4, -4
all_dirs = [vp1, vm1, vp2, vm2, vp3, vm3, vp4, vm4]
return [all_dirs[i] for i in dirs]
#timeDelay is unused in the Vectorized method, but may be useful later
def Reichardt_vertical_2channels_Vectorized(video,timeDelay=1):
'''
Reichardt-Hassenstein inspired video processing
Put negative values into another tensor and then concat for the two (e.g. red and green) channels
'''
vc_shift_vert_by1back=np.roll(video,1,axis=1)
vc_shift_time_by1forw=np.roll(video,-1,axis=0)
vc_shift_vert_by1back_time_by1forw=np.roll(vc_shift_vert_by1back,-1,axis=0)
vc = vc_shift_vert_by1back*vc_shift_time_by1forw - vc_shift_vert_by1back_time_by1forw*video
vc_neg = vc.clip(max=0)
vc_neg = -1*vc_neg
vc = vc.clip(0)
return vc, vc_neg
def Reichardt_diagonal1_2channels_Vectorized(video,timeDelay=1):
'''
Reichardt-Hassenstein inspired video processing
Put negative values into another tensor and then concat for the two (e.g. red and green) channels
'''
vc_shift_diag_by1back=np.roll(video,(1,1),axis=(1,2))
vc_shift_time_by1forw=np.roll(video,-1,axis=0)
vc_shift_diag_by1back_time_by1forw=np.roll(vc_shift_diag_by1back,-1,axis=0)
vc= vc_shift_diag_by1back*vc_shift_time_by1forw - vc_shift_diag_by1back_time_by1forw*video
vc_neg=vc.clip(max=0)
vc_neg=-1*vc_neg
vc=vc.clip(0)
return vc, vc_neg
def Reichardt_horizontal_2channels_Vectorized(video,timeDelay=1):
'''
Reichardt-Hassenstein inspired video processing
Put negative values into another tensor and then concat for the two (e.g. red and green) channels
'''
vc_shift_horz_by1back=np.roll(video,1,axis=2)
vc_shift_time_by1forw=np.roll(video,-1,axis=0)
vc_shift_horz_by1back_time_by1forw=np.roll(vc_shift_horz_by1back,-1,axis=0)
vc= vc_shift_horz_by1back*vc_shift_time_by1forw - vc_shift_horz_by1back_time_by1forw*video
vc_neg=vc.clip(max=0)
vc_neg=-1*vc_neg
vc=vc.clip(0)
return vc, vc_neg
def Reichardt_diagonal2_2channels_Vectorized(video,timeDelay=1):
'''
Reichardt-Hassenstein inspired video processing
Put negative values into another tensor and then concat for the two (e.g. red and green) channels
'''
vc_shift_diag_by1back=np.roll(video,(-1,1),axis=(1,2))
vc_shift_time_by1forw=np.roll(video,-1,axis=0)
vc_shift_diag_by1back_time_by1forw=np.roll(vc_shift_diag_by1back,-1,axis=0)
vc= vc_shift_diag_by1back*vc_shift_time_by1forw - vc_shift_diag_by1back_time_by1forw*video
vc_neg=vc.clip(max=0)
vc_neg=-1*vc_neg
vc=vc.clip(0)
return vc, vc_neg
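# --- Usage sketch (appended for illustration; not part of the original module) ---
# Run the correlators on a small random grayscale clip. The input shape follows the
# docstring above, [TimeSteps, H, W, 1]; each returned response keeps that shape.
# numpy is already imported as np at the top of this file.
if __name__ == "__main__":
    demo_video = np.random.rand(16, 32, 32, 1)
    responses = Reichardt8(demo_video, dirs=[0, 2, 4, 6])
    for response in responses:
        print(response.shape)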
| 34.5
| 104
| 0.732199
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 970
| 0.305608
|
592904d4f5f76e99f9c27babc8743707b85f9a4e
| 1,662
|
py
|
Python
|
shopdrawing.py
|
Maxim-Kovalenko/turtle-graphics-programms
|
768866f9b6658dc0933b0391387a6bdec64ad6ec
|
[
"Apache-2.0"
] | 1
|
2020-04-14T08:31:24.000Z
|
2020-04-14T08:31:24.000Z
|
shopdrawing.py
|
Maxim-Kovalenko/turtle-graphics-programms
|
768866f9b6658dc0933b0391387a6bdec64ad6ec
|
[
"Apache-2.0"
] | null | null | null |
shopdrawing.py
|
Maxim-Kovalenko/turtle-graphics-programms
|
768866f9b6658dc0933b0391387a6bdec64ad6ec
|
[
"Apache-2.0"
] | 1
|
2021-01-05T15:47:59.000Z
|
2021-01-05T15:47:59.000Z
|
from turtle import *
coeficient = 0.5
speed(5)
def base():
fillcolor("#fae0bd")
begin_fill()
pencolor("#c8835c")
pensize(10 * coeficient)
penup()
backward(100 * coeficient)
pendown()
for i in range(0,2):
forward(300 * coeficient)
right(90)
forward(200 * coeficient)
right(90)
end_fill()
def roof():
begin_fill()
fillcolor("#fe8e97")
pencolor("#fe8e97")
backward(40 * coeficient)
left(90)
forward(50 * coeficient)
right(90)
forward(380 * coeficient)
right(90)
forward(50 * coeficient)
right(90)
forward(40 * coeficient)
end_fill()
def window():
penup()
forward(250 * coeficient)
left(90)
forward(50 * coeficient)
pendown()
fillcolor("#87d5e0")
pencolor("#87d5e0")
begin_fill()
for f in range(0,4):
forward(90 * coeficient)
left(90)
end_fill()
def door():
penup()
left(90)
forward(150 * coeficient)
pendown()
pencolor("#de692e")
fillcolor("#de692e")
begin_fill()
for v in range(0,2):
forward(50 * coeficient)
right(90)
forward(130 * coeficient)
right(90)
end_fill()
def move():
penup()
goto(800 * coeficient,0)
pendown()
def tree():
fillcolor("#b56f2f")
pencolor("#b56f2f")
begin_fill()
for m in range(0,2):
forward(30 * coeficient)
right(90)
forward(200 * coeficient)
right(90)
end_fill()
base()
roof()
window()
door()
move()
tree()
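# Appended suggestion (not in the original script): when run standalone, the turtle
# window closes as soon as the drawing finishes; done() from the turtle module
# keeps it open until the user closes it.
done()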
| 17.494737
| 34
| 0.527677
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 90
| 0.054152
|
592b099ed5239bc2e197e2c20d2d55bdd277f278
| 881
|
py
|
Python
|
src/block_constants.py
|
cemulate/minecraft-hdl
|
a46da8d2a29aad9c2fc84037d677190c6db80dcd
|
[
"MIT"
] | 5
|
2015-09-11T04:13:01.000Z
|
2021-11-17T14:35:28.000Z
|
src/block_constants.py
|
cemulate/minecraft-hdl
|
a46da8d2a29aad9c2fc84037d677190c6db80dcd
|
[
"MIT"
] | null | null | null |
src/block_constants.py
|
cemulate/minecraft-hdl
|
a46da8d2a29aad9c2fc84037d677190c6db80dcd
|
[
"MIT"
] | 1
|
2021-03-15T17:31:27.000Z
|
2021-03-15T17:31:27.000Z
|
REDSTONE = 55
REPEATER = 93
TORCH = 75
AIR = 0
GLASS = 20
SLAB = 44
DOUBLE_SLAB = 43
WOOL = 35
DIR_WEST_POS_Z = 0
DIR_NORTH_NEG_X = 1
DIR_EAST_NEG_Z = 2
DIR_SOUTH_POS_X = 3
TORCH_ON_GROUND = 5
TORCH_POINTING_POS_X = 1
TORCH_POINTING_NEG_X = 2
TORCH_POINTING_POS_Z = 3
TORCH_POINTING_NEG_Z = 4
STONE_SLAB_TOP = 8
DOUBLE_SLAB_STONE = 0
WOOL_BLACK = 15
REPEATER_TOWARD_POS_X = 1
REPEATER_TOWARD_POS_Z = 2
REPEATER_TOWARD_NEG_X = 3
CLOSE_SIDE = 0
FAR_SIDE = 1
WOOL_NAMES = {0: "White",
1: "Orange",
2: "Magenta",
3: "Light blue",
4: "Yellow",
5: "Lime",
6: "Pink",
7: "Grey",
8: "Light grey",
9: "Cyan",
10: "Purple",
11: "Blue",
12: "Brown",
13: "Green",
14: "Red",
15: "Black"}
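# Illustrative lookup (appended; not part of the original constants file): a wool
# block's data value maps to its colour name, e.g. WOOL_NAMES[11] == "Blue" and
# WOOL_NAMES[WOOL_BLACK] == "Black".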
| 17.62
| 29
| 0.538025
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 120
| 0.136209
|
592b8f8cacb2754ab7e4528631c3f40cfdc1b7e7
| 4,973
|
py
|
Python
|
qfc/dirhandler.py
|
akhilkedia/qfc
|
101861bd2fb818564245249fc93f278752684b51
|
[
"MIT"
] | null | null | null |
qfc/dirhandler.py
|
akhilkedia/qfc
|
101861bd2fb818564245249fc93f278752684b51
|
[
"MIT"
] | null | null | null |
qfc/dirhandler.py
|
akhilkedia/qfc
|
101861bd2fb818564245249fc93f278752684b51
|
[
"MIT"
] | null | null | null |
import os
import subprocess
import sys
class CVSHandler():
""" Handler of CVS (git, mercurial, ...) directories.
The main purpose of this class is to cache external cvs command output, and determine the appropriate files to yield when navigating to a subdirectory of a project.
This basically means that the external command (ie git ls-files) is run once and cached, and when get_source_files is called on a subdirectory of the project root (ie project-root/subdir),
the filtering of all project files down to that subdirectory is done here.
"""
def __init__(self, cvs):
self._roots_cache = {}
self._not_tracked_cache = set()
self.cvs = cvs
def _get_root_from_cache(self, directory):
""" a directory is considered cached if it's the project root or a subdirectory of that project root.
returns the project root dir, or None if the directory is not cached.
"""
if directory in self._roots_cache:
return directory
if os.path.dirname(directory) == directory:
return None
return self._get_root_from_cache(os.path.dirname(directory))
def get_source_files(self, directory):
if directory in self._not_tracked_cache:
return None
root_dir = self._get_root_from_cache(directory)
if not root_dir:
try:
# check if it's a tracked cvs dir, if yes, get the project root and the source files
root_dir = self.cvs._get_root(directory)
self._roots_cache[root_dir] = self.cvs._get_tracked_files(root_dir)
except Exception as e:
# not a cvs tracked dir, save it to not issue that command again
self._not_tracked_cache.add(directory)
return None
files = self._roots_cache[root_dir]
# the passed directory argument is a subdirectory of the project root
if directory != root_dir:
rel_dir = os.path.relpath(directory, root_dir)
files = [f[len(rel_dir)+1:] for f in files if f.startswith(rel_dir)]
return files
class Git():
@staticmethod
def _get_root(directory):
return run_command("cd %s && git rev-parse --show-toplevel" % directory).strip()
@staticmethod
def _get_tracked_files(directory):
return run_command("cd %s && git ls-files && git ls-files --others --exclude-standard" % directory).strip().split('\n')
class Mercurial():
@staticmethod
def _get_root(directory):
return run_command("cd %s && hg root" % directory).strip()
@staticmethod
def _get_tracked_files(directory):
return run_command("cd %s && (hg status -marcu | cut -d' ' -f2)" % directory).strip().split('\n')
class DefaultDirHandler():
""" The default directory handler uses the 'find' external program to return all the files inside a given directory up to MAX_DEPTH levels deep (ie, if MAX_DEPTH=2, it returns all files inside that dir and all files in its immediate subdirectories)"""
def __init__(self):
self._cache = {}
self.MAX_DEPTH = 2
def _walk_down(self, start_dir):
try:
out = run_command(
"{ find %s -maxdepth %s -not -path '*/\\.*' -type d -print | sed 's!$!/!'; find %s -maxdepth %s -not -path '*/\\.*' -type f -or -type l ; } | sed -n 's|^%s/||p'" % (start_dir, self.MAX_DEPTH, start_dir, self.MAX_DEPTH, start_dir))
except subprocess.CalledProcessError as e:
# Find returns a non 0 exit status if listing a directory fails (for example, permission denied), but still output all files in other dirs
# ignore those failed directories.
out = e.output
if sys.version_info >= (3, 0):
out = out.decode('utf-8')
if not out:
return []
files = out.split('\n')
return [f for f in files if f]
def get_source_files(self, start_dir):
if not start_dir in self._cache:
self._cache[start_dir] = self._walk_down(start_dir)
return self._cache[start_dir]
def run_command(string):
''' fork a process to execute the command string given as argument, returning the string written to STDOUT '''
DEVNULL = open(os.devnull, 'wb')
out = subprocess.check_output(string, stderr=DEVNULL, shell=True)
if sys.version_info >= (3, 0):
return out.decode('utf-8')
return out
git = CVSHandler(Git)
hg = CVSHandler(Mercurial)
default = DefaultDirHandler()
def get_source_files(directory):
""" check first if the given directory is inside a git tracked project, if no, check with mercurial, if no, fallback to the default handler """
files = git.get_source_files(directory)
# if the returned files list is empty, it's considered not a tracked directory
if files:
return files
files = hg.get_source_files(directory)
if files:
return files
return default.get_source_files(directory)
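# --- Usage sketch (appended for illustration; not part of the original module) ---
# List the source files for the current working directory: git is tried first,
# then mercurial, then the find-based fallback.
if __name__ == "__main__":
    for path in get_source_files(os.getcwd()):
        print(path)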
| 42.87069
| 246
| 0.648904
| 4,041
| 0.812588
| 0
| 0
| 580
| 0.11663
| 0
| 0
| 1,998
| 0.40177
|
592c8f23fd0453baefac3223ac8d226123072b8f
| 436
|
py
|
Python
|
demo1/jsons.py
|
dollarkillerx/Python-Data-Analysis
|
f208d5ce9951e9fca2d084a89290100b7e543154
|
[
"MIT"
] | null | null | null |
demo1/jsons.py
|
dollarkillerx/Python-Data-Analysis
|
f208d5ce9951e9fca2d084a89290100b7e543154
|
[
"MIT"
] | null | null | null |
demo1/jsons.py
|
dollarkillerx/Python-Data-Analysis
|
f208d5ce9951e9fca2d084a89290100b7e543154
|
[
"MIT"
] | null | null | null |
import json
filename = "data.json"
mydata = {
"title":"我的测试数据",
"lesson":{
"python":"学习中",
'vue':"学习完毕",
"golang":"基本精通"
},
"games":{
"GAT":"一年没有玩了"
},
}
# Write the data to the file
with open(filename,'w',encoding="utf-8") as data:
# arguments: the data, the file handle, and the number of spaces of JSON indentation
json.dump(mydata,data,indent=4)
# Read the file back
with open(filename,'r',encoding='utf-8') as data:
# file handle
rdata = json.load(data)
print(rdata)
| 16.769231
| 49
| 0.538991
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 232
| 0.444444
|
592ca011fcc9c84fa4da0a8bde9dd4daf4629fd5
| 280
|
py
|
Python
|
Scripts/malware_scan/classess/progress.py
|
Team-Zed-cf/Team-Zed
|
662eee2948502fca0bdc477955db17e2d32f92aa
|
[
"MIT"
] | null | null | null |
Scripts/malware_scan/classess/progress.py
|
Team-Zed-cf/Team-Zed
|
662eee2948502fca0bdc477955db17e2d32f92aa
|
[
"MIT"
] | null | null | null |
Scripts/malware_scan/classess/progress.py
|
Team-Zed-cf/Team-Zed
|
662eee2948502fca0bdc477955db17e2d32f92aa
|
[
"MIT"
] | null | null | null |
import progressbar, time
from .colors import *
# progress bar
def animated_marker():
widgets = ['In Process: ', progressbar.AnimatedMarker()]
bar = progressbar.ProgressBar(widgets=widgets).start()
for i in range(18):
time.sleep(0.1)
bar.update(i)
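# Usage sketch (appended; not part of the original script): run the marker when this
# module is executed directly. It relies only on the progressbar import above.
if __name__ == "__main__":
    animated_marker()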
| 28
| 61
| 0.660714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 28
| 0.1
|
593150e1f3c9a373acbf0b4f5ce7f05a49bde1de
| 4,406
|
py
|
Python
|
single_subject_workflow.py
|
tknapen/reward_np_analysis
|
29bcc02d5acd23689dee7059ecb1607d2814cdf0
|
[
"MIT"
] | null | null | null |
single_subject_workflow.py
|
tknapen/reward_np_analysis
|
29bcc02d5acd23689dee7059ecb1607d2814cdf0
|
[
"MIT"
] | null | null | null |
single_subject_workflow.py
|
tknapen/reward_np_analysis
|
29bcc02d5acd23689dee7059ecb1607d2814cdf0
|
[
"MIT"
] | null | null | null |
# from nipype import config
# config.enable_debug_mode()
# Importing necessary packages
import os
import os.path as op
import glob
import json
import nipype
from nipype import config, logging
import matplotlib.pyplot as plt
import nipype.interfaces.fsl as fsl
import nipype.pipeline.engine as pe
import nipype.interfaces.utility as util
import nipype.interfaces.io as nio
from nipype.utils.filemanip import copyfile
import nibabel as nib
from IPython.display import Image
from nipype.interfaces.utility import Function, Merge, IdentityInterface
from nipype.interfaces.io import SelectFiles, DataSink
from IPython.display import Image
from IPython import embed as shell
from workflows.pupil_workflow import create_pupil_workflow
from workflows.bold_wholebrain_fir_workflow import create_bold_wholebrain_fir_workflow
# we will create a workflow from a BIDS formatted input, at first for the specific use case
# of a 7T PRF experiment's preprocessing.
# a project directory that we assume has already been created.
raw_data_dir = '/home/raw_data/-2014/reward/human_reward/data/'
preprocessed_data_dir = '/home/shared/-2014/reward/new/'
FS_subject_dir = os.path.join(raw_data_dir, 'FS_SJID')
# booleans that determine whether given stages of the
# analysis are run
pupil = True
wb_fir = True
for si in range(1,7): #
sub_id, FS_ID = 'sub-00%i'%si, 'sub-00%i'%si
sess_id = 'ses-*'
# now we set up the folders and logging there.
opd = op.join(preprocessed_data_dir, sub_id)
try:
os.makedirs(op.join(opd, 'log'))
except OSError:
pass
config.update_config({ 'logging': {
'log_directory': op.join(opd, 'log'),
'log_to_file': True,
'workflow_level': 'INFO',
'interface_level': 'INFO'
},
'execution': {
'stop_on_first_crash': False
}
})
logging.update_logging(config)
# load the sequence parameters from json file
with open(os.path.join(raw_data_dir, 'acquisition_parameters.json')) as f:
json_s = f.read()
acquisition_parameters = json.loads(json_s)
# load the analysis parameters from json file
with open(os.path.join(raw_data_dir, 'analysis_parameters.json')) as f:
json_s = f.read()
analysis_info = json.loads(json_s)
# load the analysis/experimental parameters for this subject from json file
with open(os.path.join(raw_data_dir, sub_id ,'experimental_parameters.json')) as f:
json_s = f.read()
experimental_parameters = json.loads(json_s)
analysis_info.update(experimental_parameters)
if not op.isdir(os.path.join(preprocessed_data_dir, sub_id)):
try:
os.makedirs(os.path.join(preprocessed_data_dir, sub_id))
except OSError:
pass
# copy json files to preprocessed data folder
# this allows these parameters to be updated and synced across subjects by changing only the raw data files.
copyfile(os.path.join(raw_data_dir, 'acquisition_parameters.json'), os.path.join(preprocessed_data_dir, 'acquisition_parameters.json'), copy = True)
copyfile(os.path.join(raw_data_dir, 'analysis_parameters.json'), os.path.join(preprocessed_data_dir, 'analysis_parameters.json'), copy = True)
copyfile(os.path.join(raw_data_dir, sub_id ,'experimental_parameters.json'), os.path.join(preprocessed_data_dir, sub_id ,'experimental_parameters.json'), copy = True)
if pupil:
pwf = create_pupil_workflow(analysis_info,'pupil')
pwf.inputs.inputspec.sub_id = sub_id
pwf.inputs.inputspec.preprocessed_directory = preprocessed_data_dir
pwf.write_graph(opd + '_pupil.svg', format='svg', graph2use='colored', simple_form=False)
pwf.run('MultiProc', plugin_args={'n_procs': 6})
if wb_fir:
wbfwf = create_bold_wholebrain_fir_workflow(analysis_info,'wb_fir')
wbfwf.inputs.inputspec.sub_id = sub_id
wbfwf.inputs.inputspec.preprocessed_directory = preprocessed_data_dir
wbfwf.write_graph(opd + '_wb_fir.svg', format='svg', graph2use='colored', simple_form=False)
wbfwf.run('MultiProc', plugin_args={'n_procs': 6})
| 40.422018
| 170
| 0.682705
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,322
| 0.300045
|
593382e994272402d7ed09a0a47388a40b5bfde8
| 3,692
|
py
|
Python
|
thm.py
|
brenolf/k-flow
|
f2ab6e2e6aa09aad437acb2ef071257adc0464c1
|
[
"Apache-2.0"
] | null | null | null |
thm.py
|
brenolf/k-flow
|
f2ab6e2e6aa09aad437acb2ef071257adc0464c1
|
[
"Apache-2.0"
] | null | null | null |
thm.py
|
brenolf/k-flow
|
f2ab6e2e6aa09aad437acb2ef071257adc0464c1
|
[
"Apache-2.0"
] | null | null | null |
import sys
N = -1
G = None
H = None
vis = None
vis_aux = None
valence = None
flows = {}
answer = []
allowed_flows = {
3 : [-1, 1],
4 : [-1, 1, 2],
5 : [-1, 1, 2, -2]
}
def has_k_flow (graph):
global N, G, H, vis, valence, flows
G = graph
N = len(G)
H = [[0] * N for i in xrange(0, N)]
vis = [False] * N
valence = [0] * N
for v in xrange(0, N):
valence[v] = len(G[v])
if valence[v] not in flows and valence[v] != 0:
flows[valence[v]] = getWeights(valence[v])
for v in xrange(0, N):
G[v] = sorted(G[v], key=lambda u : valence[u], reverse=True)
del answer[:]
v = find_next()
return dfs(v)
def getWeights (VALENCE, e = 0):
global answer, E, edges
if e == 0:
del answer[:]
edges = [0] * VALENCE
elif e >= VALENCE:
return None
isLast = (e == (VALENCE - 1))
weight2 = [0, 2]
for w in xrange(0, 2):
edges[e] = weight2[w]
getWeights(VALENCE, e + 1)
if isLast:
edges2 = sum(edges) / 2
if (VALENCE - edges2) % 2 == 0 and not (edges2 == VALENCE and edges2 % 2 != 0):
answer.append(edges[:])
if e == 0:
return answer[:]
def find_next ():
vertices = xrange(0, N)
vertices = filter(lambda v : not vis[v], vertices)
# pick most constrained variable
vertices = sorted(vertices, key=lambda v : valence[v], reverse=True)
return vertices.pop(0)
def dfs (v = 0):
vis[v] = True
if valence[v] == 0:
sys.stderr.write ('error: vertex "%d" is 0-valent. Have you forgotten it?\n' % v)
exit(1)
constraints, neighbours = getConstraints(v)
weights = flows[valence[v]]
W = select(constraints, weights, v)
isLast = (sum(vis) == N)
if len(W) == 0:
vis[v] = False
return False
for w in W:
clear(v, neighbours)
assign(v, w)
counter = 0
for u in G[v]:
if not vis[u]:
counter += 1
if dfs(u):
return True
else:
break
deadlock = (not isLast and counter == 0)
if deadlock and dfs(find_next()):
return True
elif isLast and checkEulerian():
answer.append(H[:][:])
return True
vis[v] = False
clear(v, neighbours)
return False
def dfs_check(v, one_vertex, component, path):
global vis_aux
vis_aux[v] = component
path.append(v)
recursive_ones = 0
for u in G[v]:
if vis_aux[u] == 0 and H[v][u] == 0:
recursive_ones += dfs_check(u, one_vertex, component, path)
return int(one_vertex[v]) + recursive_ones
def checkEulerian():
global vis_aux
# for v in xrange(0, N):
# weight2 = sum(H[v]) / 2
# if (valence[v] - weight2) % 2 != 0:
# return False
vis_aux = [False] * N
one_vertex = [(sum(H[v]) / 2) % 2 != 0 for v in xrange(0, N)]
components = 0
result = True
paths = {}
for v in xrange(0, N):
if vis_aux[v] == 0:
components += 1
path = []
C_ones = dfs_check(v, one_vertex, components, path)
paths[components] = path
if C_ones % 2 != 0:
result = False
if result and False:
for i in xrange(0, components):
print i + 1, paths[i + 1]
return result
def getConstraints (v):
constraints = {}
neighbours = []
i = 0
for u in G[v]:
if H[v][u] != 0 or H[u][v] != 0:
constraints[i] = 2
neighbours.append(u)
i += 1
return constraints, neighbours
def select (constraints, possibilities, v):
r = []
for p in possibilities:
for field in constraints:
if p[field] != constraints[field]:
break
else:
r.append(p[:])
def valid (vector):
for i in xrange(0, len(vector)):
if vis[G[v][i]] and vector[i] == 2 and i not in constraints:
return False
return True
return [i for i in r if valid(i)]
def assign (v, weights):
for u in G[v]:
w = weights.pop(0)
H[u][v] = H[v][u] = w
def clear (v, neighbours):
for u in G[v]:
if u not in neighbours:
H[u][v] = H[v][u] = 0
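# Input-format note (appended for illustration; not part of the original script):
# graphs are plain adjacency lists, G[v] = list of neighbour vertex ids; for example
# the complete graph K4 would be written as [[1, 2, 3], [0, 2, 3], [0, 1, 3], [0, 1, 2]]
# and queried with has_k_flow(...).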
| 16.93578
| 83
| 0.597237
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 194
| 0.052546
|
5933bc9be206bb31b3b20546a2f728540ffb2f7a
| 45,642
|
py
|
Python
|
ms/MS/index.py
|
jcnelson/syndicate
|
4837265be3e0aa18cdf4ee50316dbfc2d1f06e5b
|
[
"Apache-2.0"
] | 16
|
2015-01-02T15:39:04.000Z
|
2016-03-17T06:38:46.000Z
|
ms/MS/index.py
|
jcnelson/syndicate
|
4837265be3e0aa18cdf4ee50316dbfc2d1f06e5b
|
[
"Apache-2.0"
] | 37
|
2015-01-28T20:58:05.000Z
|
2016-03-22T04:01:32.000Z
|
ms/MS/index.py
|
jcnelson/syndicate
|
4837265be3e0aa18cdf4ee50316dbfc2d1f06e5b
|
[
"Apache-2.0"
] | 8
|
2015-04-08T02:26:03.000Z
|
2016-03-04T05:56:24.000Z
|
#!/usr/bin/python
"""
Copyright 2014 The Trustees of Princeton University
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import storage.storagetypes as storagetypes
import storage.shardcounter as shardcounter
import protobufs.ms_pb2 as ms_pb2
import logging
import random
import os
import types
import errno
import time
import datetime
import collections
import pickle
import base64
from common.msconfig import *
class MSEntryIndexNode( storagetypes.Object ):
"""
Directory entry index node.
"""
parent_id = storagetypes.String( default="None" )
file_id = storagetypes.String( default="None", indexed=False )
volume_id = storagetypes.Integer( default=-1 )
dir_index = storagetypes.Integer( default=-1 )
generation = storagetypes.Integer( default=-1 )
alloced = storagetypes.Boolean( default=False )
nonce = storagetypes.Integer( default=0, indexed=False ) # for uniqueness
class MSEntryEntDirIndex( MSEntryIndexNode ):
"""
(file_id --> dir_index) relation
NOTE: define this separately so it can be indexed independently of other node types.
"""
@classmethod
def make_key_name( cls, volume_id, file_id ):
# name for index nodes that resolve file_id to dir_index (entry index node)
return "MSEntryEntDirIndex: volume_id=%s,file_id=%s" % (volume_id, file_id)
class MSEntryDirEntIndex( MSEntryIndexNode ):
"""
(dir_index --> file_id) relation
NOTE: define this separately so it can be indexed independently of other node types
"""
@classmethod
def make_key_name( cls, volume_id, parent_id, dir_index ):
# name for index nodes that resolve dir_index to file_id (directory index node)
return "MSEntryDirEntIndex: volume_id=%s,parent_id=%s,dir_index=%s" % (volume_id, parent_id, dir_index)
class MSEntryIndex( storagetypes.Object ):
"""
Operations on the per-directory entry index: allocating and freeing index slots,
compacting the index, and querying the (dir_index <--> file_id) relation.
"""
NUM_COUNTER_SHARDS = 20
@classmethod
def __parent_child_counter_name( cls, volume_id, parent_id ):
return "MSEntryIndex: volume_id=%s,parent_id=%s" % (volume_id, parent_id)
@classmethod
@storagetypes.concurrent
def __update_index_node_async( cls, volume_id, parent_id, file_id, dir_index, alloced, **attrs ):
"""
Set the allocation status of a directory index node (but not its matching entry index node).
Return 0 on success
Return -EINVAL if the given file_id doesn't match the directory index node's file_id
Return -EEXIST if the given directory index node's allocation status is the same as alloced
"""
index_key_name = MSEntryDirEntIndex.make_key_name( volume_id, parent_id, dir_index )
index_key = storagetypes.make_key( MSEntryDirEntIndex, index_key_name )
old_alloced = None
idx = yield index_key.get_async()
if idx is None:
old_alloced = alloced
idx = MSEntryDirEntIndex( key=index_key, volume_id=volume_id, parent_id=parent_id, file_id=file_id, dir_index=dir_index, alloced=alloced, **attrs )
else:
if idx.file_id != file_id:
# wrong node
storagetypes.concurrent_return( -errno.EINVAL )
old_alloced = idx.alloced
if old_alloced != alloced:
# changing allocation status
idx.populate( -1, volume_id=volume_id, parent_id=parent_id, file_id=file_id, dir_index=dir_index, alloced=alloced, **attrs )
yield idx.put_async()
storagetypes.concurrent_return( 0 )
else:
storagetypes.concurrent_return( -errno.EEXIST )
@classmethod
@storagetypes.concurrent
def __update_or_alloc_async( cls, volume_id, parent_id, file_id, dir_index, generation, alloced ):
"""
Update or allocate the index node pair and/or set the directory index node's allocation status, asynchronously.
If the directory index node does not exist, it and its entry index node will be created and the allocation status set accordingly.
If the directory index node exists, but has a different allocation status, then the allocation status will be set accordingly.
If we succeed in allocating a new index node, increment the number of children in the parent directory.
Return True on success.
Return False if the index node existed, but the file_id did not match its record or the allocation status did not change.
"""
index_key_name = MSEntryDirEntIndex.make_key_name( volume_id, parent_id, dir_index )
idx = None
nonce = random.randint( -2**63, 2**63 - 1 )
result = True
if idx is None:
idx = yield MSEntryDirEntIndex.get_or_insert_async( index_key_name, volume_id=volume_id, parent_id=parent_id, file_id=file_id, dir_index=dir_index, generation=generation, alloced=alloced, nonce=nonce )
if idx.nonce == nonce:
# created.
"""
if alloced:
logging.info("Directory /%s/%s: allocated index slot for /%s/%s at %s" % (volume_id, parent_id, volume_id, file_id, dir_index))
else:
logging.info("Directory /%s/%s: freed index slot at %s" % (volume_id, parent_id, dir_index))
"""
# need to create an entry index node as well.
entry_key_name = MSEntryEntDirIndex.make_key_name( volume_id, file_id )
entry_key = storagetypes.make_key( MSEntryEntDirIndex, entry_key_name )
entry_idx = MSEntryEntDirIndex( key=entry_key, volume_id=volume_id, parent_id=parent_id, file_id=file_id, dir_index=dir_index, generation=generation, alloced=alloced, nonce=nonce )
yield entry_idx.put_async()
# storagetypes.memcache.set( entry_key_name, entry_idx )
else:
# already exists. changing allocation status?
if idx.alloced != alloced:
# allocation status needs to be changed
# want to change allocation status
rc = yield storagetypes.transaction_async( lambda: cls.__update_index_node_async( volume_id, parent_id, file_id, dir_index, alloced, generation=generation ), xg=True )
if rc == 0:
result = True
else:
logging.error("__update_index_node_async(/%s/%s file_id=%s dir_index=%s alloced=%s) rc = %s" % (volume_id, parent_id, file_id, dir_index, alloced, rc ))
result = False
else:
if alloced and idx.file_id != file_id:
# collision
logging.error("Directory /%s/%s: collision inserting /%s/%s at %s (occupied by /%s/%s)" % (volume_id, parent_id, volume_id, file_id, dir_index, volume_id, idx.file_id))
result = False
else:
# created/set correctly
result = True
"""
if result:
storagetypes.memcache.set( index_key_name, idx )
"""
storagetypes.concurrent_return( result )
@classmethod
def __alloc( cls, volume_id, parent_id, file_id, dir_index, generation, async=False ):
"""
Get or create an allocated index node, for the given directory index.
Return True if we succeeded.
Return False if the node already exists for this dir_index value, or the file ID is wrong.
"""
result_fut = cls.__update_or_alloc_async( volume_id, parent_id, file_id, dir_index, generation, True )
if not async:
storagetypes.wait_futures( [result_fut] )
return result_fut.get_result()
else:
return result_fut
@classmethod
def __free( cls, volume_id, parent_id, file_id, dir_index, async=False ):
"""
Get or create a free index node, for a given directory index.
Return True if we succeeded.
Return False if the node already exists for this dir_index value, or if it's already freed (or the file ID is wrong)
"""
result_fut = cls.__update_or_alloc_async( volume_id, parent_id, file_id, dir_index, -1, False )
if not async:
storagetypes.wait_futures( [result_fut] )
return result_fut.get_result()
else:
return result_fut
@classmethod
def __num_children_inc( cls, volume_id, parent_id, num_shards, do_transaction=True, async=False ):
"""
Increment the number of children in a directory.
"""
counter_name = cls.__parent_child_counter_name( volume_id, parent_id )
if async:
fut = shardcounter.increment_async( counter_name, num_shards, do_transaction=do_transaction, use_memcache=False )
return fut
else:
shardcounter.increment( counter_name, num_shards, do_transaction=do_transaction, use_memcache=False )
return 0
@classmethod
def __num_children_dec( cls, volume_id, parent_id, num_shards, do_transaction=True, async=False ):
"""
Decrement the number of children in a directory
"""
counter_name = cls.__parent_child_counter_name( volume_id, parent_id )
if async:
fut = shardcounter.decrement_async( counter_name, num_shards, do_transaction=do_transaction, use_memcache=False )
return fut
else:
shardcounter.decrement( counter_name, num_shards, do_transaction=do_transaction, use_memcache=False )
return 0
@classmethod
def __num_children_delete( cls, volume_id, parent_id, num_shards, async=False ):
"""
Delete a shard counter for the number of children.
"""
counter_name = cls.__parent_child_counter_name( volume_id, parent_id )
if async:
fut = shardcounter.delete_async( counter_name, num_shards )
return fut
else:
shardcounter.delete( counter_name, num_shards )
return 0
@classmethod
@storagetypes.concurrent
def __read_node( cls, file_id, index, idx_key, check_file_id=True ):
"""
Read a dir-index node, given its key.
Return (rc, idx):
* return -ENOENT if the index node doesn't exist
* return -EINVAL if the file IDs don't match, and check_file_id is true
* return -EPERM if the directory indexes don't match
"""
idx = yield idx_key.get_async( use_cache=False, use_memcache=False )
if idx is None:
storagetypes.concurrent_return( (-errno.ENOENT, None) )
if check_file_id and idx.file_id != file_id:
storagetypes.concurrent_return( (-errno.EINVAL, None) )
if idx.dir_index != index:
storagetypes.concurrent_return( (-errno.EPERM, None) )
storagetypes.concurrent_return( (0, idx) )
@classmethod
def __read_dirent_node( cls, volume_id, parent_id, file_id, index, async=False, check_file_id=True ):
"""
Read a node key, and verify that it is consistent.
Return (rc, idx)
"""
idx_key_name = MSEntryDirEntIndex.make_key_name( volume_id, parent_id, index )
idx_key = storagetypes.make_key( MSEntryDirEntIndex, idx_key_name )
ret_fut = cls.__read_node( file_id, index, idx_key, check_file_id=check_file_id )
if async:
return ret_fut
else:
storagetypes.wait_futures( [ret_fut] )
return ret_fut.get_result()
@classmethod
def __compactify_get_candidates_delete( cls, volume_id, parent_id, dir_index_cutoff, async=False ):
"""
Find the set of allocated index nodes beyond a given offset, suitable for swapping into a newly-freed slot.
"""
to_compactify = MSEntryDirEntIndex.ListAll( {"MSEntryDirEntIndex.parent_id ==": parent_id,
"MSEntryDirEntIndex.volume_id ==": volume_id,
"MSEntryDirEntIndex.alloced ==" : True,
"MSEntryDirEntIndex.dir_index >=": dir_index_cutoff}, async=async, limit=1024 )
return to_compactify
@classmethod
def FindFreeGaps( cls, volume_id, parent_id, dir_index_cutoff, async=False, limit=1024 ):
"""
Find the set of unallocated index slots less than a given offset, suitable for holding a newly-allocated directory index.
"""
to_compactify = MSEntryDirEntIndex.ListAll( {"MSEntryDirEntIndex.parent_id ==": parent_id,
"MSEntryDirEntIndex.volume_id ==": volume_id,
"MSEntryDirEntIndex.alloced ==" : True,
"MSEntryDirEntIndex.dir_index <": dir_index_cutoff}, async=async, limit=limit )
gaps = list( set(range(0, dir_index_cutoff)) - set([idx.dir_index for idx in to_compactify]) )
return gaps
@classmethod
def __compactify_swap( cls, volume_id, parent_id, alloced_file_id, alloced_dir_index, free_file_id, free_dir_index, async=False ):
"""
Atomically swap an allocated directory index node with an unallocated (or non-existant) directory index node, thereby placing the
allocated directory index node into the "gap" left by the unallocated directory index node.
This will delete the unallocated directory index node and its companion entry index node, and move the
allocated directory index node's companion entry index node into place.
alloced_file_id corresponds to the existing MSEntry (i.e. the one associated with the allocated index node)
free_file_id corresponds to the deleted MSEntry, if applicable (i.e. the one associated with the free index node). It can be None if there is no index node to delete for the file id.
Return the dir index of the overwritten gap node on success, or None if free_file_id was None
Return -ENOENT if the allocated dir index node no longer exists.
Return -EAGAIN if we raced another process to allocate this slot and lost
Return -ESTALE if the index allocation data is invalid (i.e. the free index got allocated, or the allocated index got freed)
"""
alloced_idx_key_name = MSEntryDirEntIndex.make_key_name( volume_id, parent_id, alloced_dir_index )
alloced_entry_idx_key_name = MSEntryEntDirIndex.make_key_name( volume_id, alloced_file_id )
free_idx_key_name = MSEntryDirEntIndex.make_key_name( volume_id, parent_id, free_dir_index )
alloced_entry_idx_key = storagetypes.make_key( MSEntryEntDirIndex, alloced_entry_idx_key_name )
free_idx_key = storagetypes.make_key( MSEntryDirEntIndex, free_idx_key_name )
# if the free file ID is not known, get it
if free_file_id is None:
free_idx_data = cls.__read_dirent_node( volume_id, parent_id, None, free_dir_index, check_file_id=False )
if free_idx_data is not None:
free_idx_rc, free_idx = free_idx_data
# it's okay if this index node does not exist
if free_idx_rc != 0 and free_idx_rc != -errno.ENOENT:
# some other error
logging.error("/%s/%s: __read_dirent_node( /%s/%s, %s ) rc = %s" % (volume_id, parent_id, volume_id, free_file_id, free_dir_index, free_idx_rc ))
if async:
return storagetypes.FutureWrapper( free_idx_rc )
else:
return free_idx_rc
elif free_idx_rc == 0 and free_idx is not None:
if free_idx.alloced:
logging.error("/%s/%s: free index (/%s/%s, %s) is allocated" % (volume_id, parent_id, volume_id, free_idx.file_id, free_dir_index) )
storagetypes.memcache.delete_multi( [alloced_idx_key_name, alloced_entry_idx_key_name, free_idx_key_name] )
if async:
return storagetypes.FutureWrapper( -errno.ESTALE )
else:
return -errno.ESTALE
else:
logging.info("/%s/%s: file id of /%s/%s at %s is %s\n" % (volume_id, parent_id, volume_id, parent_id, free_dir_index, free_idx.file_id) )
@storagetypes.concurrent
def do_swap( free_file_id ):
# confirm that the allocated directory index node and free directory index node still exist
free_idx_data = None
free_idx_rc = None
free_idx = None
free_idx_file_id = None
check_free_file_id = True
if free_file_id is None:
check_free_file_id = False
alloced_idx_data, free_idx_data = yield cls.__read_dirent_node( volume_id, parent_id, alloced_file_id, alloced_dir_index, async=True ), cls.__read_dirent_node( volume_id, parent_id, free_file_id, free_dir_index, async=True, check_file_id=check_free_file_id )
alloced_idx_rc, alloced_idx = alloced_idx_data
if free_idx_data is not None:
free_idx_rc, free_idx = free_idx_data
# possible that we raced another compactify operation and lost (in which case the allocated node might be different than what we expect)
if alloced_idx_rc != 0:
logging.error("/%s/%s: alloced index (/%s/%s, %s) rc = %s" % (volume_id, parent_id, volume_id, alloced_file_id, alloced_dir_index, alloced_idx_rc) )
storagetypes.concurrent_return( (-errno.EAGAIN, None, None) )
elif not alloced_idx.alloced:
logging.error("/%s/%s: alloced index (/%s/%s, %s) is free" % (volume_id, parent_id, volume_id, alloced_file_id, alloced_dir_index) )
storagetypes.concurrent_return( (-errno.ESTALE, None, None) )
if free_idx_data is not None:
if free_idx_rc != 0:
if free_idx_rc == -errno.ENOENT:
# the entry doesn't exist, which is fine by us since we're about to overwrite it anyway
free_idx_rc = None
free_idx = None
free_idx_data = None
else:
logging.error("/%s/%s: __read_dirent_node(/%s/%s, %s) rc = %s" % (volume_id, parent_id, volume_id, free_file_id, free_dir_index, free_idx_rc) )
storagetypes.concurrent_return( (free_idx_rc, None, None) )
elif free_idx.alloced:
logging.error("/%s/%s: free index (/%s/%s, %s) is allocated" % (volume_id, parent_id, volume_id, free_idx.file_id, free_dir_index) )
storagetypes.concurrent_return( (-errno.ESTALE, None, None) )
elif free_idx.dir_index != free_dir_index:
raise Exception("/%s/%s: free index slot mismatch: %s != %s" % (volume_id, free_file_id, free_idx.dir_index, free_dir_index))
else:
# save this for later...
free_idx_file_id = free_idx.file_id
# sanity check
if alloced_idx.dir_index != alloced_dir_index:
raise Exception("/%s/%s: allocated index slot mismatch: %s != %s" % (volume_id, alloced_file_id, alloced_idx.dir_index, alloced_dir_index))
# do the swap:
# * overwrite the free dir index node with the allocated dir index node's data (moving it into place over the freed one)
# * update the alloced ent node with the free dir index node's dir index (compactifying the index)
new_dir_idx = MSEntryDirEntIndex( key=free_idx_key, **alloced_idx.to_dict() )
new_entry_dir_idx = MSEntryEntDirIndex( key=alloced_entry_idx_key, **alloced_idx.to_dict() ) # overwrites existing entry index node
new_dir_idx.dir_index = free_dir_index
new_entry_dir_idx.dir_index = free_dir_index
logging.debug( "swap index slot of /%s/%s: slot %s --> slot %s (overwrites %s)" % (volume_id, alloced_file_id, alloced_dir_index, free_dir_index, free_file_id) )
yield new_dir_idx.put_async(), new_entry_dir_idx.put_async(), alloced_idx.key.delete_async()
storagetypes.memcache.delete_multi( [alloced_idx_key_name, alloced_entry_idx_key_name, free_idx_key_name] )
storagetypes.concurrent_return( (0, alloced_idx, free_idx_file_id) )
@storagetypes.concurrent
def swap( free_file_id ):
rc, alloced_idx, free_idx_file_id = yield storagetypes.transaction_async( lambda: do_swap( free_file_id ), xg=True )
if rc < 0:
storagetypes.concurrent_return( rc )
old_dir_index = None
if free_file_id is None:
free_file_id = free_idx_file_id
if free_file_id is not None:
# blow away the newly-freed index node
old_entry_idx_key_name = MSEntryEntDirIndex.make_key_name( volume_id, free_file_id )
old_entry_idx_key = storagetypes.make_key( MSEntryEntDirIndex, old_entry_idx_key_name )
yield old_entry_idx_key.delete_async()
storagetypes.memcache.delete( old_entry_idx_key_name )
old_dir_index = alloced_idx.dir_index
storagetypes.concurrent_return( old_dir_index )
rc_fut = swap( free_file_id )
if async:
return rc_fut
else:
storagetypes.wait_futures( [rc_fut] )
return rc_fut.get_result()
@classmethod
@storagetypes.concurrent
def __compactify_remove_index_async( cls, volume_id, parent_id, dead_file_id, dead_dir_index ):
"""
Remove a freed index slot's node data.
"""
idx_key_name = MSEntryDirEntIndex.make_key_name( volume_id, parent_id, dead_dir_index )
ent_key_name = MSEntryEntDirIndex.make_key_name( volume_id, dead_file_id )
idx_key = storagetypes.make_key( MSEntryDirEntIndex, idx_key_name )
ent_key = storagetypes.make_key( MSEntryEntDirIndex, ent_key_name )
@storagetypes.concurrent
def delete_index_if_unallocated():
idx_node = yield idx_key.get_async( use_cache=False, use_memcache=False )
if idx_node is None:
# already gone
storagetypes.concurrent_return( 0 )
if not idx_node.alloced:
yield idx_key.delete_async()
storagetypes.concurrent_return( 0 )
yield ent_key.delete_async(), storagetypes.transaction_async( delete_index_if_unallocated )
storagetypes.memcache.delete_multi( [idx_key_name, ent_key_name] )
@classmethod
def __compactify_child_delete( cls, volume_id, parent_id, free_file_id, free_dir_index, dir_index_cutoff ):
"""
Repeatedly find a child's index node that (1) is allocated, and (2) is beyond a given cutoff (i.e. the number of index nodes at the time of the call),
and then atomically swap the identified freed node with child's node in the index order. The effect is that allocated index nodes at the end
of the index get moved to replace the gaps in the index, thereby compactifying it.
Return (old_dir_index, free dir index node) on success
Return -EPERM if no compactification can happen (i.e. all children have directory index values smaller than the maximum)
Return -EAGAIN if the caller should refresh the parent directory index maximum value
"""
# find all entries in parent with a dir index greater than the current one
to_compactify = None
while True:
to_compactify = cls.__compactify_get_candidates_delete( volume_id, parent_id, dir_index_cutoff )
if len(to_compactify) > 0:
# it's possible there are more than one. Pick one that's allocated (but go in random order)
order = range( 0, len(to_compactify) )
random.shuffle( order )
idx = None
for i in order:
if to_compactify[i].alloced:
idx = to_compactify[i]
break
if idx is None:
# try again--there are no candidates to swap at this time
logging.info("Directory /%s/%s: no compactifiable candidates >= %s" % (volume_id, parent_id, dir_index_cutoff))
return (-errno.EAGAIN, None)
old_dir_index = cls.__compactify_swap( volume_id, parent_id, idx.file_id, idx.dir_index, free_file_id, free_dir_index )
if old_dir_index >= 0:
# swapped!
return (old_dir_index, idx)
else:
logging.info("Directory /%s/%s: __compactify_swap(%s (%s <--> %s)) rc = %s" % (volume_id, parent_id, idx.file_id, free_dir_index, idx.dir_index, old_dir_index))
if old_dir_index == -errno.ENOENT:
# entry queried doesn't exist, for some reason. Need dir_index_cutoff to be refreshed
return (-errno.EAGAIN, None)
if old_dir_index in [-errno.EAGAIN, -errno.EPERM, -errno.ESTALE]:
# had multiple concurrent deletes, and they tried to operate on the same entry. Try again
continue
else:
# No compactification needs to happen
# (assuming the ListAll returned consistent data)
logging.info("Directory /%s/%s: no compactifiable candidates at this time" % (volume_id, parent_id))
return (-errno.EPERM, None)
@classmethod
def __compactify_parent_delete( cls, volume_id, parent_id, free_file_id, free_dir_index, num_shards, compactify_continuation=None ):
"""
Given a free directory index, repeatedly find a child with a
directory index value that can be swapped into a gap in the parent's index.
That is, find children with index values that are beyond the number of children,
and swap their index nodes with index nodes that represent gaps.
"""
old_max_cutoff = None
while True:
# refresh the index max cutoff--it may have changed
# the cutoff is the new number of children, after this entry has been deleted
parent_max_cutoff = cls.GetNumChildren( volume_id, parent_id, num_shards ) - 1
if parent_max_cutoff is None:
# directory doesn't exist anymore...nothing to compactify
logging.info("Index node /%s/%s does not exist" % (volume_id, parent_id) )
return 0
if parent_max_cutoff == 0:
# nothing left to compactify!
logging.info("Directory /%s/%s appears to be empty" % (volume_id, parent_id))
return 0
if old_max_cutoff is not None:
# choose the smallest parent size seen so far as the cutoff, since it maximizes the number of entries
# that can be selected to fill the gap. If we don't do this, we could accidentally
# loop forever by never finding an entry to replace the gap.
parent_max_cutoff = min( old_max_cutoff, parent_max_cutoff )
if parent_max_cutoff < free_dir_index:
# gap no longer exists--the directory shrank out from under it
logging.info("Directory /%s/%s compactification threshold %s exceeded (by %s)" % (volume_id, parent_id, parent_max_cutoff, free_dir_index) )
return 0
if parent_max_cutoff == free_dir_index:
# gap is at the end.
logging.info("Directory /%s/%s entry is at the end (%s)" % (volume_id, parent_id, free_dir_index))
rc_fut = cls.__compactify_remove_index_async( volume_id, parent_id, free_file_id, free_dir_index )
storagetypes.wait_futures( [rc_fut] )
return 0
old_max_cutoff = parent_max_cutoff
replaced_dir_index, child_idx = cls.__compactify_child_delete( volume_id, parent_id, free_file_id, free_dir_index, parent_max_cutoff )
if replaced_dir_index >= 0:
# success!
if compactify_continuation is not None:
compactify_continuation( compacted_index_node=child_idx, replaced_index=replaced_dir_index )
# verify that we didn't leave a gap by compactifying
# (can happen if another process creates an entry while we're compactifying)
new_parent_max_cutoff = cls.GetNumChildren( volume_id, parent_id, num_shards )
if new_parent_max_cutoff is None:
# directory doesn't exist anymore...nothing to compactify
logging.info("Index node /%s/%s does not exist" % (volume_id, parent_id) )
return 0
if parent_max_cutoff < new_parent_max_cutoff:
# left a gap--need to compactify again
free_dir_index = parent_max_cutoff
old_max_cutoff = None
continue
else:
# done!
logging.info("Directory /%s/%s compactified" % (volume_id, parent_id))
return 0
elif replaced_dir_index == -errno.EAGAIN or replaced_dir_index == -errno.EPERM:
# need to re-check the maximum cutoff
# (NOTE: EPERM can mean that the children beyond the cutoff aren't showing up in queries yet)
# TODO: can loop forever?
logging.info("__compactify_child_delete( /%s/%s index=%s threshold=%s ) rc = %s" % (volume_id, parent_id, free_dir_index, parent_max_cutoff, replaced_dir_index))
continue
else:
logging.error("BUG: failed to compactify /%s/%s, rc = %s\n", volume_id, parent_id, replaced_dir_index )
return replaced_dir_index
@classmethod
def __compactify_on_delete( cls, volume_id, parent_id, free_file_id, free_dir_index, num_shards, retry=True, compactify_continuation=None ):
"""
Compactify the parent's index on delete, trying again in a deferred task if need be.
This is the top-level entry point.
"""
try:
# compact the index--move entries from the end of the index into the gaps
cls.__compactify_parent_delete( volume_id, parent_id, free_file_id, free_dir_index, num_shards, compactify_continuation=compactify_continuation )
# account that there is now one less index node
cls.__num_children_dec( volume_id, parent_id, num_shards )
except storagetypes.RequestDeadlineExceededError:
if retry:
# keep trying
storagetypes.deferred.defer( cls.__compactify_on_delete, volume_id, parent_id, free_dir_index )
return
else:
raise
except Exception, e:
logging.exception( e )
raise e
@classmethod
def __compactify_parent_insert_once( cls, volume_id, parent_id, new_file_id, new_dir_index, num_shards ):
"""
Given a newly-allocated directory index, try to find a free slot lower in the index to swap it with.
Return the (0, final directory index) on success.
Return negative on error.
"""
# find gaps to swap into that are in range [0, new_dir_index].
# see if we can get new_dir_index to be beneath the number of children.
free_gaps = cls.FindFreeGaps( volume_id, parent_id, new_dir_index )
if len(free_gaps) == 0:
logging.info("Directory /%s/%s: no free gaps, so keep /%s/%s at %s" % (volume_id, parent_id, volume_id, new_file_id, new_dir_index))
return (0, new_dir_index)
# move us there
free_gap = free_gaps[ random.randint( 0, len(free_gaps) - 1 ) ]
# are we in a "gap" already?
if free_gap == new_dir_index:
logging.info("Directory /%s/%s: already inserted /%s/%s at %s" % (volume_id, parent_id, volume_id, new_file_id, free_gap))
return (0, free_gap)
# attempt the swap
rc = cls.__compactify_swap( volume_id, parent_id, new_file_id, new_dir_index, None, free_gap )
# get the number of children again--maybe the set expanded up to include us, even if we failed
parent_num_children = cls.GetNumChildren( volume_id, parent_id, num_shards )
if rc is None:
# success
if free_gap < parent_num_children:
# swapped into a lower slot!
logging.info("Directory /%s/%s: inserted /%s/%s at %s" % (volume_id, parent_id, volume_id, new_file_id, free_gap))
return (0, free_gap)
else:
# succeeded, but after the number of children got decreased
# try again
logging.error("Directory /%s/%s: inserted /%s/%s at %s (from %s), but directory size is %s" % (volume_id, parent_id, volume_id, new_file_id, free_gap, new_dir_index, parent_num_children))
new_dir_index = free_gap
return (-errno.EAGAIN, new_dir_index)
else:
logging.debug("Directory /%s/%s: failed to swap /%s/%s from %s to %s, num_children = %s" % (volume_id, parent_id, volume_id, new_file_id, new_dir_index, free_gap, parent_num_children))
# maybe we failed to swap, but are we now in range?
if new_dir_index < parent_num_children:
# success
logging.debug("Directory /%s/%s: inserted /%s/%s at %s, since directory size is %s" % (volume_id, parent_id, volume_id, new_file_id, new_dir_index, parent_num_children))
return (0, new_dir_index)
elif rc == -errno.ENOENT:
# someone else swapped us
logging.debug("Directory /%s/%s: swapped /%s/%s out of %s by another process" % (volume_id, parent_id, volume_id, new_file_id, new_dir_index))
return (0, -1)
else:
# otherwise, try again
logging.error("Directory /%s/%s: __compactify_swap(%s (%s <--> %s)) rc = %s; try again" % (volume_id, parent_id, new_file_id, new_dir_index, free_gap, rc))
return (-errno.EAGAIN, new_dir_index)
@classmethod
def __compactify_on_insert( cls, volume_id, parent_id, new_file_id, new_dir_index, num_shards ):
"""
Compactify the parent's index on insert, trying again in a deferred task if need be.
Return an error code and the directory entry's new directory index, if successful.
(0, nonnegative) means successfully inserted at that directory index
(0, negative) means successfully inserted, but we no longer know where
(-EAGAIN, nonnegative) means try again at the returned directory index.
"""
try:
# compact the index--move entries from the end of the index into the gaps
rc, final_dir_index = cls.__compactify_parent_insert_once( volume_id, parent_id, new_file_id, new_dir_index, num_shards )
return (rc, final_dir_index)
except Exception, e:
logging.exception( e )
raise e
@classmethod
def NextGeneration( cls ):
"""
Get a generation number.
"""
now_sec, now_nsec = storagetypes.clock_gettime()
# 10ths of milliseconds
generation = now_sec * 10000 + now_nsec / 100000
return generation
@classmethod
def TryInsert( cls, volume_id, parent_id, file_id, new_dir_index, parent_capacity, num_shards, async=False ):
"""
Try to insert a given file ID into its parent's index, atomically.
If the slot is already taken, the caller is expected to retry with another slot that is likely to be free.
Return a (return code, value)
If return code is 0, then the value is the generation number
If return code is negative, then the value is either the new directory index to try, or None to choose a new one outright.
"""
generation = cls.NextGeneration()
@storagetypes.concurrent
def try_insert( new_dir_index ):
rc = yield cls.__alloc( volume_id, parent_id, file_id, new_dir_index, generation, async=True )
if rc:
# compactify--see if we can shift it closer
rc, final_dir_index = cls.__compactify_on_insert( volume_id, parent_id, file_id, new_dir_index, num_shards )
if rc == 0:
storagetypes.concurrent_return( (0, generation) )
elif rc == -errno.EAGAIN:
# try again
storagetypes.concurrent_return( (-errno.EAGAIN, final_dir_index) )
else:
storagetypes.concurrent_return( (rc, None) )
else:
logging.info("Directory /%s/%s: Failed to insert /%s/%s (capacity %s) at %s; will need to retry" % (volume_id, parent_id, volume_id, file_id, parent_capacity, new_dir_index) )
# probably collided. Try again, and have the caller pick a different index
storagetypes.concurrent_return( (-errno.EAGAIN, None) )
fut = try_insert( new_dir_index )
if async:
return fut
else:
storagetypes.wait_futures( [fut] )
return fut.get_result()
@classmethod
def Delete( cls, volume_id, parent_id, file_id, dir_index, num_shards, async=False, retry=True, compactify_continuation=None ):
"""
Free and then compactify the index. This will result in the directory index and
entry nodes getting deleted.
"""
@storagetypes.concurrent
def do_delete():
rc = yield cls.__free( volume_id, parent_id, file_id, dir_index, async=True )
if not rc:
logging.error("Failed to free index node /%s/%s (%s,%s)" % (volume_id, parent_id, file_id, dir_index))
storagetypes.concurrent_return( -errno.EAGAIN )
cls.__compactify_on_delete( volume_id, parent_id, file_id, dir_index, num_shards, retry=retry, compactify_continuation=compactify_continuation )
storagetypes.concurrent_return( 0 )
result_fut = do_delete()
if async:
return result_fut
storagetypes.wait_futures( [result_fut] )
return result_fut.get_result()
@classmethod
def Purge( cls, volume_id, parent_id, file_id, num_shards, async=False ):
"""
Remove both DirEnt and EntDir index nodes, and don't bother to compactify.
This is suitable for deleting the index wholesale, such as when deleting a Volume
or an AG.
Return 0 on success, if async == False
Return a list of futures to wait on, if async == True
"""
futs = []
fut = cls.__num_children_delete( volume_id, parent_id, num_shards, async=True )
futs.append( fut )
entdir = MSEntryIndex.ReadIndex( volume_id, file_id )
if entdir is not None:
dirent_key_name = MSEntryDirEntIndex.make_key_name( volume_id, parent_id, entdir.dir_index )
entdir_key_name = MSEntryEntDirIndex.make_key_name( volume_id, file_id )
dirent_key = storagetypes.make_key( MSEntryDirEntIndex, dirent_key_name )
entdir_key = storagetypes.make_key( MSEntryEntDirIndex, entdir_key_name )
dirent_del_fut = dirent_key.delete_async()
entdir_del_fut = entdir_key.delete_async()
futs.append( dirent_del_fut )
futs.append( entdir_del_fut )
if not async:
storagetypes.wait_futures( futs )
return 0
else:
return futs
@classmethod
def GetNumChildren( cls, volume_id, parent_id, num_shards, async=False ):
"""
Get the number of children in a directory
"""
num_children_counter = cls.__parent_child_counter_name( volume_id, parent_id )
if async:
return shardcounter.get_count_async( num_children_counter, num_shards, use_memcache=False )
else:
num_children = shardcounter.get_count( num_children_counter, num_shards, use_memcache=False )
return num_children
@classmethod
def NumChildrenInc( cls, volume_id, parent_id, num_shards, async=False ):
"""
Increase the number of children in a directory
"""
return cls.__num_children_inc( volume_id, parent_id, num_shards, async=async )
@classmethod
def NumChildrenDec( cls, volume_id, parent_id, num_shards, async=False ):
"""
Decrease the number of children in a directory
"""
return cls.__num_children_dec( volume_id, parent_id, num_shards, async=async )
@classmethod
def Read( cls, volume_id, parent_id, dir_index, async=False ):
"""
Read a directory index node. Return the whole node, not the value
"""
idx_key_name = MSEntryDirEntIndex.make_key_name( volume_id, parent_id, dir_index )
idx_key = storagetypes.make_key( MSEntryDirEntIndex, idx_key_name )
idx = storagetypes.memcache.get( idx_key_name )
if idx is not None:
if async:
return storagetypes.FutureWrapper( idx )
else:
return idx
else:
if async:
@storagetypes.concurrent
def read_and_cache():
idx = yield idx_key.get_async()
if idx is not None and idx.alloced:
MSEntryIndex.SetCache( idx )
storagetypes.concurrent_return( idx )
return read_and_cache()
else:
return idx_key.get()
@classmethod
def ReadIndex( cls, volume_id, file_id, async=False ):
"""
Read an entry index node. Return an MSEntryEntDirIndex
"""
idx_key_name = MSEntryEntDirIndex.make_key_name( volume_id, file_id )
idx_key = storagetypes.make_key( MSEntryEntDirIndex, idx_key_name )
idx = storagetypes.memcache.get( idx_key_name )
if idx is not None:
if async:
return storagetypes.FutureWrapper( idx )
else:
return idx
else:
if async:
@storagetypes.concurrent
def read_and_cache():
idx = yield idx_key.get_async()
if idx is not None and idx.alloced:
MSEntryIndex.SetCache( idx )
if not idx.alloced:
storagetypes.concurrent_return( None )
storagetypes.concurrent_return( idx )
return read_and_cache()
else:
return idx_key.get()
@classmethod
def SetCache( cls, dir_index_node ):
"""
Cache a node
"""
idx_key_name = MSEntryDirEntIndex.make_key_name( dir_index_node.volume_id, dir_index_node.parent_id, dir_index_node.dir_index )
ent_key_name = MSEntryEntDirIndex.make_key_name( dir_index_node.volume_id, dir_index_node.file_id )
storagetypes.memcache.set_multi( {idx_key_name: dir_index_node, ent_key_name: dir_index_node} )
@classmethod
def GenerationQuery( cls, volume_id, parent_id, generation_begin, generation_end ):
"""
Get a range of directory index nodes for a given parent/volume, by generation
"""
qry = MSEntryDirEntIndex.query()
qry = qry.filter( storagetypes.opAND( MSEntryDirEntIndex.volume_id == volume_id, MSEntryDirEntIndex.parent_id == parent_id ) )
if generation_begin >= 0:
qry = qry.filter( MSEntryDirEntIndex.generation >= generation_begin )
if generation_end >= 0:
qry = qry.filter( MSEntryDirEntIndex.generation < generation_end )
return qry
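   # GenerationQuery() only builds the query; callers fetch or iterate it themselves.
   # Passing a negative value for either bound leaves that side of the range open.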
| 40.534636
| 267
| 0.619101
| 44,677
| 0.978857
| 20,772
| 0.455107
| 43,273
| 0.948096
| 0
| 0
| 13,769
| 0.301674
|
59346b4914120bc35ba05709bdea9720cdc9dfbc
| 3,009
|
py
|
Python
|
tests/test_utils.py
|
caseygrun/plates
|
156069487560d0c72f080f7e45a4dc2ae7a466ac
|
[
"MIT"
] | null | null | null |
tests/test_utils.py
|
caseygrun/plates
|
156069487560d0c72f080f7e45a4dc2ae7a466ac
|
[
"MIT"
] | null | null | null |
tests/test_utils.py
|
caseygrun/plates
|
156069487560d0c72f080f7e45a4dc2ae7a466ac
|
[
"MIT"
] | null | null | null |
from microplates.utils import *
def test_itertuples():
assert list(itertuples((0,0),(0,2))) == [(0,0),(0,1),(0,2)]
assert list(itertuples((1,0),(2,0))) == [(1,0),(2,0)]
assert list(itertuples((1,0),(2,1))) == [(1,0),(1,1),(2,0),(2,1)]
assert list(itertuples((1,0),(2,1), by='column')) == [(1,0),(2,0),(1,1),(2,1)]
def test_letters2row():
assert letters2row('A') == 0
assert letters2row('H') == 7
assert letters2row('G') == 6
assert letters2row('AA') == 26
assert letters2row('AB') == 27
assert letters2row('BA') == 52
def test_cell2tuple():
assert cell2tuple('A1') == (0,0)
assert cell2tuple('H10') == (7,9)
assert cell2tuple('G11') == (6,10)
assert cell2tuple('AA1') == (26,0)
assert cell2tuple('AB10') == (27,9)
assert cell2tuple('BA12') == (52,11)
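# Conventions exercised above: well names pair a letter row ('A' == row 0, 'AA' == row 26)
# with a 1-based column number, while tuple coordinates are zero-based (row, column).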
def test_is_cell():
assert is_cell('A1')
assert is_cell('F12')
assert is_cell('BC256')
assert not is_cell('H 12')
assert not is_cell('5S')
def test_row2letters():
assert row2letters(7) == 'H'
assert row2letters(27) == 'AB'
assert row2letters(55) == 'BD'
def test_tuple2cell():
assert tuple2cell(7,9) == 'H10'
assert tuple2cell(27,9) == 'AB10'
assert tuple2cell(55,11) == 'BD12'
def test_range2cells():
assert range2cells('A1:B1') == range2cells('B1:A1') == ('A1', 'B1')
assert range2cells('A:B') == ('A1', 'B12')
assert range2cells('A:A') == ('A1','A12')
assert range2cells('B:D') == ('B1','D12')
assert range2cells('C:B') == ('B1','C12')
assert range2cells('1:1') == ('A1','H1')
assert range2cells('1:3') == ('A1','H3')
assert range2cells('A11:A12') == ('A11','A12')
assert range2cells('2:10') == range2cells('10:2') == ('A2','H10')
assert range2cells("A:A",wells=384) == ("A1","A24")
assert range2cells("I:I",wells=384) == ("I1","I24")
assert range2cells("23:23",wells=384) == ("A23","P23")
def test_range2tuple():
assert range2tuple('A1:C10') == range2tuple('C10:A1') == ((0,0),(2,9))
assert range2tuple('G7:G10') == range2tuple('G10:G7') == ((6,6),(6,9))
assert range2tuple("A:A",wells=384) == ((0,0),(0,23))
def test_range2cell_list():
assert range2cell_list('A1:A2') == ['A1','A2']
assert range2cell_list('A1:B2') == ['A1','A2','B1','B2']
assert range2cell_list('A1:B2', by='column') == ['A1','B1','A2','B2']
def test_iterwells():
assert list(iterwells(2,start='H12')) == ['H12', 'A1']
assert list(iterwells(13)) == ['A1','A2','A3','A4','A5','A6','A7','A8','A9','A10','A11','A12','B1']
assert list(iterwells(9)) == ['A1', 'A2', 'A3', 'A4', 'A5', 'A6', 'A7', 'A8', 'A9']
def test_infer_plate_size():
assert infer_plate_size(['H12']) == infer_plate_size(['A1','H12']) == infer_plate_size(range2cell_list('A1:H12')) == 96
assert infer_plate_size(['H13']) == 384
assert infer_plate_size(['A6'], all=True) == [24, 48, 96, 384, 1536]
assert infer_plate_size(['A6'], prefer=96) == 96
assert infer_plate_size(['A6'], prefer=384) == 384
| 38.576923
| 123
| 0.580592
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 553
| 0.183782
|
593527dd9bb90c5f584c18500adffe54b948dd13
| 1,066
|
py
|
Python
|
example.py
|
train255/deep-speaker
|
d63b111b904faab34fe58637b3d0e7fd188e1b36
|
[
"MIT"
] | 3
|
2021-08-20T16:40:09.000Z
|
2022-02-08T23:17:52.000Z
|
example.py
|
train255/deep-speaker
|
d63b111b904faab34fe58637b3d0e7fd188e1b36
|
[
"MIT"
] | 1
|
2022-03-22T04:16:15.000Z
|
2022-03-22T04:26:03.000Z
|
example.py
|
train255/deep-speaker
|
d63b111b904faab34fe58637b3d0e7fd188e1b36
|
[
"MIT"
] | 1
|
2020-11-06T08:07:27.000Z
|
2020-11-06T08:07:27.000Z
|
import numpy as np
import random
from audio import read_mfcc
from batcher import sample_from_mfcc
from constants import SAMPLE_RATE, NUM_FRAMES
from conv_models import DeepSpeakerModel
from test import batch_cosine_similarity
np.random.seed(123)
random.seed(123)
model = DeepSpeakerModel()
model.m.load_weights('/Users/premy/deep-speaker/checkpoints/ResCNN_triplet_training_checkpoint_175.h5', by_name=True)
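# NOTE: the checkpoint path above is machine-specific; point it at a locally downloaded
# ResCNN triplet-training checkpoint before running this example.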
mfcc_001 = sample_from_mfcc(read_mfcc('samples/PhilippeRemy/PhilippeRemy_001.wav', SAMPLE_RATE), NUM_FRAMES)
mfcc_002 = sample_from_mfcc(read_mfcc('samples/PhilippeRemy/PhilippeRemy_002.wav', SAMPLE_RATE), NUM_FRAMES)
predict_001 = model.m.predict(np.expand_dims(mfcc_001, axis=0))
predict_002 = model.m.predict(np.expand_dims(mfcc_002, axis=0))
mfcc_003 = sample_from_mfcc(read_mfcc('samples/1255-90413-0001.flac', SAMPLE_RATE), NUM_FRAMES)
predict_003 = model.m.predict(np.expand_dims(mfcc_003, axis=0))
print('SAME SPEAKER', batch_cosine_similarity(predict_001, predict_002))
print('DIFF SPEAKER', batch_cosine_similarity(predict_001, predict_003))
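# batch_cosine_similarity returns values in [-1, 1]; a clearly higher score for the first
# (same-speaker) pair than for the second indicates the embeddings separate the speakers.
# Exact values depend on the checkpoint loaded above.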
| 41
| 117
| 0.827392
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 225
| 0.211069
|
59373033d6759f87ac888baaf5e7fad69fe7d8fc
| 135,252
|
py
|
Python
|
pychunkedgraph/tests/test.py
|
perlman/PyChunkedGraph
|
2c582f46a8292010e8f9f54c94c63af0b172bdad
|
[
"MIT"
] | null | null | null |
pychunkedgraph/tests/test.py
|
perlman/PyChunkedGraph
|
2c582f46a8292010e8f9f54c94c63af0b172bdad
|
[
"MIT"
] | null | null | null |
pychunkedgraph/tests/test.py
|
perlman/PyChunkedGraph
|
2c582f46a8292010e8f9f54c94c63af0b172bdad
|
[
"MIT"
] | null | null | null |
import sys
import os
import subprocess
import pytest
import numpy as np
from functools import partial
import collections
from grpc._channel import _Rendezvous
from google.cloud import bigtable
from google.auth import credentials
from math import inf
from datetime import datetime, timedelta
from time import sleep
from signal import SIGTERM
from warnings import warn
sys.path.insert(0, os.path.join(sys.path[0], '..'))
from pychunkedgraph.backend import chunkedgraph # noqa
from pychunkedgraph.backend.utils import serializers, column_keys # noqa
from pychunkedgraph.backend import chunkedgraph_exceptions as cg_exceptions # noqa
from pychunkedgraph.creator import graph_tests # noqa
def setup_emulator_env():
bt_env_init = subprocess.run(
["gcloud", "beta", "emulators", "bigtable", "env-init"], stdout=subprocess.PIPE)
os.environ["BIGTABLE_EMULATOR_HOST"] = \
bt_env_init.stdout.decode("utf-8").strip().split('=')[-1]
c = bigtable.Client(
project='IGNORE_ENVIRONMENT_PROJECT',
credentials=credentials.AnonymousCredentials(),
admin=True)
t = c.instance("emulated_instance").table("emulated_table")
try:
t.create()
return True
except Exception as err:
print('Bigtable Emulator not yet ready: %s' % err)
return False
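# setup_emulator_env() points clients at the local emulator by exporting
# BIGTABLE_EMULATOR_HOST, then probes readiness by creating a throwaway table.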
@pytest.fixture(scope='session', autouse=True)
def bigtable_emulator(request):
# Start Emulator
bigtable_emulator = subprocess.Popen(
["gcloud", "beta", "emulators", "bigtable", "start"], preexec_fn=os.setsid,
stdout=subprocess.PIPE)
# Wait for Emulator to start up
print("Waiting for BigTables Emulator to start up...", end='')
retries = 5
while retries > 0:
if setup_emulator_env() is True:
break
else:
retries -= 1
sleep(5)
if retries == 0:
print("\nCouldn't start Bigtable Emulator. Make sure it is installed correctly.")
exit(1)
# Setup Emulator-Finalizer
def fin():
os.killpg(os.getpgid(bigtable_emulator.pid), SIGTERM)
bigtable_emulator.wait()
request.addfinalizer(fin)
@pytest.fixture(scope='function')
def lock_expired_timedelta_override(request):
# HACK: For the duration of the test, set global LOCK_EXPIRED_TIME_DELTA
# to 1 second (otherwise test would have to run for several minutes)
original_timedelta = chunkedgraph.LOCK_EXPIRED_TIME_DELTA
chunkedgraph.LOCK_EXPIRED_TIME_DELTA = timedelta(seconds=1)
# Ensure that we restore the original value, even if the test fails.
def fin():
chunkedgraph.LOCK_EXPIRED_TIME_DELTA = original_timedelta
request.addfinalizer(fin)
return chunkedgraph.LOCK_EXPIRED_TIME_DELTA
@pytest.fixture(scope='function')
def gen_graph(request):
def _cgraph(request, fan_out=2, n_layers=10):
# setup Chunked Graph
dataset_info = {
"data_dir": ""
}
graph = chunkedgraph.ChunkedGraph(
request.function.__name__,
project_id='IGNORE_ENVIRONMENT_PROJECT',
credentials=credentials.AnonymousCredentials(),
instance_id="emulated_instance", dataset_info=dataset_info,
chunk_size=np.array([512, 512, 64], dtype=np.uint64),
is_new=True, fan_out=np.uint64(fan_out),
n_layers=np.uint64(n_layers))
# setup Chunked Graph - Finalizer
def fin():
graph.table.delete()
request.addfinalizer(fin)
return graph
return partial(_cgraph, request)
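# Tests request the gen_graph fixture and call it with the desired layer count, e.g.
#   cgraph = gen_graph(n_layers=4)
# Each test gets a fresh emulated table keyed by the test function's name, deleted on teardown.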
@pytest.fixture(scope='function')
def gen_graph_simplequerytest(request, gen_graph):
"""
┌─────┬─────┬─────┐
│ A¹ │ B¹ │ C¹ │
│ 1 │ 3━2━┿━━4 │
│ │ │ │
└─────┴─────┴─────┘
"""
graph = gen_graph(n_layers=4)
# Chunk A
create_chunk(graph,
vertices=[to_label(graph, 1, 0, 0, 0, 0)],
edges=[])
# Chunk B
create_chunk(graph,
vertices=[to_label(graph, 1, 1, 0, 0, 0), to_label(graph, 1, 1, 0, 0, 1)],
edges=[(to_label(graph, 1, 1, 0, 0, 0), to_label(graph, 1, 1, 0, 0, 1), 0.5),
(to_label(graph, 1, 1, 0, 0, 0), to_label(graph, 1, 2, 0, 0, 0), inf)])
# Chunk C
create_chunk(graph,
vertices=[to_label(graph, 1, 2, 0, 0, 0)],
edges=[(to_label(graph, 1, 2, 0, 0, 0), to_label(graph, 1, 1, 0, 0, 0), inf)])
graph.add_layer(3, np.array([[0, 0, 0], [1, 0, 0]]))
graph.add_layer(3, np.array([[2, 0, 0]]))
graph.add_layer(4, np.array([[0, 0, 0], [1, 0, 0]]))
return graph
def create_chunk(cgraph, vertices=None, edges=None, timestamp=None):
"""
Helper function to add vertices and edges to the chunkedgraph - no safety checks!
"""
if not vertices:
vertices = []
if not edges:
edges = []
vertices = np.unique(np.array(vertices, dtype=np.uint64))
edges = [(np.uint64(v1), np.uint64(v2), np.float32(aff)) for v1, v2, aff in edges]
isolated_node_ids = [x for x in vertices if (x not in [edges[i][0] for i in range(len(edges))]) and
(x not in [edges[i][1] for i in range(len(edges))])]
edge_ids = {"in_connected": np.array([], dtype=np.uint64).reshape(0, 2),
"in_disconnected": np.array([], dtype=np.uint64).reshape(0, 2),
"cross": np.array([], dtype=np.uint64).reshape(0, 2),
"between_connected": np.array([], dtype=np.uint64).reshape(0, 2),
"between_disconnected": np.array([], dtype=np.uint64).reshape(0, 2)}
edge_affs = {"in_connected": np.array([], dtype=np.float32),
"in_disconnected": np.array([], dtype=np.float32),
"between_connected": np.array([], dtype=np.float32),
"between_disconnected": np.array([], dtype=np.float32)}
for e in edges:
if cgraph.test_if_nodes_are_in_same_chunk(e[0:2]):
this_edge = np.array([e[0], e[1]], dtype=np.uint64).reshape(-1, 2)
edge_ids["in_connected"] = \
np.concatenate([edge_ids["in_connected"], this_edge])
edge_affs["in_connected"] = \
np.concatenate([edge_affs["in_connected"], [e[2]]])
if len(edge_ids["in_connected"]) > 0:
chunk_id = cgraph.get_chunk_id(edge_ids["in_connected"][0][0])
elif len(vertices) > 0:
chunk_id = cgraph.get_chunk_id(vertices[0])
else:
chunk_id = None
for e in edges:
if not cgraph.test_if_nodes_are_in_same_chunk(e[0:2]):
# Ensure proper order
if chunk_id is not None:
if cgraph.get_chunk_id(e[0]) != chunk_id:
e = [e[1], e[0], e[2]]
this_edge = np.array([e[0], e[1]], dtype=np.uint64).reshape(-1, 2)
if np.isinf(e[2]):
edge_ids["cross"] = \
np.concatenate([edge_ids["cross"], this_edge])
else:
edge_ids["between_connected"] = \
np.concatenate([edge_ids["between_connected"],
this_edge])
edge_affs["between_connected"] = \
np.concatenate([edge_affs["between_connected"], [e[2]]])
isolated_node_ids = np.array(isolated_node_ids, dtype=np.uint64)
cgraph.logger.debug(edge_ids)
cgraph.logger.debug(edge_affs)
# Use affinities as areas
cgraph.add_atomic_edges_in_chunks(edge_ids, edge_affs, edge_affs,
isolated_node_ids,
time_stamp=timestamp)
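# create_chunk() sorts the supplied edges into in-chunk ("in_connected"), cross-chunk with
# infinite affinity ("cross"), and other between-chunk ("between_connected") buckets before
# handing them to add_atomic_edges_in_chunks().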
def to_label(cgraph, l, x, y, z, segment_id):
return cgraph.get_node_id(np.uint64(segment_id), layer=l, x=x, y=y, z=z)
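# to_label() builds a fully qualified node ID; e.g. to_label(cgraph, 1, 2, 0, 0, 5) is
# segment 5 in the layer-1 chunk at coordinates (2, 0, 0).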
class TestGraphNodeConversion:
@pytest.mark.timeout(30)
def test_compute_bitmasks(self):
pass
@pytest.mark.timeout(30)
def test_node_conversion(self, gen_graph):
cgraph = gen_graph(n_layers=10)
node_id = cgraph.get_node_id(np.uint64(4), layer=2, x=3, y=1, z=0)
assert cgraph.get_chunk_layer(node_id) == 2
assert np.all(cgraph.get_chunk_coordinates(node_id) == np.array([3, 1, 0]))
chunk_id = cgraph.get_chunk_id(layer=2, x=3, y=1, z=0)
assert cgraph.get_chunk_layer(chunk_id) == 2
assert np.all(cgraph.get_chunk_coordinates(chunk_id) == np.array([3, 1, 0]))
assert cgraph.get_chunk_id(node_id=node_id) == chunk_id
assert cgraph.get_node_id(np.uint64(4), chunk_id=chunk_id) == node_id
@pytest.mark.timeout(30)
def test_node_id_adjacency(self, gen_graph):
cgraph = gen_graph(n_layers=10)
assert cgraph.get_node_id(np.uint64(0), layer=2, x=3, y=1, z=0) + np.uint64(1) == \
cgraph.get_node_id(np.uint64(1), layer=2, x=3, y=1, z=0)
assert cgraph.get_node_id(np.uint64(2**53 - 2), layer=10, x=0, y=0, z=0) + np.uint64(1) == \
cgraph.get_node_id(np.uint64(2**53 - 1), layer=10, x=0, y=0, z=0)
@pytest.mark.timeout(30)
def test_serialize_node_id(self, gen_graph):
cgraph = gen_graph(n_layers=10)
assert serializers.serialize_uint64(cgraph.get_node_id(np.uint64(0), layer=2, x=3, y=1, z=0)) < \
serializers.serialize_uint64(cgraph.get_node_id(np.uint64(1), layer=2, x=3, y=1, z=0))
assert serializers.serialize_uint64(cgraph.get_node_id(np.uint64(2 ** 53 - 2), layer=10, x=0, y=0, z=0)) < \
serializers.serialize_uint64(cgraph.get_node_id(np.uint64(2 ** 53 - 1), layer=10, x=0, y=0, z=0))
@pytest.mark.timeout(30)
def test_deserialize_node_id(self):
pass
@pytest.mark.timeout(30)
def test_serialization_roundtrip(self):
pass
@pytest.mark.timeout(30)
def test_serialize_valid_label_id(self):
label = np.uint64(0x01FF031234556789)
assert serializers.deserialize_uint64(
serializers.serialize_uint64(label)) == label
class TestGraphBuild:
@pytest.mark.timeout(30)
def test_build_single_node(self, gen_graph):
"""
Create graph with single RG node 1 in chunk A
┌─────┐
│ A¹ │
│ 1 │
│ │
└─────┘
"""
cgraph = gen_graph(n_layers=2)
# Add Chunk A
create_chunk(cgraph,
vertices=[to_label(cgraph, 1, 0, 0, 0, 0)])
res = cgraph.table.read_rows()
res.consume_all()
# Check for the RG-to-CG mapping:
# assert chunkedgraph.serialize_uint64(1) in res.rows
# row = res.rows[chunkedgraph.serialize_uint64(1)].cells[cgraph.family_id]
# assert np.frombuffer(row[b'cg_id'][0].value, np.uint64)[0] == to_label(cgraph, 1, 0, 0, 0, 0)
# Check for the Level 1 CG supervoxel:
# to_label(cgraph, 1, 0, 0, 0, 0)
assert serializers.serialize_uint64(to_label(cgraph, 1, 0, 0, 0, 0)) in res.rows
atomic_node_info = cgraph.get_atomic_node_info(to_label(cgraph, 1, 0, 0, 0, 0))
atomic_affinities = atomic_node_info[column_keys.Connectivity.Affinity]
atomic_partners = atomic_node_info[column_keys.Connectivity.Partner]
parents = atomic_node_info[column_keys.Hierarchy.Parent]
assert len(atomic_partners) == 0
assert len(atomic_affinities) == 0
assert len(parents) == 1 and parents[0] == to_label(cgraph, 2, 0, 0, 0, 1)
# Check for the one Level 2 node that should have been created.
# to_label(cgraph, 2, 0, 0, 0, 1)
assert serializers.serialize_uint64(to_label(cgraph, 2, 0, 0, 0, 1)) in res.rows
row = res.rows[serializers.serialize_uint64(to_label(cgraph, 2, 0, 0, 0, 1))].cells[cgraph.family_id]
atomic_cross_edge_dict = cgraph.get_atomic_cross_edge_dict(to_label(cgraph, 2, 0, 0, 0, 1))
column = column_keys.Hierarchy.Child
children = column.deserialize(row[column.key][0].value)
for aces in atomic_cross_edge_dict.values():
assert len(aces) == 0
assert len(children) == 1 and children[0] == to_label(cgraph, 1, 0, 0, 0, 0)
# Make sure there are not any more entries in the table
assert len(res.rows) == 1 + 1 + 1 + 1
@pytest.mark.timeout(30)
def test_build_single_edge(self, gen_graph):
"""
Create graph with edge between RG supervoxels 1 and 2 (same chunk)
┌─────┐
│ A¹ │
│ 1━2 │
│ │
└─────┘
"""
cgraph = gen_graph(n_layers=2)
# Add Chunk A
create_chunk(cgraph,
vertices=[to_label(cgraph, 1, 0, 0, 0, 0), to_label(cgraph, 1, 0, 0, 0, 1)],
edges=[(to_label(cgraph, 1, 0, 0, 0, 0), to_label(cgraph, 1, 0, 0, 0, 1), 0.5)])
res = cgraph.table.read_rows()
res.consume_all()
# Check for the two RG-to-CG mappings:
# assert chunkedgraph.serialize_uint64(1) in res.rows
# row = res.rows[chunkedgraph.serialize_uint64(1)].cells[cgraph.family_id]
# assert np.frombuffer(row[b'cg_id'][0].value, np.uint64)[0] == to_label(cgraph, 1, 0, 0, 0, 0)
# assert chunkedgraph.serialize_uint64(2) in res.rows
# row = res.rows[chunkedgraph.serialize_uint64(2)].cells[cgraph.family_id]
# assert np.frombuffer(row[b'cg_id'][0].value, np.uint64)[0] == to_label(cgraph, 1, 0, 0, 0, 1)
# Check for the two original Level 1 CG supervoxels
# to_label(cgraph, 1, 0, 0, 0, 0)
assert serializers.serialize_uint64(to_label(cgraph, 1, 0, 0, 0, 0)) in res.rows
atomic_node_info = cgraph.get_atomic_node_info(to_label(cgraph, 1, 0, 0, 0, 0))
atomic_affinities = atomic_node_info[column_keys.Connectivity.Affinity]
atomic_partners = atomic_node_info[column_keys.Connectivity.Partner]
parents = atomic_node_info[column_keys.Hierarchy.Parent]
assert len(atomic_partners) == 1 and atomic_partners[0] == to_label(cgraph, 1, 0, 0, 0, 1)
assert len(atomic_affinities) == 1 and atomic_affinities[0] == 0.5
assert len(parents) == 1 and parents[0] == to_label(cgraph, 2, 0, 0, 0, 1)
# to_label(cgraph, 1, 0, 0, 0, 1)
assert serializers.serialize_uint64(to_label(cgraph, 1, 0, 0, 0, 1)) in res.rows
atomic_node_info = cgraph.get_atomic_node_info(to_label(cgraph, 1, 0, 0, 0, 1))
atomic_affinities = atomic_node_info[column_keys.Connectivity.Affinity]
atomic_partners = atomic_node_info[column_keys.Connectivity.Partner]
parents = atomic_node_info[column_keys.Hierarchy.Parent]
assert len(atomic_partners) == 1 and atomic_partners[0] == to_label(cgraph, 1, 0, 0, 0, 0)
assert len(atomic_affinities) == 1 and atomic_affinities[0] == 0.5
assert len(parents) == 1 and parents[0] == to_label(cgraph, 2, 0, 0, 0, 1)
# Check for the one Level 2 node that should have been created.
assert serializers.serialize_uint64(to_label(cgraph, 2, 0, 0, 0, 1)) in res.rows
row = res.rows[serializers.serialize_uint64(to_label(cgraph, 2, 0, 0, 0, 1))].cells[cgraph.family_id]
atomic_cross_edge_dict = cgraph.get_atomic_cross_edge_dict(to_label(cgraph, 2, 0, 0, 0, 1))
column = column_keys.Hierarchy.Child
children = column.deserialize(row[column.key][0].value)
for aces in atomic_cross_edge_dict.values():
assert len(aces) == 0
assert len(children) == 2 and to_label(cgraph, 1, 0, 0, 0, 0) in children and to_label(cgraph, 1, 0, 0, 0, 1) in children
# Make sure there are not any more entries in the table
assert len(res.rows) == 2 + 1 + 1 + 1
@pytest.mark.timeout(30)
def test_build_single_across_edge(self, gen_graph):
"""
Create graph with edge between RG supervoxels 1 and 2 (neighboring chunks)
        ┌─────┬─────┐
│ A¹ │ B¹ │
│ 1━━┿━━2 │
│ │ │
└─────┴─────┘
"""
cgraph = gen_graph(n_layers=3)
# Chunk A
create_chunk(cgraph,
vertices=[to_label(cgraph, 1, 0, 0, 0, 0)],
edges=[(to_label(cgraph, 1, 0, 0, 0, 0), to_label(cgraph, 1, 1, 0, 0, 0), inf)])
# Chunk B
create_chunk(cgraph,
vertices=[to_label(cgraph, 1, 1, 0, 0, 0)],
edges=[(to_label(cgraph, 1, 1, 0, 0, 0), to_label(cgraph, 1, 0, 0, 0, 0), inf)])
cgraph.add_layer(3, np.array([[0, 0, 0], [1, 0, 0]]))
res = cgraph.table.read_rows()
res.consume_all()
# Check for the two RG-to-CG mappings:
# assert chunkedgraph.serialize_uint64(1) in res.rows
# row = res.rows[chunkedgraph.serialize_uint64(1)].cells[cgraph.family_id]
# assert np.frombuffer(row[b'cg_id'][0].value, np.uint64)[0] == to_label(cgraph, 1, 0, 0, 0, 0)
# assert chunkedgraph.serialize_uint64(2) in res.rows
# row = res.rows[chunkedgraph.serialize_uint64(2)].cells[cgraph.family_id]
# assert np.frombuffer(row[b'cg_id'][0].value, np.uint64)[0] == to_label(cgraph, 1, 1, 0, 0, 0)
# Check for the two original Level 1 CG supervoxels
# to_label(cgraph, 1, 0, 0, 0, 0)
assert serializers.serialize_uint64(to_label(cgraph, 1, 0, 0, 0, 0)) in res.rows
atomic_node_info = cgraph.get_atomic_node_info(to_label(cgraph, 1, 0, 0, 0, 0))
atomic_affinities = atomic_node_info[column_keys.Connectivity.Affinity]
atomic_partners = atomic_node_info[column_keys.Connectivity.Partner]
cgraph.logger.debug(atomic_node_info.keys())
parents = atomic_node_info[column_keys.Hierarchy.Parent]
assert len(atomic_partners) == 1 and atomic_partners[0] == to_label(cgraph, 1, 1, 0, 0, 0)
assert len(atomic_affinities) == 1 and atomic_affinities[0] == inf
assert len(parents) == 1 and parents[0] == to_label(cgraph, 2, 0, 0, 0, 1)
# to_label(cgraph, 1, 1, 0, 0, 0)
assert serializers.serialize_uint64(to_label(cgraph, 1, 1, 0, 0, 0)) in res.rows
atomic_node_info = cgraph.get_atomic_node_info(to_label(cgraph, 1, 1, 0, 0, 0))
atomic_affinities = atomic_node_info[column_keys.Connectivity.Affinity]
atomic_partners = atomic_node_info[column_keys.Connectivity.Partner]
parents = atomic_node_info[column_keys.Hierarchy.Parent]
assert len(atomic_partners) == 1 and atomic_partners[0] == to_label(cgraph, 1, 0, 0, 0, 0)
assert len(atomic_affinities) == 1 and atomic_affinities[0] == inf
assert len(parents) == 1 and parents[0] == to_label(cgraph, 2, 1, 0, 0, 1)
# Check for the two Level 2 nodes that should have been created. Since Level 2 has the same
# dimensions as Level 1, we also expect them to be in different chunks
# to_label(cgraph, 2, 0, 0, 0, 1)
assert serializers.serialize_uint64(to_label(cgraph, 2, 0, 0, 0, 1)) in res.rows
row = res.rows[serializers.serialize_uint64(to_label(cgraph, 2, 0, 0, 0, 1))].cells[cgraph.family_id]
atomic_cross_edge_dict = cgraph.get_atomic_cross_edge_dict(to_label(cgraph, 2, 0, 0, 0, 1))
column = column_keys.Hierarchy.Child
children = column.deserialize(row[column.key][0].value)
test_ace = np.array([to_label(cgraph, 1, 0, 0, 0, 0), to_label(cgraph, 1, 1, 0, 0, 0)], dtype=np.uint64)
assert len(atomic_cross_edge_dict[2]) == 1
assert test_ace in atomic_cross_edge_dict[2]
assert len(children) == 1 and to_label(cgraph, 1, 0, 0, 0, 0) in children
# to_label(cgraph, 2, 1, 0, 0, 1)
assert serializers.serialize_uint64(to_label(cgraph, 2, 1, 0, 0, 1)) in res.rows
row = res.rows[serializers.serialize_uint64(to_label(cgraph, 2, 1, 0, 0, 1))].cells[cgraph.family_id]
atomic_cross_edge_dict = cgraph.get_atomic_cross_edge_dict(to_label(cgraph, 2, 1, 0, 0, 1))
column = column_keys.Hierarchy.Child
children = column.deserialize(row[column.key][0].value)
test_ace = np.array([to_label(cgraph, 1, 1, 0, 0, 0), to_label(cgraph, 1, 0, 0, 0, 0)], dtype=np.uint64)
assert len(atomic_cross_edge_dict[2]) == 1
assert test_ace in atomic_cross_edge_dict[2]
assert len(children) == 1 and to_label(cgraph, 1, 1, 0, 0, 0) in children
# Check for the one Level 3 node that should have been created. This one combines the two
# connected components of Level 2
# to_label(cgraph, 3, 0, 0, 0, 1)
assert serializers.serialize_uint64(to_label(cgraph, 3, 0, 0, 0, 1)) in res.rows
row = res.rows[serializers.serialize_uint64(to_label(cgraph, 3, 0, 0, 0, 1))].cells[cgraph.family_id]
atomic_cross_edge_dict = cgraph.get_atomic_cross_edge_dict(to_label(cgraph, 3, 0, 0, 0, 1))
column = column_keys.Hierarchy.Child
children = column.deserialize(row[column.key][0].value)
for aces in atomic_cross_edge_dict.values():
assert len(aces) == 0
assert len(children) == 2 and to_label(cgraph, 2, 0, 0, 0, 1) in children and to_label(cgraph, 2, 1, 0, 0, 1) in children
# Make sure there are not any more entries in the table
assert len(res.rows) == 2 + 2 + 1 + 3 + 1
@pytest.mark.timeout(30)
def test_build_single_edge_and_single_across_edge(self, gen_graph):
"""
Create graph with edge between RG supervoxels 1 and 2 (same chunk)
and edge between RG supervoxels 1 and 3 (neighboring chunks)
┌─────┬─────┐
│ A¹ │ B¹ │
│ 2━1━┿━━3 │
│ │ │
└─────┴─────┘
"""
cgraph = gen_graph(n_layers=3)
# Chunk A
create_chunk(cgraph,
vertices=[to_label(cgraph, 1, 0, 0, 0, 0), to_label(cgraph, 1, 0, 0, 0, 1)],
edges=[(to_label(cgraph, 1, 0, 0, 0, 0), to_label(cgraph, 1, 0, 0, 0, 1), 0.5),
(to_label(cgraph, 1, 0, 0, 0, 0), to_label(cgraph, 1, 1, 0, 0, 0), inf)])
# Chunk B
create_chunk(cgraph,
vertices=[to_label(cgraph, 1, 1, 0, 0, 0)],
edges=[(to_label(cgraph, 1, 1, 0, 0, 0), to_label(cgraph, 1, 0, 0, 0, 0), inf)])
cgraph.add_layer(3, np.array([[0, 0, 0], [1, 0, 0]]))
res = cgraph.table.read_rows()
res.consume_all()
# Check for the three RG-to-CG mappings:
# assert chunkedgraph.serialize_uint64(1) in res.rows
# row = res.rows[chunkedgraph.serialize_uint64(1)].cells[cgraph.family_id]
# assert np.frombuffer(row[b'cg_id'][0].value, np.uint64)[0] == to_label(cgraph, 1, 0, 0, 0, 0)
# assert chunkedgraph.serialize_uint64(2) in res.rows
# row = res.rows[chunkedgraph.serialize_uint64(2)].cells[cgraph.family_id]
# assert np.frombuffer(row[b'cg_id'][0].value, np.uint64)[0] == to_label(cgraph, 1, 0, 0, 0, 1)
# assert chunkedgraph.serialize_uint64(3) in res.rows
# row = res.rows[chunkedgraph.serialize_uint64(3)].cells[cgraph.family_id]
# assert np.frombuffer(row[b'cg_id'][0].value, np.uint64)[0] == to_label(cgraph, 1, 1, 0, 0, 0)
# Check for the three original Level 1 CG supervoxels
# to_label(cgraph, 1, 0, 0, 0, 0)
assert serializers.serialize_uint64(to_label(cgraph, 1, 0, 0, 0, 0)) in res.rows
atomic_node_info = cgraph.get_atomic_node_info(to_label(cgraph, 1, 0, 0, 0, 0))
atomic_affinities = atomic_node_info[column_keys.Connectivity.Affinity]
atomic_partners = atomic_node_info[column_keys.Connectivity.Partner]
parents = atomic_node_info[column_keys.Hierarchy.Parent]
assert len(atomic_partners) == 2 and to_label(cgraph, 1, 0, 0, 0, 1) in atomic_partners and to_label(cgraph, 1, 1, 0, 0, 0) in atomic_partners
assert len(atomic_affinities) == 2
if atomic_partners[0] == to_label(cgraph, 1, 0, 0, 0, 1):
assert atomic_affinities[0] == 0.5 and atomic_affinities[1] == inf
else:
assert atomic_affinities[0] == inf and atomic_affinities[1] == 0.5
assert len(parents) == 1 and parents[0] == to_label(cgraph, 2, 0, 0, 0, 1)
# to_label(cgraph, 1, 0, 0, 0, 1)
assert serializers.serialize_uint64(to_label(cgraph, 1, 0, 0, 0, 1)) in res.rows
atomic_node_info = cgraph.get_atomic_node_info(to_label(cgraph, 1, 0, 0, 0, 1))
atomic_affinities = atomic_node_info[column_keys.Connectivity.Affinity]
atomic_partners = atomic_node_info[column_keys.Connectivity.Partner]
parents = atomic_node_info[column_keys.Hierarchy.Parent]
assert len(atomic_partners) == 1 and atomic_partners[0] == to_label(cgraph, 1, 0, 0, 0, 0)
assert len(atomic_affinities) == 1 and atomic_affinities[0] == 0.5
assert len(parents) == 1 and parents[0] == to_label(cgraph, 2, 0, 0, 0, 1)
# to_label(cgraph, 1, 1, 0, 0, 0)
assert serializers.serialize_uint64(to_label(cgraph, 1, 1, 0, 0, 0)) in res.rows
atomic_node_info = cgraph.get_atomic_node_info(to_label(cgraph, 1, 1, 0, 0, 0))
atomic_affinities = atomic_node_info[column_keys.Connectivity.Affinity]
atomic_partners = atomic_node_info[column_keys.Connectivity.Partner]
parents = atomic_node_info[column_keys.Hierarchy.Parent]
assert len(atomic_partners) == 1 and atomic_partners[0] == to_label(cgraph, 1, 0, 0, 0, 0)
assert len(atomic_affinities) == 1 and atomic_affinities[0] == inf
assert len(parents) == 1 and parents[0] == to_label(cgraph, 2, 1, 0, 0, 1)
# Check for the two Level 2 nodes that should have been created. Since Level 2 has the same
# dimensions as Level 1, we also expect them to be in different chunks
# to_label(cgraph, 2, 0, 0, 0, 1)
assert serializers.serialize_uint64(to_label(cgraph, 2, 0, 0, 0, 1)) in res.rows
row = res.rows[serializers.serialize_uint64(to_label(cgraph, 2, 0, 0, 0, 1))].cells[cgraph.family_id]
atomic_cross_edge_dict = cgraph.get_atomic_cross_edge_dict(to_label(cgraph, 2, 0, 0, 0, 1))
column = column_keys.Hierarchy.Child
children = column.deserialize(row[column.key][0].value)
test_ace = np.array([to_label(cgraph, 1, 0, 0, 0, 0), to_label(cgraph, 1, 1, 0, 0, 0)], dtype=np.uint64)
assert len(atomic_cross_edge_dict[2]) == 1
assert test_ace in atomic_cross_edge_dict[2]
assert len(children) == 2 and to_label(cgraph, 1, 0, 0, 0, 0) in children and to_label(cgraph, 1, 0, 0, 0, 1) in children
# to_label(cgraph, 2, 1, 0, 0, 1)
assert serializers.serialize_uint64(to_label(cgraph, 2, 1, 0, 0, 1)) in res.rows
row = res.rows[serializers.serialize_uint64(to_label(cgraph, 2, 1, 0, 0, 1))].cells[cgraph.family_id]
atomic_cross_edge_dict = cgraph.get_atomic_cross_edge_dict(to_label(cgraph, 2, 1, 0, 0, 1))
column = column_keys.Hierarchy.Child
children = column.deserialize(row[column.key][0].value)
test_ace = np.array([to_label(cgraph, 1, 1, 0, 0, 0), to_label(cgraph, 1, 0, 0, 0, 0)], dtype=np.uint64)
assert len(atomic_cross_edge_dict[2]) == 1
assert test_ace in atomic_cross_edge_dict[2]
assert len(children) == 1 and to_label(cgraph, 1, 1, 0, 0, 0) in children
# Check for the one Level 3 node that should have been created. This one combines the two
# connected components of Level 2
# to_label(cgraph, 3, 0, 0, 0, 1)
assert serializers.serialize_uint64(to_label(cgraph, 3, 0, 0, 0, 1)) in res.rows
row = res.rows[serializers.serialize_uint64(to_label(cgraph, 3, 0, 0, 0, 1))].cells[cgraph.family_id]
atomic_cross_edge_dict = cgraph.get_atomic_cross_edge_dict(to_label(cgraph, 3, 0, 0, 0, 1))
column = column_keys.Hierarchy.Child
children = column.deserialize(row[column.key][0].value)
for ace in atomic_cross_edge_dict.values():
assert len(ace) == 0
assert len(children) == 2 and to_label(cgraph, 2, 0, 0, 0, 1) in children and to_label(cgraph, 2, 1, 0, 0, 1) in children
# Make sure there are not any more entries in the table
assert len(res.rows) == 3 + 2 + 1 + 3 + 1
@pytest.mark.timeout(30)
def test_build_big_graph(self, gen_graph):
"""
Create graph with RG nodes 1 and 2 in opposite corners of the largest possible dataset
┌─────┐ ┌─────┐
│ A¹ │ ... │ Z¹ │
│ 1 │ │ 2 │
│ │ │ │
└─────┘ └─────┘
"""
cgraph = gen_graph(n_layers=10)
# Preparation: Build Chunk A
create_chunk(cgraph,
vertices=[to_label(cgraph, 1, 0, 0, 0, 0)],
edges=[])
# Preparation: Build Chunk Z
create_chunk(cgraph,
vertices=[to_label(cgraph, 1, 255, 255, 255, 0)],
edges=[])
cgraph.add_layer(3, np.array([[0x00, 0x00, 0x00]]))
cgraph.add_layer(3, np.array([[0xFF, 0xFF, 0xFF]]))
cgraph.add_layer(4, np.array([[0x00, 0x00, 0x00]]))
cgraph.add_layer(4, np.array([[0x7F, 0x7F, 0x7F]]))
cgraph.add_layer(5, np.array([[0x00, 0x00, 0x00]]))
cgraph.add_layer(5, np.array([[0x3F, 0x3F, 0x3F]]))
cgraph.add_layer(6, np.array([[0x00, 0x00, 0x00]]))
cgraph.add_layer(6, np.array([[0x1F, 0x1F, 0x1F]]))
cgraph.add_layer(7, np.array([[0x00, 0x00, 0x00]]))
cgraph.add_layer(7, np.array([[0x0F, 0x0F, 0x0F]]))
cgraph.add_layer(8, np.array([[0x00, 0x00, 0x00]]))
cgraph.add_layer(8, np.array([[0x07, 0x07, 0x07]]))
cgraph.add_layer(9, np.array([[0x00, 0x00, 0x00]]))
cgraph.add_layer(9, np.array([[0x03, 0x03, 0x03]]))
cgraph.add_layer(10, np.array([[0x00, 0x00, 0x00], [0x01, 0x01, 0x01]]))
res = cgraph.table.read_rows()
res.consume_all()
# cgraph.logger.debug(len(res.rows))
# for row_key in res.rows.keys():
# cgraph.logger.debug(row_key)
# cgraph.logger.debug(cgraph.get_chunk_layer(chunkedgraph.deserialize_uint64(row_key)))
# cgraph.logger.debug(cgraph.get_chunk_coordinates(chunkedgraph.deserialize_uint64(row_key)))
assert serializers.serialize_uint64(to_label(cgraph, 1, 0, 0, 0, 0)) in res.rows
assert serializers.serialize_uint64(to_label(cgraph, 1, 255, 255, 255, 0)) in res.rows
assert serializers.serialize_uint64(to_label(cgraph, 10, 0, 0, 0, 1)) in res.rows
assert serializers.serialize_uint64(to_label(cgraph, 10, 0, 0, 0, 2)) in res.rows
@pytest.mark.timeout(30)
def test_double_chunk_creation(self, gen_graph):
"""
No connection between 1, 2 and 3
┌─────┬─────┐
│ A¹ │ B¹ │
│ 1 │ 3 │
│ 2 │ │
└─────┴─────┘
"""
cgraph = gen_graph(n_layers=4)
# Preparation: Build Chunk A
fake_timestamp = datetime.utcnow() - timedelta(days=10)
create_chunk(cgraph,
vertices=[to_label(cgraph, 1, 0, 0, 0, 1),
to_label(cgraph, 1, 0, 0, 0, 2)],
edges=[],
timestamp=fake_timestamp)
# Preparation: Build Chunk B
create_chunk(cgraph,
vertices=[to_label(cgraph, 1, 1, 0, 0, 1)],
edges=[],
timestamp=fake_timestamp)
cgraph.add_layer(3, np.array([[0, 0, 0], [1, 0, 0]]),
time_stamp=fake_timestamp)
cgraph.add_layer(3, np.array([[0, 0, 0], [1, 0, 0]]),
time_stamp=fake_timestamp)
cgraph.add_layer(4, np.array([[0, 0, 0]]),
time_stamp=fake_timestamp)
assert len(cgraph.range_read_chunk(layer=3, x=0, y=0, z=0)) == 6
assert len(cgraph.range_read_chunk(layer=4, x=0, y=0, z=0)) == 3
assert cgraph.get_chunk_layer(cgraph.get_root(to_label(cgraph, 1, 0, 0, 0, 1))) == 4
assert cgraph.get_chunk_layer(cgraph.get_root(to_label(cgraph, 1, 0, 0, 0, 2))) == 4
assert cgraph.get_chunk_layer(cgraph.get_root(to_label(cgraph, 1, 1, 0, 0, 1))) == 4
lvl_3_child_ids = [cgraph.get_segment_id(cgraph.read_node_id_row(cgraph.get_root(to_label(cgraph, 1, 0, 0, 0, 1)), column_keys.Hierarchy.Child)[0].value),
cgraph.get_segment_id(cgraph.read_node_id_row(cgraph.get_root(to_label(cgraph, 1, 0, 0, 0, 2)), column_keys.Hierarchy.Child)[0].value),
cgraph.get_segment_id(cgraph.read_node_id_row(cgraph.get_root(to_label(cgraph, 1, 1, 0, 0, 1)), column_keys.Hierarchy.Child)[0].value)]
assert 4 in lvl_3_child_ids
assert 5 in lvl_3_child_ids
assert 6 in lvl_3_child_ids
class TestGraphSimpleQueries:
"""
┌─────┬─────┬─────┐ L X Y Z S L X Y Z S L X Y Z S L X Y Z S
│ A¹ │ B¹ │ C¹ │ 1: 1 0 0 0 0 ─── 2 0 0 0 1 ─── 3 0 0 0 1 ─── 4 0 0 0 1
│ 1 │ 3━2━┿━━4 │ 2: 1 1 0 0 0 ─┬─ 2 1 0 0 1 ─── 3 0 0 0 2 ─┬─ 4 0 0 0 2
│ │ │ │ 3: 1 1 0 0 1 ─┘ │
└─────┴─────┴─────┘ 4: 1 2 0 0 0 ─── 2 2 0 0 1 ─── 3 1 0 0 1 ─┘
"""
@pytest.mark.timeout(30)
def test_get_parent_and_children(self, gen_graph_simplequerytest):
cgraph = gen_graph_simplequerytest
children10000 = cgraph.get_children(to_label(cgraph, 1, 0, 0, 0, 0))
children11000 = cgraph.get_children(to_label(cgraph, 1, 1, 0, 0, 0))
children11001 = cgraph.get_children(to_label(cgraph, 1, 1, 0, 0, 1))
children12000 = cgraph.get_children(to_label(cgraph, 1, 2, 0, 0, 0))
parent10000 = cgraph.get_parent(to_label(cgraph, 1, 0, 0, 0, 0), get_only_relevant_parent=True, time_stamp=None)
parent11000 = cgraph.get_parent(to_label(cgraph, 1, 1, 0, 0, 0), get_only_relevant_parent=True, time_stamp=None)
parent11001 = cgraph.get_parent(to_label(cgraph, 1, 1, 0, 0, 1), get_only_relevant_parent=True, time_stamp=None)
parent12000 = cgraph.get_parent(to_label(cgraph, 1, 2, 0, 0, 0), get_only_relevant_parent=True, time_stamp=None)
children20001 = cgraph.get_children(to_label(cgraph, 2, 0, 0, 0, 1))
children21001 = cgraph.get_children(to_label(cgraph, 2, 1, 0, 0, 1))
children22001 = cgraph.get_children(to_label(cgraph, 2, 2, 0, 0, 1))
parent20001 = cgraph.get_parent(to_label(cgraph, 2, 0, 0, 0, 1), get_only_relevant_parent=True, time_stamp=None)
parent21001 = cgraph.get_parent(to_label(cgraph, 2, 1, 0, 0, 1), get_only_relevant_parent=True, time_stamp=None)
parent22001 = cgraph.get_parent(to_label(cgraph, 2, 2, 0, 0, 1), get_only_relevant_parent=True, time_stamp=None)
children30001 = cgraph.get_children(to_label(cgraph, 3, 0, 0, 0, 1))
children30002 = cgraph.get_children(to_label(cgraph, 3, 0, 0, 0, 2))
children31001 = cgraph.get_children(to_label(cgraph, 3, 1, 0, 0, 1))
parent30001 = cgraph.get_parent(to_label(cgraph, 3, 0, 0, 0, 1), get_only_relevant_parent=True, time_stamp=None)
parent30002 = cgraph.get_parent(to_label(cgraph, 3, 0, 0, 0, 2), get_only_relevant_parent=True, time_stamp=None)
parent31001 = cgraph.get_parent(to_label(cgraph, 3, 1, 0, 0, 1), get_only_relevant_parent=True, time_stamp=None)
children40001 = cgraph.get_children(to_label(cgraph, 4, 0, 0, 0, 1))
children40002 = cgraph.get_children(to_label(cgraph, 4, 0, 0, 0, 2))
parent40001 = cgraph.get_parent(to_label(cgraph, 4, 0, 0, 0, 1), get_only_relevant_parent=True, time_stamp=None)
parent40002 = cgraph.get_parent(to_label(cgraph, 4, 0, 0, 0, 2), get_only_relevant_parent=True, time_stamp=None)
# (non-existing) Children of L1
assert np.array_equal(children10000, []) is True
assert np.array_equal(children11000, []) is True
assert np.array_equal(children11001, []) is True
assert np.array_equal(children12000, []) is True
# Parent of L1
assert parent10000 == to_label(cgraph, 2, 0, 0, 0, 1)
assert parent11000 == to_label(cgraph, 2, 1, 0, 0, 1)
assert parent11001 == to_label(cgraph, 2, 1, 0, 0, 1)
assert parent12000 == to_label(cgraph, 2, 2, 0, 0, 1)
# Children of L2
assert len(children20001) == 1 and to_label(cgraph, 1, 0, 0, 0, 0) in children20001
assert len(children21001) == 2 and to_label(cgraph, 1, 1, 0, 0, 0) in children21001 and to_label(cgraph, 1, 1, 0, 0, 1) in children21001
assert len(children22001) == 1 and to_label(cgraph, 1, 2, 0, 0, 0) in children22001
# Parent of L2
assert parent20001 == to_label(cgraph, 3, 0, 0, 0, 1) and parent21001 == to_label(cgraph, 3, 0, 0, 0, 2) or \
parent20001 == to_label(cgraph, 3, 0, 0, 0, 2) and parent21001 == to_label(cgraph, 3, 0, 0, 0, 1)
assert parent22001 == to_label(cgraph, 3, 1, 0, 0, 1)
# Children of L3
assert len(children30001) == 1 and len(children30002) == 1 and len(children31001) == 1
assert to_label(cgraph, 2, 0, 0, 0, 1) in children30001 and to_label(cgraph, 2, 1, 0, 0, 1) in children30002 or \
to_label(cgraph, 2, 0, 0, 0, 1) in children30002 and to_label(cgraph, 2, 1, 0, 0, 1) in children30001
assert to_label(cgraph, 2, 2, 0, 0, 1) in children31001
# Parent of L3
assert parent30001 == parent31001 or parent30002 == parent31001
assert (parent30001 == to_label(cgraph, 4, 0, 0, 0, 1) and parent30002 == to_label(cgraph, 4, 0, 0, 0, 2)) or \
(parent30001 == to_label(cgraph, 4, 0, 0, 0, 2) and parent30002 == to_label(cgraph, 4, 0, 0, 0, 1))
# Children of L4
if len(children40001) == 1:
assert parent20001 in children40001
assert len(children40002) == 2 and parent21001 in children40002 and parent22001 in children40002
elif len(children40001) == 2:
assert parent21001 in children40001 and parent22001 in children40001
assert len(children40002) == 1 and parent20001 in children40002
# (non-existing) Parent of L4
assert parent40001 is None
assert parent40002 is None
# # Children of (non-existing) L5
# with pytest.raises(IndexError):
# cgraph.get_children(to_label(cgraph, 5, 0, 0, 0, 1))
# # Parent of (non-existing) L5
# with pytest.raises(IndexError):
# cgraph.get_parent(to_label(cgraph, 5, 0, 0, 0, 1), get_only_relevant_parent=True, time_stamp=None)
children2_separate = cgraph.get_children([to_label(cgraph, 2, 0, 0, 0, 1),
to_label(cgraph, 2, 1, 0, 0, 1),
to_label(cgraph, 2, 2, 0, 0, 1)])
assert len(children2_separate) == 3
assert to_label(cgraph, 2, 0, 0, 0, 1) in children2_separate and \
np.all(np.isin(children2_separate[to_label(cgraph, 2, 0, 0, 0, 1)], children20001))
assert to_label(cgraph, 2, 1, 0, 0, 1) in children2_separate and \
np.all(np.isin(children2_separate[to_label(cgraph, 2, 1, 0, 0, 1)], children21001))
assert to_label(cgraph, 2, 2, 0, 0, 1) in children2_separate and \
np.all(np.isin(children2_separate[to_label(cgraph, 2, 2, 0, 0, 1)], children22001))
children2_combined = cgraph.get_children([to_label(cgraph, 2, 0, 0, 0, 1),
to_label(cgraph, 2, 1, 0, 0, 1),
to_label(cgraph, 2, 2, 0, 0, 1)], flatten=True)
assert len(children2_combined) == 4 and \
np.all(np.isin(children20001, children2_combined)) and \
np.all(np.isin(children21001, children2_combined)) and \
np.all(np.isin(children22001, children2_combined))
@pytest.mark.timeout(30)
def test_get_root(self, gen_graph_simplequerytest):
cgraph = gen_graph_simplequerytest
root10000 = cgraph.get_root(to_label(cgraph, 1, 0, 0, 0, 0),
time_stamp=None)
root11000 = cgraph.get_root(to_label(cgraph, 1, 1, 0, 0, 0),
time_stamp=None)
root11001 = cgraph.get_root(to_label(cgraph, 1, 1, 0, 0, 1),
time_stamp=None)
root12000 = cgraph.get_root(to_label(cgraph, 1, 2, 0, 0, 0),
time_stamp=None)
with pytest.raises(Exception) as e:
cgraph.get_root(0)
assert (root10000 == to_label(cgraph, 4, 0, 0, 0, 1) and
root11000 == root11001 == root12000 == to_label(
cgraph, 4, 0, 0, 0, 2)) or \
(root10000 == to_label(cgraph, 4, 0, 0, 0, 2) and
root11000 == root11001 == root12000 == to_label(
cgraph, 4, 0, 0, 0, 1))
@pytest.mark.timeout(30)
def test_get_subgraph_nodes(self, gen_graph_simplequerytest):
cgraph = gen_graph_simplequerytest
root1 = cgraph.get_root(to_label(cgraph, 1, 0, 0, 0, 0))
root2 = cgraph.get_root(to_label(cgraph, 1, 1, 0, 0, 0))
lvl1_nodes_1 = cgraph.get_subgraph_nodes(root1)
lvl1_nodes_2 = cgraph.get_subgraph_nodes(root2)
assert len(lvl1_nodes_1) == 1
assert len(lvl1_nodes_2) == 3
assert to_label(cgraph, 1, 0, 0, 0, 0) in lvl1_nodes_1
assert to_label(cgraph, 1, 1, 0, 0, 0) in lvl1_nodes_2
assert to_label(cgraph, 1, 1, 0, 0, 1) in lvl1_nodes_2
assert to_label(cgraph, 1, 2, 0, 0, 0) in lvl1_nodes_2
lvl2_nodes_1 = cgraph.get_subgraph_nodes(root1, return_layers=[2])
lvl2_nodes_2 = cgraph.get_subgraph_nodes(root2, return_layers=[2])
assert len(lvl2_nodes_1) == 1
assert len(lvl2_nodes_2) == 2
assert to_label(cgraph, 2, 0, 0, 0, 1) in lvl2_nodes_1
assert to_label(cgraph, 2, 1, 0, 0, 1) in lvl2_nodes_2
assert to_label(cgraph, 2, 2, 0, 0, 1) in lvl2_nodes_2
lvl3_nodes_1 = cgraph.get_subgraph_nodes(root1, return_layers=[3])
lvl3_nodes_2 = cgraph.get_subgraph_nodes(root2, return_layers=[3])
assert len(lvl3_nodes_1) == 1
assert len(lvl3_nodes_2) == 2
assert to_label(cgraph, 3, 0, 0, 0, 1) in lvl3_nodes_1
assert to_label(cgraph, 3, 0, 0, 0, 2) in lvl3_nodes_2
assert to_label(cgraph, 3, 1, 0, 0, 1) in lvl3_nodes_2
lvl4_node = cgraph.get_subgraph_nodes(root1, return_layers=[4])
assert len(lvl4_node) == 1
assert root1 in lvl4_node
layers = cgraph.get_subgraph_nodes(root2, return_layers=[1, 4])
assert len(layers) == 2 and 1 in layers and 4 in layers
assert len(layers[4]) == 1 and root2 in layers[4]
assert len(layers[1]) == 3
assert to_label(cgraph, 1, 1, 0, 0, 0) in layers[1]
assert to_label(cgraph, 1, 1, 0, 0, 1) in layers[1]
assert to_label(cgraph, 1, 2, 0, 0, 0) in layers[1]
lvl2_nodes = cgraph.get_subgraph_nodes(root2, return_layers=[2],
bounding_box=[[1, 0, 0], [2, 1, 1]],
bb_is_coordinate=False)
assert len(lvl2_nodes) == 1
assert to_label(cgraph, 2, 1, 0, 0, 1) in lvl2_nodes
lvl2_parent = cgraph.get_parent(to_label(cgraph, 1, 1, 0, 0, 0))
lvl1_nodes = cgraph.get_subgraph_nodes(lvl2_parent)
assert len(lvl1_nodes) == 2
assert to_label(cgraph, 1, 1, 0, 0, 0) in lvl1_nodes
assert to_label(cgraph, 1, 1, 0, 0, 1) in lvl1_nodes
@pytest.mark.timeout(30)
def test_get_subgraph_edges(self, gen_graph_simplequerytest):
cgraph = gen_graph_simplequerytest
root1 = cgraph.get_root(to_label(cgraph, 1, 0, 0, 0, 0))
root2 = cgraph.get_root(to_label(cgraph, 1, 1, 0, 0, 0))
edges, affinities, areas = cgraph.get_subgraph_edges(root1)
assert len(edges) == 0 and len(affinities) == 0 and len(areas) == 0
edges, affinities, areas = cgraph.get_subgraph_edges(root2)
assert [to_label(cgraph, 1, 1, 0, 0, 0),
to_label(cgraph, 1, 1, 0, 0, 1)] in edges or \
[to_label(cgraph, 1, 1, 0, 0, 1),
to_label(cgraph, 1, 1, 0, 0, 0)] in edges
assert [to_label(cgraph, 1, 1, 0, 0, 0),
to_label(cgraph, 1, 2, 0, 0, 0)] in edges or \
[to_label(cgraph, 1, 2, 0, 0, 0),
to_label(cgraph, 1, 1, 0, 0, 0)] in edges
# assert len(edges) == 2 and len(affinities) == 2 and len(areas) == 2
lvl2_parent = cgraph.get_parent(to_label(cgraph, 1, 1, 0, 0, 0))
edges, affinities, areas = cgraph.get_subgraph_edges(lvl2_parent)
assert [to_label(cgraph, 1, 1, 0, 0, 0),
to_label(cgraph, 1, 1, 0, 0, 1)] in edges or \
[to_label(cgraph, 1, 1, 0, 0, 1),
to_label(cgraph, 1, 1, 0, 0, 0)] in edges
assert [to_label(cgraph, 1, 1, 0, 0, 0),
to_label(cgraph, 1, 2, 0, 0, 0)] in edges or \
[to_label(cgraph, 1, 2, 0, 0, 0),
to_label(cgraph, 1, 1, 0, 0, 0)] in edges
assert len(edges) == 2
@pytest.mark.timeout(30)
def test_get_subgraph_nodes_bb(self, gen_graph_simplequerytest):
cgraph = gen_graph_simplequerytest
        bb = np.array([[1, 0, 0], [2, 1, 1]], dtype=int)
bb_coord = bb * cgraph.chunk_size
childs_1 = cgraph.get_subgraph_nodes(cgraph.get_root(to_label(cgraph, 1, 1, 0, 0, 1)), bounding_box=bb)
childs_2 = cgraph.get_subgraph_nodes(cgraph.get_root(to_label(cgraph, 1, 1, 0, 0, 1)), bounding_box=bb_coord, bb_is_coordinate=True)
        assert np.all(np.sort(childs_1) == np.sort(childs_2))
@pytest.mark.timeout(30)
def test_get_atomic_partners(self, gen_graph_simplequerytest):
cgraph = gen_graph_simplequerytest
class TestGraphMerge:
@pytest.mark.timeout(30)
def test_merge_pair_same_chunk(self, gen_graph):
"""
Add edge between existing RG supervoxels 1 and 2 (same chunk)
Expected: Same (new) parent for RG 1 and 2 on Layer two
┌─────┐ ┌─────┐
│ A¹ │ │ A¹ │
│ 1 2 │ => │ 1━2 │
│ │ │ │
└─────┘ └─────┘
"""
cgraph = gen_graph(n_layers=2)
# Preparation: Build Chunk A
fake_timestamp = datetime.utcnow() - timedelta(days=10)
create_chunk(cgraph,
vertices=[to_label(cgraph, 1, 0, 0, 0, 0), to_label(cgraph, 1, 0, 0, 0, 1)],
edges=[],
timestamp=fake_timestamp)
# Merge
new_root_ids = cgraph.add_edges("Jane Doe", [to_label(cgraph, 1, 0, 0, 0, 1), to_label(cgraph, 1, 0, 0, 0, 0)], affinities=0.3)
assert len(new_root_ids) == 1
new_root_id = new_root_ids[0]
# Check
assert cgraph.get_parent(to_label(cgraph, 1, 0, 0, 0, 0)) == new_root_id
assert cgraph.get_parent(to_label(cgraph, 1, 0, 0, 0, 1)) == new_root_id
partners, affinities, areas = cgraph.get_atomic_partners(to_label(cgraph, 1, 0, 0, 0, 0))
assert partners[0] == to_label(cgraph, 1, 0, 0, 0, 1) and affinities[0] == np.float32(0.3)
partners, affinities, areas = cgraph.get_atomic_partners(to_label(cgraph, 1, 0, 0, 0, 1))
assert partners[0] == to_label(cgraph, 1, 0, 0, 0, 0) and affinities[0] == np.float32(0.3)
leaves = np.unique(cgraph.get_subgraph_nodes(new_root_id))
assert len(leaves) == 2
assert to_label(cgraph, 1, 0, 0, 0, 0) in leaves
assert to_label(cgraph, 1, 0, 0, 0, 1) in leaves
@pytest.mark.timeout(30)
def test_merge_pair_neighboring_chunks(self, gen_graph):
"""
Add edge between existing RG supervoxels 1 and 2 (neighboring chunks)
┌─────┬─────┐ ┌─────┬─────┐
│ A¹ │ B¹ │ │ A¹ │ B¹ │
│ 1 │ 2 │ => │ 1━━┿━━2 │
│ │ │ │ │ │
└─────┴─────┘ └─────┴─────┘
"""
cgraph = gen_graph(n_layers=3)
# Preparation: Build Chunk A
fake_timestamp = datetime.utcnow() - timedelta(days=10)
create_chunk(cgraph,
vertices=[to_label(cgraph, 1, 0, 0, 0, 0)],
edges=[],
timestamp=fake_timestamp)
# Preparation: Build Chunk B
create_chunk(cgraph,
vertices=[to_label(cgraph, 1, 1, 0, 0, 0)],
edges=[],
timestamp=fake_timestamp)
cgraph.add_layer(3, np.array([[0, 0, 0], [1, 0, 0]]), time_stamp=fake_timestamp)
# Merge
new_root_ids = cgraph.add_edges("Jane Doe", [to_label(cgraph, 1, 1, 0, 0, 0), to_label(cgraph, 1, 0, 0, 0, 0)], affinities=0.3)
assert len(new_root_ids) == 1
new_root_id = new_root_ids[0]
# Check
assert cgraph.get_root(to_label(cgraph, 1, 0, 0, 0, 0)) == new_root_id
assert cgraph.get_root(to_label(cgraph, 1, 1, 0, 0, 0)) == new_root_id
partners, affinities, areas = cgraph.get_atomic_partners(to_label(cgraph, 1, 0, 0, 0, 0))
assert partners[0] == to_label(cgraph, 1, 1, 0, 0, 0) and affinities[0] == np.float32(0.3)
partners, affinities, areas = cgraph.get_atomic_partners(to_label(cgraph, 1, 1, 0, 0, 0))
assert partners[0] == to_label(cgraph, 1, 0, 0, 0, 0) and affinities[0] == np.float32(0.3)
leaves = np.unique(cgraph.get_subgraph_nodes(new_root_id))
assert len(leaves) == 2
assert to_label(cgraph, 1, 0, 0, 0, 0) in leaves
assert to_label(cgraph, 1, 1, 0, 0, 0) in leaves
@pytest.mark.timeout(30)
def test_merge_pair_disconnected_chunks(self, gen_graph):
"""
Add edge between existing RG supervoxels 1 and 2 (disconnected chunks)
┌─────┐ ┌─────┐ ┌─────┐ ┌─────┐
│ A¹ │ ... │ Z¹ │ │ A¹ │ ... │ Z¹ │
│ 1 │ │ 2 │ => │ 1━━┿━━━━━┿━━2 │
│ │ │ │ │ │ │ │
└─────┘ └─────┘ └─────┘ └─────┘
"""
cgraph = gen_graph(n_layers=9)
# Preparation: Build Chunk A
fake_timestamp = datetime.utcnow() - timedelta(days=10)
create_chunk(cgraph,
vertices=[to_label(cgraph, 1, 0, 0, 0, 0)],
edges=[],
timestamp=fake_timestamp)
# Preparation: Build Chunk Z
create_chunk(cgraph,
vertices=[to_label(cgraph, 1, 127, 127, 127, 0)],
edges=[],
timestamp=fake_timestamp)
cgraph.add_layer(3, np.array([[0x00, 0x00, 0x00]]), time_stamp=fake_timestamp)
cgraph.add_layer(3, np.array([[0x7F, 0x7F, 0x7F]]), time_stamp=fake_timestamp)
cgraph.add_layer(4, np.array([[0x00, 0x00, 0x00]]), time_stamp=fake_timestamp)
cgraph.add_layer(4, np.array([[0x3F, 0x3F, 0x3F]]), time_stamp=fake_timestamp)
cgraph.add_layer(5, np.array([[0x00, 0x00, 0x00]]), time_stamp=fake_timestamp)
cgraph.add_layer(5, np.array([[0x1F, 0x1F, 0x1F]]), time_stamp=fake_timestamp)
cgraph.add_layer(6, np.array([[0x00, 0x00, 0x00]]), time_stamp=fake_timestamp)
cgraph.add_layer(6, np.array([[0x0F, 0x0F, 0x0F]]), time_stamp=fake_timestamp)
cgraph.add_layer(7, np.array([[0x00, 0x00, 0x00]]), time_stamp=fake_timestamp)
cgraph.add_layer(7, np.array([[0x07, 0x07, 0x07]]), time_stamp=fake_timestamp)
cgraph.add_layer(8, np.array([[0x00, 0x00, 0x00]]), time_stamp=fake_timestamp)
cgraph.add_layer(8, np.array([[0x03, 0x03, 0x03]]), time_stamp=fake_timestamp)
cgraph.add_layer(9, np.array([[0x00, 0x00, 0x00], [0x01, 0x01, 0x01]]), time_stamp=fake_timestamp)
# Merge
new_root_ids = cgraph.add_edges("Jane Doe", [to_label(cgraph, 1, 127, 127, 127, 0), to_label(cgraph, 1, 0, 0, 0, 0)], affinities=0.3)
assert len(new_root_ids) == 1
new_root_id = new_root_ids[0]
# Check
assert cgraph.get_root(to_label(cgraph, 1, 0, 0, 0, 0)) == new_root_id
assert cgraph.get_root(to_label(cgraph, 1, 127, 127, 127, 0)) == new_root_id
partners, affinities, areas = cgraph.get_atomic_partners(to_label(cgraph, 1, 0, 0, 0, 0))
assert partners[0] == to_label(cgraph, 1, 127, 127, 127, 0) and affinities[0] == np.float32(0.3)
partners, affinities, areas = cgraph.get_atomic_partners(to_label(cgraph, 1, 127, 127, 127, 0))
assert partners[0] == to_label(cgraph, 1, 0, 0, 0, 0) and affinities[0] == np.float32(0.3)
leaves = np.unique(cgraph.get_subgraph_nodes(new_root_id))
assert len(leaves) == 2
assert to_label(cgraph, 1, 0, 0, 0, 0) in leaves
assert to_label(cgraph, 1, 127, 127, 127, 0) in leaves
@pytest.mark.timeout(30)
def test_merge_pair_already_connected(self, gen_graph):
"""
Add edge between already connected RG supervoxels 1 and 2 (same chunk).
Expected: No change, i.e. same parent (to_label(cgraph, 2, 0, 0, 0, 1)), affinity (0.5) and timestamp as before
┌─────┐ ┌─────┐
│ A¹ │ │ A¹ │
│ 1━2 │ => │ 1━2 │
│ │ │ │
└─────┘ └─────┘
"""
cgraph = gen_graph(n_layers=2)
# Preparation: Build Chunk A
fake_timestamp = datetime.utcnow() - timedelta(days=10)
create_chunk(cgraph,
vertices=[to_label(cgraph, 1, 0, 0, 0, 0), to_label(cgraph, 1, 0, 0, 0, 1)],
edges=[(to_label(cgraph, 1, 0, 0, 0, 0), to_label(cgraph, 1, 0, 0, 0, 1), 0.5)],
timestamp=fake_timestamp)
res_old = cgraph.table.read_rows()
res_old.consume_all()
# Merge
cgraph.add_edges("Jane Doe", [to_label(cgraph, 1, 0, 0, 0, 1), to_label(cgraph, 1, 0, 0, 0, 0)])
res_new = cgraph.table.read_rows()
res_new.consume_all()
# Check
if res_old.rows != res_new.rows:
warn("Rows were modified when merging a pair of already connected supervoxels. "
"While probably not an error, it is an unnecessary operation.")
@pytest.mark.timeout(30)
def test_merge_triple_chain_to_full_circle_same_chunk(self, gen_graph):
"""
Add edge between indirectly connected RG supervoxels 1 and 2 (same chunk)
┌─────┐ ┌─────┐
│ A¹ │ │ A¹ │
│ 1 2 │ => │ 1━2 │
│ ┗3┛ │ │ ┗3┛ │
└─────┘ └─────┘
"""
cgraph = gen_graph(n_layers=2)
# Preparation: Build Chunk A
fake_timestamp = datetime.utcnow() - timedelta(days=10)
create_chunk(cgraph,
vertices=[to_label(cgraph, 1, 0, 0, 0, 0), to_label(cgraph, 1, 0, 0, 0, 1), to_label(cgraph, 1, 0, 0, 0, 2)],
edges=[(to_label(cgraph, 1, 0, 0, 0, 0), to_label(cgraph, 1, 0, 0, 0, 2), 0.5),
(to_label(cgraph, 1, 0, 0, 0, 1), to_label(cgraph, 1, 0, 0, 0, 2), 0.5)],
timestamp=fake_timestamp)
# Merge
new_root_ids = cgraph.add_edges("Jane Doe", [to_label(cgraph, 1, 0, 0, 0, 1), to_label(cgraph, 1, 0, 0, 0, 0)], affinities=0.3)
assert len(new_root_ids) == 1
new_root_id = new_root_ids[0]
# Check
assert cgraph.get_root(to_label(cgraph, 1, 0, 0, 0, 0)) == new_root_id
assert cgraph.get_root(to_label(cgraph, 1, 0, 0, 0, 1)) == new_root_id
assert cgraph.get_root(to_label(cgraph, 1, 0, 0, 0, 2)) == new_root_id
partners, affinities, areas = cgraph.get_atomic_partners(to_label(cgraph, 1, 0, 0, 0, 0))
assert len(partners) == 2
assert to_label(cgraph, 1, 0, 0, 0, 1) in partners
assert to_label(cgraph, 1, 0, 0, 0, 2) in partners
partners, affinities, areas = cgraph.get_atomic_partners(to_label(cgraph, 1, 0, 0, 0, 1))
assert len(partners) == 2
assert to_label(cgraph, 1, 0, 0, 0, 0) in partners
assert to_label(cgraph, 1, 0, 0, 0, 2) in partners
partners, affinities, areas = cgraph.get_atomic_partners(to_label(cgraph, 1, 0, 0, 0, 2))
assert len(partners) == 2
assert to_label(cgraph, 1, 0, 0, 0, 0) in partners
assert to_label(cgraph, 1, 0, 0, 0, 1) in partners
leaves = np.unique(cgraph.get_subgraph_nodes(new_root_id))
assert len(leaves) == 3
assert to_label(cgraph, 1, 0, 0, 0, 0) in leaves
assert to_label(cgraph, 1, 0, 0, 0, 1) in leaves
assert to_label(cgraph, 1, 0, 0, 0, 2) in leaves
@pytest.mark.timeout(30)
def test_merge_triple_chain_to_full_circle_neighboring_chunks(self, gen_graph):
"""
Add edge between indirectly connected RG supervoxels 1 and 2 (neighboring chunks)
┌─────┬─────┐ ┌─────┬─────┐
│ A¹ │ B¹ │ │ A¹ │ B¹ │
│ 1 │ 2 │ => │ 1━━┿━━2 │
│ ┗3━┿━━┛ │ │ ┗3━┿━━┛ │
└─────┴─────┘ └─────┴─────┘
"""
cgraph = gen_graph(n_layers=3)
# Preparation: Build Chunk A
fake_timestamp = datetime.utcnow() - timedelta(days=10)
create_chunk(cgraph,
vertices=[to_label(cgraph, 1, 0, 0, 0, 0), to_label(cgraph, 1, 0, 0, 0, 1)],
edges=[(to_label(cgraph, 1, 0, 0, 0, 0), to_label(cgraph, 1, 0, 0, 0, 1), 0.5),
(to_label(cgraph, 1, 0, 0, 0, 1), to_label(cgraph, 1, 1, 0, 0, 0), inf)],
timestamp=fake_timestamp)
# Preparation: Build Chunk B
create_chunk(cgraph,
vertices=[to_label(cgraph, 1, 1, 0, 0, 0)],
edges=[(to_label(cgraph, 1, 1, 0, 0, 0), to_label(cgraph, 1, 0, 0, 0, 1), inf)],
timestamp=fake_timestamp)
cgraph.add_layer(3, np.array([[0, 0, 0], [1, 0, 0]]), time_stamp=fake_timestamp)
# Merge
new_root_ids = cgraph.add_edges("Jane Doe", [to_label(cgraph, 1, 1, 0, 0, 0), to_label(cgraph, 1, 0, 0, 0, 0)], affinities=1.0)
assert len(new_root_ids) == 1
new_root_id = new_root_ids[0]
# Check
assert cgraph.get_root(to_label(cgraph, 1, 0, 0, 0, 0)) == new_root_id
assert cgraph.get_root(to_label(cgraph, 1, 0, 0, 0, 1)) == new_root_id
assert cgraph.get_root(to_label(cgraph, 1, 1, 0, 0, 0)) == new_root_id
partners, affinities, areas = cgraph.get_atomic_partners(to_label(cgraph, 1, 0, 0, 0, 0))
assert len(partners) == 2
assert to_label(cgraph, 1, 0, 0, 0, 1) in partners
assert to_label(cgraph, 1, 1, 0, 0, 0) in partners
partners, affinities, areas = cgraph.get_atomic_partners(to_label(cgraph, 1, 0, 0, 0, 1))
assert len(partners) == 2
assert to_label(cgraph, 1, 0, 0, 0, 0) in partners
assert to_label(cgraph, 1, 1, 0, 0, 0) in partners
partners, affinities, areas = cgraph.get_atomic_partners(to_label(cgraph, 1, 1, 0, 0, 0))
assert len(partners) == 2
assert to_label(cgraph, 1, 0, 0, 0, 0) in partners
assert to_label(cgraph, 1, 0, 0, 0, 1) in partners
leaves = np.unique(cgraph.get_subgraph_nodes(new_root_id))
assert len(leaves) == 3
assert to_label(cgraph, 1, 0, 0, 0, 0) in leaves
assert to_label(cgraph, 1, 0, 0, 0, 1) in leaves
assert to_label(cgraph, 1, 1, 0, 0, 0) in leaves
cross_edge_dict_layers = graph_tests.root_cross_edge_test(new_root_id, cg=cgraph) # dict: layer -> cross_edge_dict
n_cross_edges_layer = collections.defaultdict(list)
for child_layer in cross_edge_dict_layers.keys():
for layer in cross_edge_dict_layers[child_layer].keys():
n_cross_edges_layer[layer].append(len(cross_edge_dict_layers[child_layer][layer]))
for layer in n_cross_edges_layer.keys():
assert len(np.unique(n_cross_edges_layer[layer])) == 1
@pytest.mark.timeout(30)
def test_merge_triple_chain_to_full_circle_disconnected_chunks(self, gen_graph):
"""
Add edge between indirectly connected RG supervoxels 1 and 2 (disconnected chunks)
┌─────┐ ┌─────┐ ┌─────┐ ┌─────┐
│ A¹ │ ... │ Z¹ │ │ A¹ │ ... │ Z¹ │
│ 1 │ │ 2 │ => │ 1━━┿━━━━━┿━━2 │
│ ┗3━┿━━━━━┿━━┛ │ │ ┗3━┿━━━━━┿━━┛ │
└─────┘ └─────┘ └─────┘ └─────┘
"""
cgraph = gen_graph(n_layers=9)
# Preparation: Build Chunk A
fake_timestamp = datetime.utcnow() - timedelta(days=10)
create_chunk(cgraph,
vertices=[to_label(cgraph, 1, 0, 0, 0, 0), to_label(cgraph, 1, 0, 0, 0, 1)],
edges=[(to_label(cgraph, 1, 0, 0, 0, 0), to_label(cgraph, 1, 0, 0, 0, 1), 0.5),
(to_label(cgraph, 1, 0, 0, 0, 1), to_label(cgraph, 1, 127, 127, 127, 0), inf)],
timestamp=fake_timestamp)
# Preparation: Build Chunk B
create_chunk(cgraph,
vertices=[to_label(cgraph, 1, 127, 127, 127, 0)],
edges=[(to_label(cgraph, 1, 127, 127, 127, 0), to_label(cgraph, 1, 0, 0, 0, 1), inf)],
timestamp=fake_timestamp)
cgraph.add_layer(3, np.array([[0x00, 0x00, 0x00]]), time_stamp=fake_timestamp)
cgraph.add_layer(3, np.array([[0x7F, 0x7F, 0x7F]]), time_stamp=fake_timestamp)
cgraph.add_layer(4, np.array([[0x00, 0x00, 0x00]]), time_stamp=fake_timestamp)
cgraph.add_layer(4, np.array([[0x3F, 0x3F, 0x3F]]), time_stamp=fake_timestamp)
cgraph.add_layer(5, np.array([[0x00, 0x00, 0x00]]), time_stamp=fake_timestamp)
cgraph.add_layer(5, np.array([[0x1F, 0x1F, 0x1F]]), time_stamp=fake_timestamp)
cgraph.add_layer(6, np.array([[0x00, 0x00, 0x00]]), time_stamp=fake_timestamp)
cgraph.add_layer(6, np.array([[0x0F, 0x0F, 0x0F]]), time_stamp=fake_timestamp)
cgraph.add_layer(7, np.array([[0x00, 0x00, 0x00]]), time_stamp=fake_timestamp)
cgraph.add_layer(7, np.array([[0x07, 0x07, 0x07]]), time_stamp=fake_timestamp)
cgraph.add_layer(8, np.array([[0x00, 0x00, 0x00]]), time_stamp=fake_timestamp)
cgraph.add_layer(8, np.array([[0x03, 0x03, 0x03]]), time_stamp=fake_timestamp)
cgraph.add_layer(9, np.array([[0x00, 0x00, 0x00], [0x01, 0x01, 0x01]]), time_stamp=fake_timestamp)
# Merge
new_root_ids = cgraph.add_edges("Jane Doe", [to_label(cgraph, 1, 127, 127, 127, 0), to_label(cgraph, 1, 0, 0, 0, 0)], affinities=1.0)
assert len(new_root_ids) == 1
new_root_id = new_root_ids[0]
# Check
assert cgraph.get_root(to_label(cgraph, 1, 0, 0, 0, 0)) == new_root_id
assert cgraph.get_root(to_label(cgraph, 1, 0, 0, 0, 1)) == new_root_id
assert cgraph.get_root(to_label(cgraph, 1, 127, 127, 127, 0)) == new_root_id
partners, affinities, areas = cgraph.get_atomic_partners(to_label(cgraph, 1, 0, 0, 0, 0))
assert len(partners) == 2
assert to_label(cgraph, 1, 0, 0, 0, 1) in partners
assert to_label(cgraph, 1, 127, 127, 127, 0) in partners
partners, affinities, areas = cgraph.get_atomic_partners(to_label(cgraph, 1, 0, 0, 0, 1))
assert len(partners) == 2
assert to_label(cgraph, 1, 0, 0, 0, 0) in partners
assert to_label(cgraph, 1, 127, 127, 127, 0) in partners
partners, affinities, areas = cgraph.get_atomic_partners(to_label(cgraph, 1, 127, 127, 127, 0))
assert len(partners) == 2
assert to_label(cgraph, 1, 0, 0, 0, 0) in partners
assert to_label(cgraph, 1, 0, 0, 0, 1) in partners
leaves = np.unique(cgraph.get_subgraph_nodes(new_root_id))
assert len(leaves) == 3
assert to_label(cgraph, 1, 0, 0, 0, 0) in leaves
assert to_label(cgraph, 1, 0, 0, 0, 1) in leaves
assert to_label(cgraph, 1, 127, 127, 127, 0) in leaves
cross_edge_dict_layers = graph_tests.root_cross_edge_test(new_root_id, cg=cgraph) # dict: layer -> cross_edge_dict
n_cross_edges_layer = collections.defaultdict(list)
for child_layer in cross_edge_dict_layers.keys():
for layer in cross_edge_dict_layers[child_layer].keys():
n_cross_edges_layer[layer].append(len(cross_edge_dict_layers[child_layer][layer]))
for layer in n_cross_edges_layer.keys():
assert len(np.unique(n_cross_edges_layer[layer])) == 1
@pytest.mark.timeout(30)
def test_merge_same_node(self, gen_graph):
"""
Try to add loop edge between RG supervoxel 1 and itself
┌─────┐
│ A¹ │
│ 1 │ => Reject
│ │
└─────┘
"""
cgraph = gen_graph(n_layers=2)
# Preparation: Build Chunk A
fake_timestamp = datetime.utcnow() - timedelta(days=10)
create_chunk(cgraph,
vertices=[to_label(cgraph, 1, 0, 0, 0, 0)],
edges=[],
timestamp=fake_timestamp)
res_old = cgraph.table.read_rows()
res_old.consume_all()
# Merge
assert cgraph.add_edges("Jane Doe", [to_label(cgraph, 1, 0, 0, 0, 0), to_label(cgraph, 1, 0, 0, 0, 0)]) is None
res_new = cgraph.table.read_rows()
res_new.consume_all()
assert res_new.rows == res_old.rows
@pytest.mark.timeout(30)
def test_merge_pair_abstract_nodes(self, gen_graph):
"""
Try to add edge between RG supervoxel 1 and abstract node "2"
┌─────┐
│ B² │
│ "2" │
│ │
└─────┘
┌─────┐ => Reject
│ A¹ │
│ 1 │
│ │
└─────┘
"""
cgraph = gen_graph(n_layers=3)
# Preparation: Build Chunk A
fake_timestamp = datetime.utcnow() - timedelta(days=10)
create_chunk(cgraph,
vertices=[to_label(cgraph, 1, 0, 0, 0, 0)],
edges=[],
timestamp=fake_timestamp)
# Preparation: Build Chunk B
create_chunk(cgraph,
vertices=[to_label(cgraph, 1, 1, 0, 0, 0)],
edges=[],
timestamp=fake_timestamp)
cgraph.add_layer(3, np.array([[0, 0, 0], [1, 0, 0]]), time_stamp=fake_timestamp)
res_old = cgraph.table.read_rows()
res_old.consume_all()
# Merge
assert cgraph.add_edges("Jane Doe", [to_label(cgraph, 1, 0, 0, 0, 0), to_label(cgraph, 2, 1, 0, 0, 1)]) is None
res_new = cgraph.table.read_rows()
res_new.consume_all()
assert res_new.rows == res_old.rows
@pytest.mark.timeout(30)
def test_diagonal_connections(self, gen_graph):
"""
Create graph with edge between RG supervoxels 1 and 2 (same chunk)
and edge between RG supervoxels 1 and 3 (neighboring chunks)
┌─────┬─────┐
│ A¹ │ B¹ │
│ 2 1━┿━━3 │
│ / │ │
┌─────┬─────┐
│ | │ │
│ 4━━┿━━5 │
│ C¹ │ D¹ │
└─────┴─────┘
"""
cgraph = gen_graph(n_layers=3)
# Chunk A
create_chunk(cgraph,
vertices=[to_label(cgraph, 1, 0, 0, 0, 0),
to_label(cgraph, 1, 0, 0, 0, 1)],
edges=[(to_label(cgraph, 1, 0, 0, 0, 0),
to_label(cgraph, 1, 1, 0, 0, 0), inf),
(to_label(cgraph, 1, 0, 0, 0, 0),
to_label(cgraph, 1, 0, 1, 0, 0), inf)])
# Chunk B
create_chunk(cgraph,
vertices=[to_label(cgraph, 1, 1, 0, 0, 0)],
edges=[(to_label(cgraph, 1, 1, 0, 0, 0),
to_label(cgraph, 1, 0, 0, 0, 0), inf)])
# Chunk C
create_chunk(cgraph,
vertices=[to_label(cgraph, 1, 0, 1, 0, 0)],
edges=[(to_label(cgraph, 1, 0, 1, 0, 0),
to_label(cgraph, 1, 1, 1, 0, 0), inf),
(to_label(cgraph, 1, 0, 1, 0, 0),
to_label(cgraph, 1, 0, 0, 0, 0), inf)])
# Chunk D
create_chunk(cgraph,
vertices=[to_label(cgraph, 1, 1, 1, 0, 0)],
edges=[(to_label(cgraph, 1, 1, 1, 0, 0),
to_label(cgraph, 1, 0, 1, 0, 0), inf)])
cgraph.add_layer(3,
np.array([[0, 0, 0], [1, 0, 0], [0, 1, 0], [1, 1, 0]]))
rr = cgraph.range_read_chunk(
chunk_id=cgraph.get_chunk_id(layer=3, x=0, y=0, z=0))
root_ids_t0 = list(rr.keys())
assert len(root_ids_t0) == 2
child_ids = []
for root_id in root_ids_t0:
cgraph.logger.debug(("root_id", root_id))
child_ids.extend(cgraph.get_subgraph_nodes(root_id))
new_roots = cgraph.add_edges("Jane Doe",
[to_label(cgraph, 1, 0, 0, 0, 0),
to_label(cgraph, 1, 0, 0, 0, 1)],
affinities=[.5])
root_ids = []
for child_id in child_ids:
root_ids.append(cgraph.get_root(child_id))
assert len(np.unique(root_ids)) == 1
root_id = root_ids[0]
assert root_id == new_roots[0]
cross_edge_dict_layers = graph_tests.root_cross_edge_test(root_id,
cg=cgraph) # dict: layer -> cross_edge_dict
n_cross_edges_layer = collections.defaultdict(list)
for child_layer in cross_edge_dict_layers.keys():
for layer in cross_edge_dict_layers[child_layer].keys():
n_cross_edges_layer[layer].append(
len(cross_edge_dict_layers[child_layer][layer]))
for layer in n_cross_edges_layer.keys():
assert len(np.unique(n_cross_edges_layer[layer])) == 1
@pytest.mark.timeout(30)
def test_cross_edges(self, gen_graph):
"""
Add edge between RG supervoxels 1 and 4 (same chunk) and verify that the cross-chunk edge counts stay consistent across layers
┌─...─┬────────┬─────┐ ┌─...─┬────────┬─────┐
| │ A¹ │ B¹ │ | │ A¹ │ B¹ │
| │ 4 1━━┿━━5 │ => | │ 4━━1━━┿━━5 │
| │ / │ | │ | │ / │ │
| │ 3 2━━┿━━6 │ | │ 3 2━━┿━━6 │
└─...─┴────────┴─────┘ └─...─┴────────┴─────┘
"""
cgraph = gen_graph(n_layers=6)
chunk_offset = 6
# Preparation: Build Chunk A
fake_timestamp = datetime.utcnow() - timedelta(days=10)
create_chunk(cgraph,
vertices=[to_label(cgraph, 1, chunk_offset, 0, 0, 0), to_label(cgraph, 1, chunk_offset, 0, 0, 1),
to_label(cgraph, 1, chunk_offset, 0, 0, 2), to_label(cgraph, 1, chunk_offset, 0, 0, 3)],
edges=[(to_label(cgraph, 1, chunk_offset, 0, 0, 0), to_label(cgraph, 1, chunk_offset+1, 0, 0, 0), inf),
(to_label(cgraph, 1, chunk_offset, 0, 0, 1), to_label(cgraph, 1, chunk_offset+1, 0, 0, 1), inf),
(to_label(cgraph, 1, chunk_offset, 0, 0, 0), to_label(cgraph, 1, chunk_offset, 0, 0, 2), .5)],
timestamp=fake_timestamp)
# Preparation: Build Chunk B
create_chunk(cgraph,
vertices=[to_label(cgraph, 1, chunk_offset+1, 0, 0, 0), to_label(cgraph, 1, chunk_offset+1, 0, 0, 1)],
edges=[(to_label(cgraph, 1, chunk_offset+1, 0, 0, 0), to_label(cgraph, 1, chunk_offset, 0, 0, 0), inf),
(to_label(cgraph, 1, chunk_offset+1, 0, 0, 1), to_label(cgraph, 1, chunk_offset, 0, 0, 1), inf),
(to_label(cgraph, 1, chunk_offset+1, 0, 0, 0), to_label(cgraph, 1, chunk_offset+2, 0, 0, 0), inf),
(to_label(cgraph, 1, chunk_offset+1, 0, 0, 1), to_label(cgraph, 1, chunk_offset+2, 0, 0, 1), inf)],
timestamp=fake_timestamp)
# Preparation: Build Chunk C
create_chunk(cgraph,
vertices=[to_label(cgraph, 1, chunk_offset+2, 0, 0, 0), to_label(cgraph, 1, chunk_offset+2, 0, 0, 1)],
edges=[(to_label(cgraph, 1, chunk_offset+2, 0, 0, 0), to_label(cgraph, 1, chunk_offset+1, 0, 0, 0), inf),
(to_label(cgraph, 1, chunk_offset+2, 0, 0, 1), to_label(cgraph, 1, chunk_offset+1, 0, 0, 1), inf),
(to_label(cgraph, 1, chunk_offset+2, 0, 0, 0), to_label(cgraph, 1, chunk_offset+3, 0, 0, 0), inf),
(to_label(cgraph, 1, chunk_offset+2, 0, 0, 0), to_label(cgraph, 1, chunk_offset+2, 0, 0, 1), .5)],
timestamp=fake_timestamp)
# Preparation: Build Chunk D
create_chunk(cgraph,
vertices=[to_label(cgraph, 1, chunk_offset+3, 0, 0, 0)],
edges=[(to_label(cgraph, 1, chunk_offset+3, 0, 0, 0), to_label(cgraph, 1, chunk_offset+2, 0, 0, 0), inf),
(to_label(cgraph, 1, chunk_offset+3, 0, 0, 0), to_label(cgraph, 1, chunk_offset+4, 0, 0, 0), inf)],
timestamp=fake_timestamp)
# Preparation: Build Chunk E
create_chunk(cgraph,
vertices=[to_label(cgraph, 1, chunk_offset+4, 0, 0, 0)],
edges=[(to_label(cgraph, 1, chunk_offset+4, 0, 0, 0), to_label(cgraph, 1, chunk_offset+3, 0, 0, 0), inf),
(to_label(cgraph, 1, chunk_offset+4, 0, 0, 0), to_label(cgraph, 1, chunk_offset+5, 0, 0, 0), inf)],
timestamp=fake_timestamp)
# Preparation: Build Chunk F
create_chunk(cgraph,
vertices=[to_label(cgraph, 1, chunk_offset+5, 0, 0, 0)],
edges=[(to_label(cgraph, 1, chunk_offset+5, 0, 0, 0), to_label(cgraph, 1, chunk_offset+4, 0, 0, 0), inf)],
timestamp=fake_timestamp)
for i_layer in range(3, 7):
for i_chunk in range(0, 2 ** (7 - i_layer), 2):
cgraph.add_layer(i_layer, np.array([[i_chunk, 0, 0], [i_chunk+1, 0, 0]]), time_stamp=fake_timestamp)
new_roots = cgraph.add_edges("Jane Doe",
[to_label(cgraph, 1, chunk_offset, 0, 0, 0),
to_label(cgraph, 1, chunk_offset, 0, 0, 3)],
affinities=.9)
assert len(new_roots) == 1
root_id = new_roots[0]
cross_edge_dict_layers = graph_tests.root_cross_edge_test(root_id, cg=cgraph) # dict: layer -> cross_edge_dict
n_cross_edges_layer = collections.defaultdict(list)
for child_layer in cross_edge_dict_layers.keys():
for layer in cross_edge_dict_layers[child_layer].keys():
n_cross_edges_layer[layer].append(
len(cross_edge_dict_layers[child_layer][layer]))
for layer in n_cross_edges_layer.keys():
cgraph.logger.debug("LAYER %d" % layer)
assert len(np.unique(n_cross_edges_layer[layer])) == 1
class TestGraphSplit:
@pytest.mark.timeout(30)
def test_split_pair_same_chunk(self, gen_graph):
"""
Remove edge between existing RG supervoxels 1 and 2 (same chunk)
Expected: Different (new) parents for RG 1 and 2 on Layer two
┌─────┐ ┌─────┐
│ A¹ │ │ A¹ │
│ 1━2 │ => │ 1 2 │
│ │ │ │
└─────┘ └─────┘
"""
cgraph = gen_graph(n_layers=2)
# Preparation: Build Chunk A
fake_timestamp = datetime.utcnow() - timedelta(days=10)
create_chunk(cgraph,
vertices=[to_label(cgraph, 1, 0, 0, 0, 0), to_label(cgraph, 1, 0, 0, 0, 1)],
edges=[(to_label(cgraph, 1, 0, 0, 0, 0), to_label(cgraph, 1, 0, 0, 0, 1), 0.5)],
timestamp=fake_timestamp)
# Split
new_root_ids = cgraph.remove_edges("Jane Doe", to_label(cgraph, 1, 0, 0, 0, 1), to_label(cgraph, 1, 0, 0, 0, 0), mincut=False)
# Check New State
assert len(new_root_ids) == 2
assert cgraph.get_root(to_label(cgraph, 1, 0, 0, 0, 0)) != cgraph.get_root(to_label(cgraph, 1, 0, 0, 0, 1))
partners, affinities, areas = cgraph.get_atomic_partners(to_label(cgraph, 1, 0, 0, 0, 0))
assert len(partners) == 0
partners, affinities, areas = cgraph.get_atomic_partners(to_label(cgraph, 1, 0, 0, 0, 1))
assert len(partners) == 0
leaves = np.unique(cgraph.get_subgraph_nodes(cgraph.get_root(to_label(cgraph, 1, 0, 0, 0, 0))))
assert len(leaves) == 1 and to_label(cgraph, 1, 0, 0, 0, 0) in leaves
leaves = np.unique(cgraph.get_subgraph_nodes(cgraph.get_root(to_label(cgraph, 1, 0, 0, 0, 1))))
assert len(leaves) == 1 and to_label(cgraph, 1, 0, 0, 0, 1) in leaves
# Check Old State still accessible
assert cgraph.get_root(to_label(cgraph, 1, 0, 0, 0, 0), time_stamp=fake_timestamp) == \
cgraph.get_root(to_label(cgraph, 1, 0, 0, 0, 1), time_stamp=fake_timestamp)
partners, affinities, areas = cgraph.get_atomic_partners(to_label(cgraph, 1, 0, 0, 0, 0), time_stamp=fake_timestamp)
assert len(partners) == 1 and partners[0] == to_label(cgraph, 1, 0, 0, 0, 1)
partners, affinities, areas = cgraph.get_atomic_partners(to_label(cgraph, 1, 0, 0, 0, 1), time_stamp=fake_timestamp)
assert len(partners) == 1 and partners[0] == to_label(cgraph, 1, 0, 0, 0, 0)
leaves = np.unique(cgraph.get_subgraph_nodes(cgraph.get_root(to_label(cgraph, 1, 0, 0, 0, 0), time_stamp=fake_timestamp)))
assert len(leaves) == 2
assert to_label(cgraph, 1, 0, 0, 0, 0) in leaves
assert to_label(cgraph, 1, 0, 0, 0, 1) in leaves
# assert len(cgraph.get_latest_roots()) == 2
# assert len(cgraph.get_latest_roots(fake_timestamp)) == 1
def test_split_nonexisting_edge(self, gen_graph):
"""
Try to remove a non-existing edge between RG supervoxels 1 and 3 (same chunk)
Expected: No change; the component keeps a single root
┌─────┐ ┌─────┐
│ A¹ │ │ A¹ │
│ 1━2 │ => │ 1━2 │
│ | │ │ | │
│ 3 │ │ 3 │
└─────┘ └─────┘
"""
cgraph = gen_graph(n_layers=2)
# Preparation: Build Chunk A
fake_timestamp = datetime.utcnow() - timedelta(days=10)
create_chunk(cgraph,
vertices=[to_label(cgraph, 1, 0, 0, 0, 0), to_label(cgraph, 1, 0, 0, 0, 1)],
edges=[(to_label(cgraph, 1, 0, 0, 0, 0), to_label(cgraph, 1, 0, 0, 0, 1), 0.5),
(to_label(cgraph, 1, 0, 0, 0, 2), to_label(cgraph, 1, 0, 0, 0, 1), 0.5)],
timestamp=fake_timestamp)
# Split
new_root_ids = cgraph.remove_edges("Jane Doe", to_label(cgraph, 1, 0, 0, 0, 0), to_label(cgraph, 1, 0, 0, 0, 2), mincut=False)
assert len(new_root_ids) == 1
assert len(cgraph.get_atomic_node_partners(to_label(cgraph, 1, 0, 0, 0, 0))) == 1
@pytest.mark.timeout(30)
def test_split_pair_neighboring_chunks(self, gen_graph):
"""
Remove edge between existing RG supervoxels 1 and 2 (neighboring chunks)
┌─────┬─────┐ ┌─────┬─────┐
│ A¹ │ B¹ │ │ A¹ │ B¹ │
│ 1━━┿━━2 │ => │ 1 │ 2 │
│ │ │ │ │ │
└─────┴─────┘ └─────┴─────┘
"""
cgraph = gen_graph(n_layers=3)
# Preparation: Build Chunk A
fake_timestamp = datetime.utcnow() - timedelta(days=10)
create_chunk(cgraph,
vertices=[to_label(cgraph, 1, 0, 0, 0, 0)],
edges=[(to_label(cgraph, 1, 0, 0, 0, 0), to_label(cgraph, 1, 1, 0, 0, 0), 1.0)],
timestamp=fake_timestamp)
# Preparation: Build Chunk B
create_chunk(cgraph,
vertices=[to_label(cgraph, 1, 1, 0, 0, 0)],
edges=[(to_label(cgraph, 1, 1, 0, 0, 0), to_label(cgraph, 1, 0, 0, 0, 0), 1.0)],
timestamp=fake_timestamp)
cgraph.add_layer(3, np.array([[0, 0, 0], [1, 0, 0]]), time_stamp=fake_timestamp)
# Split
new_root_ids = cgraph.remove_edges("Jane Doe", to_label(cgraph, 1, 1, 0, 0, 0), to_label(cgraph, 1, 0, 0, 0, 0), mincut=False)
# Check New State
assert len(new_root_ids) == 2
assert cgraph.get_root(to_label(cgraph, 1, 0, 0, 0, 0)) != cgraph.get_root(to_label(cgraph, 1, 1, 0, 0, 0))
partners, affinities, areas = cgraph.get_atomic_partners(to_label(cgraph, 1, 0, 0, 0, 0))
assert len(partners) == 0
partners, affinities, areas = cgraph.get_atomic_partners(to_label(cgraph, 1, 1, 0, 0, 0))
assert len(partners) == 0
leaves = np.unique(cgraph.get_subgraph_nodes(cgraph.get_root(to_label(cgraph, 1, 0, 0, 0, 0))))
assert len(leaves) == 1 and to_label(cgraph, 1, 0, 0, 0, 0) in leaves
leaves = np.unique(cgraph.get_subgraph_nodes(cgraph.get_root(to_label(cgraph, 1, 1, 0, 0, 0))))
assert len(leaves) == 1 and to_label(cgraph, 1, 1, 0, 0, 0) in leaves
# Check Old State still accessible
assert cgraph.get_root(to_label(cgraph, 1, 0, 0, 0, 0), time_stamp=fake_timestamp) == \
cgraph.get_root(to_label(cgraph, 1, 1, 0, 0, 0), time_stamp=fake_timestamp)
partners, affinities, areas = cgraph.get_atomic_partners(to_label(cgraph, 1, 0, 0, 0, 0), time_stamp=fake_timestamp)
assert len(partners) == 1 and partners[0] == to_label(cgraph, 1, 1, 0, 0, 0)
partners, affinities, areas = cgraph.get_atomic_partners(to_label(cgraph, 1, 1, 0, 0, 0), time_stamp=fake_timestamp)
assert len(partners) == 1 and partners[0] == to_label(cgraph, 1, 0, 0, 0, 0)
leaves = np.unique(cgraph.get_subgraph_nodes(cgraph.get_root(to_label(cgraph, 1, 0, 0, 0, 0), time_stamp=fake_timestamp)))
assert len(leaves) == 2
assert to_label(cgraph, 1, 0, 0, 0, 0) in leaves
assert to_label(cgraph, 1, 1, 0, 0, 0) in leaves
assert len(cgraph.get_latest_roots()) == 2
assert len(cgraph.get_latest_roots(fake_timestamp)) == 1
@pytest.mark.timeout(30)
def test_split_verify_cross_chunk_edges(self, gen_graph):
"""
Remove edge between RG supervoxels 1 and 2 (same chunk) and verify that the cross-chunk edge between 1 and 3 is preserved
┌─────┬─────┬─────┐ ┌─────┬─────┬─────┐
| │ A¹ │ B¹ │ | │ A¹ │ B¹ │
| │ 1━━┿━━3 │ => | │ 1━━┿━━3 │
| │ | │ │ | │ │ │
| │ 2 │ │ | │ 2 │ │
└─────┴─────┴─────┘ └─────┴─────┴─────┘
"""
cgraph = gen_graph(n_layers=4)
# Preparation: Build Chunk A
fake_timestamp = datetime.utcnow() - timedelta(days=10)
create_chunk(cgraph,
vertices=[to_label(cgraph, 1, 1, 0, 0, 0), to_label(cgraph, 1, 1, 0, 0, 1)],
edges=[(to_label(cgraph, 1, 1, 0, 0, 0), to_label(cgraph, 1, 2, 0, 0, 0), inf),
(to_label(cgraph, 1, 1, 0, 0, 0), to_label(cgraph, 1, 1, 0, 0, 1), .5)],
timestamp=fake_timestamp)
# Preparation: Build Chunk B
create_chunk(cgraph,
vertices=[to_label(cgraph, 1, 2, 0, 0, 0)],
edges=[(to_label(cgraph, 1, 2, 0, 0, 0), to_label(cgraph, 1, 1, 0, 0, 0), inf)],
timestamp=fake_timestamp)
cgraph.add_layer(3, np.array([[0, 0, 0], [1, 0, 0]]), time_stamp=fake_timestamp)
cgraph.add_layer(3, np.array([[2, 0, 0], [3, 0, 0]]), time_stamp=fake_timestamp)
cgraph.add_layer(4, np.array([[0, 0, 0], [1, 0, 0]]), time_stamp=fake_timestamp)
assert cgraph.get_root(to_label(cgraph, 1, 1, 0, 0, 0)) == cgraph.get_root(to_label(cgraph, 1, 1, 0, 0, 1))
assert cgraph.get_root(to_label(cgraph, 1, 1, 0, 0, 0)) == cgraph.get_root(to_label(cgraph, 1, 2, 0, 0, 0))
# Split
new_root_ids = cgraph.remove_edges("Jane Doe", to_label(cgraph, 1, 1, 0, 0, 0), to_label(cgraph, 1, 1, 0, 0, 1), mincut=False)
svs2 = cgraph.get_subgraph_nodes(new_root_ids[0])
svs1 = cgraph.get_subgraph_nodes(new_root_ids[1])
len_set = {1, 2}
assert len(svs1) in len_set
len_set.remove(len(svs1))
assert len(svs2) in len_set
# Check New State
assert len(new_root_ids) == 2
assert cgraph.get_root(to_label(cgraph, 1, 1, 0, 0, 0)) != cgraph.get_root(to_label(cgraph, 1, 1, 0, 0, 1))
assert cgraph.get_root(to_label(cgraph, 1, 1, 0, 0, 0)) == cgraph.get_root(to_label(cgraph, 1, 2, 0, 0, 0))
cc_dict = cgraph.get_atomic_cross_edge_dict(cgraph.get_parent(to_label(cgraph, 1, 1, 0, 0, 0)))
assert len(cc_dict[3]) == 1
assert cc_dict[3][0][0] == to_label(cgraph, 1, 1, 0, 0, 0)
assert cc_dict[3][0][1] == to_label(cgraph, 1, 2, 0, 0, 0)
assert len(cgraph.get_latest_roots()) == 2
assert len(cgraph.get_latest_roots(fake_timestamp)) == 1
@pytest.mark.timeout(30)
def test_split_verify_loop(self, gen_graph):
"""
Remove the edges between RG supervoxel 1 and supervoxels 3 and 4 (same chunk); the cross-chunk connection between 1 and 5 must remain
┌─────┬────────┬─────┐ ┌─────┬────────┬─────┐
| │ A¹ │ B¹ │ | │ A¹ │ B¹ │
| │ 4━━1━━┿━━5 │ => | │ 4 1━━┿━━5 │
| │ / │ | │ | │ │ | │
| │ 3 2━━┿━━6 │ | │ 3 2━━┿━━6 │
└─────┴────────┴─────┘ └─────┴────────┴─────┘
"""
cgraph = gen_graph(n_layers=4)
# Preparation: Build Chunk A
fake_timestamp = datetime.utcnow() - timedelta(days=10)
create_chunk(cgraph,
vertices=[to_label(cgraph, 1, 1, 0, 0, 0), to_label(cgraph, 1, 1, 0, 0, 1),
to_label(cgraph, 1, 1, 0, 0, 2), to_label(cgraph, 1, 1, 0, 0, 3)],
edges=[(to_label(cgraph, 1, 1, 0, 0, 0), to_label(cgraph, 1, 2, 0, 0, 0), inf),
(to_label(cgraph, 1, 1, 0, 0, 1), to_label(cgraph, 1, 2, 0, 0, 1), inf),
(to_label(cgraph, 1, 1, 0, 0, 0), to_label(cgraph, 1, 1, 0, 0, 2), .5),
(to_label(cgraph, 1, 1, 0, 0, 0), to_label(cgraph, 1, 1, 0, 0, 3), .5)],
timestamp=fake_timestamp)
# Preparation: Build Chunk B
create_chunk(cgraph,
vertices=[to_label(cgraph, 1, 2, 0, 0, 0), to_label(cgraph, 1, 2, 0, 0, 1)],
edges=[(to_label(cgraph, 1, 2, 0, 0, 0), to_label(cgraph, 1, 1, 0, 0, 0), inf),
(to_label(cgraph, 1, 2, 0, 0, 1), to_label(cgraph, 1, 1, 0, 0, 1), inf),
(to_label(cgraph, 1, 2, 0, 0, 1), to_label(cgraph, 1, 2, 0, 0, 0), .5)],
timestamp=fake_timestamp)
cgraph.add_layer(3, np.array([[0, 0, 0], [1, 0, 0]]), time_stamp=fake_timestamp)
cgraph.add_layer(3, np.array([[2, 0, 0], [3, 0, 0]]), time_stamp=fake_timestamp)
cgraph.add_layer(4, np.array([[0, 0, 0], [1, 0, 0]]), time_stamp=fake_timestamp)
assert cgraph.get_root(to_label(cgraph, 1, 1, 0, 0, 0)) == cgraph.get_root(to_label(cgraph, 1, 1, 0, 0, 1))
assert cgraph.get_root(to_label(cgraph, 1, 1, 0, 0, 0)) == cgraph.get_root(to_label(cgraph, 1, 2, 0, 0, 0))
# Split
new_root_ids = cgraph.remove_edges("Jane Doe", to_label(cgraph, 1, 1, 0, 0, 0), to_label(cgraph, 1, 1, 0, 0, 2), mincut=False)
assert len(new_root_ids) == 2
new_root_ids = cgraph.remove_edges("Jane Doe", to_label(cgraph, 1, 1, 0, 0, 0), to_label(cgraph, 1, 1, 0, 0, 3), mincut=False)
assert len(new_root_ids) == 2
cc_dict = cgraph.get_atomic_cross_edge_dict(cgraph.get_parent(to_label(cgraph, 1, 1, 0, 0, 0)))
assert len(cc_dict[3]) == 1
cc_dict = cgraph.get_atomic_cross_edge_dict(cgraph.get_parent(to_label(cgraph, 1, 1, 0, 0, 0)))
assert len(cc_dict[3]) == 1
assert len(cgraph.get_latest_roots()) == 3
assert len(cgraph.get_latest_roots(fake_timestamp)) == 1
@pytest.mark.timeout(30)
def test_split_pair_disconnected_chunks(self, gen_graph):
"""
Remove edge between existing RG supervoxels 1 and 2 (disconnected chunks)
┌─────┐ ┌─────┐ ┌─────┐ ┌─────┐
│ A¹ │ ... │ Z¹ │ │ A¹ │ ... │ Z¹ │
│ 1━━┿━━━━━┿━━2 │ => │ 1 │ │ 2 │
│ │ │ │ │ │ │ │
└─────┘ └─────┘ └─────┘ └─────┘
"""
cgraph = gen_graph(n_layers=9)
# Preparation: Build Chunk A
fake_timestamp = datetime.utcnow() - timedelta(days=10)
create_chunk(cgraph,
vertices=[to_label(cgraph, 1, 0, 0, 0, 0)],
edges=[(to_label(cgraph, 1, 0, 0, 0, 0), to_label(cgraph, 1, 127, 127, 127, 0), 1.0)],
timestamp=fake_timestamp)
# Preparation: Build Chunk Z
create_chunk(cgraph,
vertices=[to_label(cgraph, 1, 127, 127, 127, 0)],
edges=[(to_label(cgraph, 1, 127, 127, 127, 0), to_label(cgraph, 1, 0, 0, 0, 0), 1.0)],
timestamp=fake_timestamp)
cgraph.add_layer(3, np.array([[0x00, 0x00, 0x00]]), time_stamp=fake_timestamp)
cgraph.add_layer(3, np.array([[0x7F, 0x7F, 0x7F]]), time_stamp=fake_timestamp)
cgraph.add_layer(4, np.array([[0x00, 0x00, 0x00]]), time_stamp=fake_timestamp)
cgraph.add_layer(4, np.array([[0x3F, 0x3F, 0x3F]]), time_stamp=fake_timestamp)
cgraph.add_layer(5, np.array([[0x00, 0x00, 0x00]]), time_stamp=fake_timestamp)
cgraph.add_layer(5, np.array([[0x1F, 0x1F, 0x1F]]), time_stamp=fake_timestamp)
cgraph.add_layer(6, np.array([[0x00, 0x00, 0x00]]), time_stamp=fake_timestamp)
cgraph.add_layer(6, np.array([[0x0F, 0x0F, 0x0F]]), time_stamp=fake_timestamp)
cgraph.add_layer(7, np.array([[0x00, 0x00, 0x00]]), time_stamp=fake_timestamp)
cgraph.add_layer(7, np.array([[0x07, 0x07, 0x07]]), time_stamp=fake_timestamp)
cgraph.add_layer(8, np.array([[0x00, 0x00, 0x00]]), time_stamp=fake_timestamp)
cgraph.add_layer(8, np.array([[0x03, 0x03, 0x03]]), time_stamp=fake_timestamp)
cgraph.add_layer(9, np.array([[0x00, 0x00, 0x00], [0x01, 0x01, 0x01]]), time_stamp=fake_timestamp)
# Split
new_roots = cgraph.remove_edges("Jane Doe", to_label(cgraph, 1, 127, 127, 127, 0), to_label(cgraph, 1, 0, 0, 0, 0), mincut=False)
# Check New State
assert len(new_roots) == 2
assert cgraph.get_root(to_label(cgraph, 1, 0, 0, 0, 0)) != cgraph.get_root(to_label(cgraph, 1, 127, 127, 127, 0))
partners, affinities, areas = cgraph.get_atomic_partners(to_label(cgraph, 1, 0, 0, 0, 0))
assert len(partners) == 0
partners, affinities, areas = cgraph.get_atomic_partners(to_label(cgraph, 1, 127, 127, 127, 0))
assert len(partners) == 0
leaves = np.unique(cgraph.get_subgraph_nodes(cgraph.get_root(to_label(cgraph, 1, 0, 0, 0, 0))))
assert len(leaves) == 1 and to_label(cgraph, 1, 0, 0, 0, 0) in leaves
leaves = np.unique(cgraph.get_subgraph_nodes(cgraph.get_root(to_label(cgraph, 1, 127, 127, 127, 0))))
assert len(leaves) == 1 and to_label(cgraph, 1, 127, 127, 127, 0) in leaves
# Check Old State still accessible
assert cgraph.get_root(to_label(cgraph, 1, 0, 0, 0, 0), time_stamp=fake_timestamp) == \
cgraph.get_root(to_label(cgraph, 1, 127, 127, 127, 0), time_stamp=fake_timestamp)
partners, affinities, areas = cgraph.get_atomic_partners(to_label(cgraph, 1, 0, 0, 0, 0), time_stamp=fake_timestamp)
assert len(partners) == 1 and partners[0] == to_label(cgraph, 1, 127, 127, 127, 0)
partners, affinities, areas = cgraph.get_atomic_partners(to_label(cgraph, 1, 127, 127, 127, 0), time_stamp=fake_timestamp)
assert len(partners) == 1 and partners[0] == to_label(cgraph, 1, 0, 0, 0, 0)
leaves = np.unique(cgraph.get_subgraph_nodes(cgraph.get_root(to_label(cgraph, 1, 0, 0, 0, 0), time_stamp=fake_timestamp)))
assert len(leaves) == 2
assert to_label(cgraph, 1, 0, 0, 0, 0) in leaves
assert to_label(cgraph, 1, 127, 127, 127, 0) in leaves
@pytest.mark.timeout(30)
def test_split_pair_already_disconnected(self, gen_graph):
"""
Try to remove an edge between already disconnected RG supervoxels 1 and 2 (same chunk).
Expected: PreconditionError is raised and the underlying rows stay unchanged
┌─────┐ ┌─────┐
│ A¹ │ │ A¹ │
│ 1 2 │ => │ 1 2 │
│ │ │ │
└─────┘ └─────┘
"""
cgraph = gen_graph(n_layers=2)
# Preparation: Build Chunk A
fake_timestamp = datetime.utcnow() - timedelta(days=10)
create_chunk(cgraph,
vertices=[to_label(cgraph, 1, 0, 0, 0, 0), to_label(cgraph, 1, 0, 0, 0, 1)],
edges=[],
timestamp=fake_timestamp)
res_old = cgraph.table.read_rows()
res_old.consume_all()
# Split
with pytest.raises(cg_exceptions.PreconditionError):
cgraph.remove_edges("Jane Doe", to_label(cgraph, 1, 0, 0, 0, 1), to_label(cgraph, 1, 0, 0, 0, 0), mincut=False)
res_new = cgraph.table.read_rows()
res_new.consume_all()
# Check
if res_old.rows != res_new.rows:
warn("Rows were modified when splitting a pair of already disconnected supervoxels. "
"While probably not an error, it is an unnecessary operation.")
@pytest.mark.timeout(30)
def test_split_full_circle_to_triple_chain_same_chunk(self, gen_graph):
"""
Remove direct edge between RG supervoxels 1 and 2, but leave indirect connection (same chunk)
┌─────┐ ┌─────┐
│ A¹ │ │ A¹ │
│ 1━2 │ => │ 1 2 │
│ ┗3┛ │ │ ┗3┛ │
└─────┘ └─────┘
"""
cgraph = gen_graph(n_layers=2)
# Preparation: Build Chunk A
fake_timestamp = datetime.utcnow() - timedelta(days=10)
create_chunk(cgraph,
vertices=[to_label(cgraph, 1, 0, 0, 0, 0), to_label(cgraph, 1, 0, 0, 0, 1), to_label(cgraph, 1, 0, 0, 0, 2)],
edges=[(to_label(cgraph, 1, 0, 0, 0, 0), to_label(cgraph, 1, 0, 0, 0, 2), 0.5),
(to_label(cgraph, 1, 0, 0, 0, 1), to_label(cgraph, 1, 0, 0, 0, 2), 0.5),
(to_label(cgraph, 1, 0, 0, 0, 0), to_label(cgraph, 1, 0, 0, 0, 1), 0.3)],
timestamp=fake_timestamp)
# Split
new_root_ids = cgraph.remove_edges("Jane Doe", to_label(cgraph, 1, 0, 0, 0, 1), to_label(cgraph, 1, 0, 0, 0, 0), mincut=False)
# Check New State
assert len(new_root_ids) == 1
assert cgraph.get_root(to_label(cgraph, 1, 0, 0, 0, 0)) == new_root_ids[0]
assert cgraph.get_root(to_label(cgraph, 1, 0, 0, 0, 1)) == new_root_ids[0]
assert cgraph.get_root(to_label(cgraph, 1, 0, 0, 0, 2)) == new_root_ids[0]
partners, affinities, areas = cgraph.get_atomic_partners(to_label(cgraph, 1, 0, 0, 0, 0))
assert len(partners) == 1 and partners[0] == to_label(cgraph, 1, 0, 0, 0, 2)
partners, affinities, areas = cgraph.get_atomic_partners(to_label(cgraph, 1, 0, 0, 0, 1))
assert len(partners) == 1 and partners[0] == to_label(cgraph, 1, 0, 0, 0, 2)
partners, affinities, areas = cgraph.get_atomic_partners(to_label(cgraph, 1, 0, 0, 0, 2))
assert len(partners) == 2
assert to_label(cgraph, 1, 0, 0, 0, 0) in partners
assert to_label(cgraph, 1, 0, 0, 0, 1) in partners
leaves = np.unique(cgraph.get_subgraph_nodes(new_root_ids[0]))
assert len(leaves) == 3
assert to_label(cgraph, 1, 0, 0, 0, 0) in leaves
assert to_label(cgraph, 1, 0, 0, 0, 1) in leaves
assert to_label(cgraph, 1, 0, 0, 0, 2) in leaves
# Check Old State still accessible
old_root_id = cgraph.get_root(to_label(cgraph, 1, 0, 0, 0, 0), time_stamp=fake_timestamp)
assert new_root_ids[0] != old_root_id
# assert len(cgraph.get_latest_roots()) == 1
# assert len(cgraph.get_latest_roots(fake_timestamp)) == 1
@pytest.mark.timeout(30)
def test_split_full_circle_to_triple_chain_neighboring_chunks(self, gen_graph):
"""
Remove direct edge between RG supervoxels 1 and 2, but leave indirect connection (neighboring chunks)
┌─────┬─────┐ ┌─────┬─────┐
│ A¹ │ B¹ │ │ A¹ │ B¹ │
│ 1━━┿━━2 │ => │ 1 │ 2 │
│ ┗3━┿━━┛ │ │ ┗3━┿━━┛ │
└─────┴─────┘ └─────┴─────┘
"""
cgraph = gen_graph(n_layers=3)
# Preparation: Build Chunk A
fake_timestamp = datetime.utcnow() - timedelta(days=10)
create_chunk(cgraph,
vertices=[to_label(cgraph, 1, 0, 0, 0, 0), to_label(cgraph, 1, 0, 0, 0, 1)],
edges=[(to_label(cgraph, 1, 0, 0, 0, 0), to_label(cgraph, 1, 0, 0, 0, 1), 0.5),
(to_label(cgraph, 1, 0, 0, 0, 1), to_label(cgraph, 1, 1, 0, 0, 0), 0.5),
(to_label(cgraph, 1, 0, 0, 0, 0), to_label(cgraph, 1, 1, 0, 0, 0), 0.3)],
timestamp=fake_timestamp)
# Preparation: Build Chunk B
create_chunk(cgraph,
vertices=[to_label(cgraph, 1, 1, 0, 0, 0)],
edges=[(to_label(cgraph, 1, 1, 0, 0, 0), to_label(cgraph, 1, 0, 0, 0, 1), 0.5),
(to_label(cgraph, 1, 1, 0, 0, 0), to_label(cgraph, 1, 0, 0, 0, 0), 0.3)],
timestamp=fake_timestamp)
cgraph.add_layer(3, np.array([[0, 0, 0], [1, 0, 0]]), time_stamp=fake_timestamp)
# Split
new_root_ids = cgraph.remove_edges("Jane Doe", to_label(cgraph, 1, 1, 0, 0, 0), to_label(cgraph, 1, 0, 0, 0, 0), mincut=False)
# Check New State
assert len(new_root_ids) == 1
assert cgraph.get_root(to_label(cgraph, 1, 0, 0, 0, 0)) == new_root_ids[0]
assert cgraph.get_root(to_label(cgraph, 1, 0, 0, 0, 1)) == new_root_ids[0]
assert cgraph.get_root(to_label(cgraph, 1, 1, 0, 0, 0)) == new_root_ids[0]
partners, affinities, areas = cgraph.get_atomic_partners(to_label(cgraph, 1, 0, 0, 0, 0))
assert len(partners) == 1 and partners[0] == to_label(cgraph, 1, 0, 0, 0, 1)
partners, affinities, areas = cgraph.get_atomic_partners(to_label(cgraph, 1, 1, 0, 0, 0))
assert len(partners) == 1 and partners[0] == to_label(cgraph, 1, 0, 0, 0, 1)
partners, affinities, areas = cgraph.get_atomic_partners(to_label(cgraph, 1, 0, 0, 0, 1))
assert len(partners) == 2
assert to_label(cgraph, 1, 0, 0, 0, 0) in partners
assert to_label(cgraph, 1, 1, 0, 0, 0) in partners
leaves = np.unique(cgraph.get_subgraph_nodes(new_root_ids[0]))
assert len(leaves) == 3
assert to_label(cgraph, 1, 0, 0, 0, 0) in leaves
assert to_label(cgraph, 1, 0, 0, 0, 1) in leaves
assert to_label(cgraph, 1, 1, 0, 0, 0) in leaves
# Check Old State still accessible
old_root_id = cgraph.get_root(to_label(cgraph, 1, 0, 0, 0, 0), time_stamp=fake_timestamp)
assert new_root_ids[0] != old_root_id
assert len(cgraph.get_latest_roots()) == 1
assert len(cgraph.get_latest_roots(fake_timestamp)) == 1
@pytest.mark.timeout(30)
def test_split_full_circle_to_triple_chain_disconnected_chunks(self, gen_graph):
"""
Remove direct edge between RG supervoxels 1 and 2, but leave indirect connection (disconnected chunks)
┌─────┐ ┌─────┐ ┌─────┐ ┌─────┐
│ A¹ │ ... │ Z¹ │ │ A¹ │ ... │ Z¹ │
│ 1━━┿━━━━━┿━━2 │ => │ 1 │ │ 2 │
│ ┗3━┿━━━━━┿━━┛ │ │ ┗3━┿━━━━━┿━━┛ │
└─────┘ └─────┘ └─────┘ └─────┘
"""
cgraph = gen_graph(n_layers=9)
loc = 2
# Preparation: Build Chunk A
fake_timestamp = datetime.utcnow() - timedelta(days=10)
create_chunk(cgraph,
vertices=[to_label(cgraph, 1, 0, 0, 0, 0), to_label(cgraph, 1, 0, 0, 0, 1)],
edges=[(to_label(cgraph, 1, 0, 0, 0, 0), to_label(cgraph, 1, 0, 0, 0, 1), 0.5),
(to_label(cgraph, 1, 0, 0, 0, 1), to_label(cgraph, 1, loc, loc, loc, 0), 0.5),
(to_label(cgraph, 1, 0, 0, 0, 0), to_label(cgraph, 1, loc, loc, loc, 0), 0.3)],
timestamp=fake_timestamp)
# Preparation: Build Chunk Z
create_chunk(cgraph,
vertices=[to_label(cgraph, 1, loc, loc, loc, 0)],
edges=[(to_label(cgraph, 1, loc, loc, loc, 0), to_label(cgraph, 1, 0, 0, 0, 1), 0.5),
(to_label(cgraph, 1, loc, loc, loc, 0), to_label(cgraph, 1, 0, 0, 0, 0), 0.3)],
timestamp=fake_timestamp)
for i_layer in range(3, 10):
if loc // 2**(i_layer - 3) == 1:
cgraph.add_layer(i_layer, np.array([[0, 0, 0], [1, 1, 1]]), time_stamp=fake_timestamp)
elif loc // 2**(i_layer - 3) == 0:
cgraph.add_layer(i_layer, np.array([[0, 0, 0]]), time_stamp=fake_timestamp)
else:
cgraph.add_layer(i_layer, np.array([[0, 0, 0]]), time_stamp=fake_timestamp)
cgraph.add_layer(i_layer, np.array([[loc // 2**(i_layer - 3), loc // 2**(i_layer - 3), loc // 2**(i_layer - 3)]]), time_stamp=fake_timestamp)
assert cgraph.get_root(to_label(cgraph, 1, loc, loc, loc, 0)) == \
cgraph.get_root(to_label(cgraph, 1, 0, 0, 0, 0)) == \
cgraph.get_root(to_label(cgraph, 1, 0, 0, 0, 1))
# Split
new_root_ids = cgraph.remove_edges("Jane Doe", to_label(cgraph, 1, loc, loc, loc, 0), to_label(cgraph, 1, 0, 0, 0, 0), mincut=False)
# Check New State
assert len(new_root_ids) == 1
assert cgraph.get_root(to_label(cgraph, 1, 0, 0, 0, 0)) == new_root_ids[0]
assert cgraph.get_root(to_label(cgraph, 1, 0, 0, 0, 1)) == new_root_ids[0]
assert cgraph.get_root(to_label(cgraph, 1, loc, loc, loc, 0)) == new_root_ids[0]
partners, affinities, areas = cgraph.get_atomic_partners(to_label(cgraph, 1, 0, 0, 0, 0))
assert len(partners) == 1 and partners[0] == to_label(cgraph, 1, 0, 0, 0, 1)
partners, affinities, areas = cgraph.get_atomic_partners(to_label(cgraph, 1, loc, loc, loc, 0))
assert len(partners) == 1 and partners[0] == to_label(cgraph, 1, 0, 0, 0, 1)
partners, affinities, areas = cgraph.get_atomic_partners(to_label(cgraph, 1, 0, 0, 0, 1))
assert len(partners) == 2
assert to_label(cgraph, 1, 0, 0, 0, 0) in partners
assert to_label(cgraph, 1, loc, loc, loc, 0) in partners
leaves = np.unique(cgraph.get_subgraph_nodes(new_root_ids[0]))
assert len(leaves) == 3
assert to_label(cgraph, 1, 0, 0, 0, 0) in leaves
assert to_label(cgraph, 1, 0, 0, 0, 1) in leaves
assert to_label(cgraph, 1, loc, loc, loc, 0) in leaves
# Check Old State still accessible
old_root_id = cgraph.get_root(to_label(cgraph, 1, 0, 0, 0, 0), time_stamp=fake_timestamp)
assert new_root_ids[0] != old_root_id
assert len(cgraph.get_latest_roots()) == 1
assert len(cgraph.get_latest_roots(fake_timestamp)) == 1
@pytest.mark.timeout(30)
def test_split_same_node(self, gen_graph):
"""
Try to remove (non-existing) edge between RG supervoxel 1 and itself
┌─────┐
│ A¹ │
│ 1 │ => Reject
│ │
└─────┘
"""
cgraph = gen_graph(n_layers=2)
# Preparation: Build Chunk A
fake_timestamp = datetime.utcnow() - timedelta(days=10)
create_chunk(cgraph,
vertices=[to_label(cgraph, 1, 0, 0, 0, 0)],
edges=[],
timestamp=fake_timestamp)
res_old = cgraph.table.read_rows()
res_old.consume_all()
# Split
with pytest.raises(cg_exceptions.PreconditionError):
cgraph.remove_edges("Jane Doe", to_label(cgraph, 1, 0, 0, 0, 0), to_label(cgraph, 1, 0, 0, 0, 0), mincut=False)
res_new = cgraph.table.read_rows()
res_new.consume_all()
assert res_new.rows == res_old.rows
@pytest.mark.timeout(30)
def test_split_pair_abstract_nodes(self, gen_graph):
"""
Try to remove (non-existing) edge between RG supervoxel 1 and abstract node "2"
┌─────┐
│ B² │
│ "2" │
│ │
└─────┘
┌─────┐ => Reject
│ A¹ │
│ 1 │
│ │
└─────┘
"""
cgraph = gen_graph(n_layers=3)
# Preparation: Build Chunk A
fake_timestamp = datetime.utcnow() - timedelta(days=10)
create_chunk(cgraph,
vertices=[to_label(cgraph, 1, 0, 0, 0, 0)],
edges=[],
timestamp=fake_timestamp)
# Preparation: Build Chunk B
create_chunk(cgraph,
vertices=[to_label(cgraph, 1, 1, 0, 0, 0)],
edges=[],
timestamp=fake_timestamp)
cgraph.add_layer(3, np.array([[0, 0, 0], [1, 0, 0]]), time_stamp=fake_timestamp)
res_old = cgraph.table.read_rows()
res_old.consume_all()
# Split
with pytest.raises(cg_exceptions.PreconditionError):
cgraph.remove_edges("Jane Doe", to_label(cgraph, 1, 0, 0, 0, 0), to_label(cgraph, 2, 1, 0, 0, 1), mincut=False)
res_new = cgraph.table.read_rows()
res_new.consume_all()
assert res_new.rows == res_old.rows
@pytest.mark.timeout(30)
def test_diagonal_connections(self, gen_graph):
"""
Create graph with edge between RG supervoxels 1 and 2 (same chunk)
and edge between RG supervoxels 1 and 3 (neighboring chunks)
┌─────┬─────┐
│ A¹ │ B¹ │
│ 2━1━┿━━3 │
│ / │ │
┌─────┬─────┐
│ | │ │
│ 4━━┿━━5 │
│ C¹ │ D¹ │
└─────┴─────┘
"""
cgraph = gen_graph(n_layers=3)
# Chunk A
create_chunk(cgraph,
vertices=[to_label(cgraph, 1, 0, 0, 0, 0), to_label(cgraph, 1, 0, 0, 0, 1)],
edges=[(to_label(cgraph, 1, 0, 0, 0, 0), to_label(cgraph, 1, 0, 0, 0, 1), 0.5),
(to_label(cgraph, 1, 0, 0, 0, 0), to_label(cgraph, 1, 1, 0, 0, 0), inf),
(to_label(cgraph, 1, 0, 0, 0, 0), to_label(cgraph, 1, 0, 1, 0, 0), inf)])
# Chunk B
create_chunk(cgraph,
vertices=[to_label(cgraph, 1, 1, 0, 0, 0)],
edges=[(to_label(cgraph, 1, 1, 0, 0, 0), to_label(cgraph, 1, 0, 0, 0, 0), inf)])
# Chunk C
create_chunk(cgraph,
vertices=[to_label(cgraph, 1, 0, 1, 0, 0)],
edges=[(to_label(cgraph, 1, 0, 1, 0, 0), to_label(cgraph, 1, 1, 1, 0, 0), inf),
(to_label(cgraph, 1, 0, 1, 0, 0), to_label(cgraph, 1, 0, 0, 0, 0), inf)])
# Chunk D
create_chunk(cgraph,
vertices=[to_label(cgraph, 1, 1, 1, 0, 0)],
edges=[(to_label(cgraph, 1, 1, 1, 0, 0), to_label(cgraph, 1, 0, 1, 0, 0), inf)])
cgraph.add_layer(3, np.array([[0, 0, 0], [1, 0, 0], [0, 1, 0], [1, 1, 0]]))
rr = cgraph.range_read_chunk(chunk_id=cgraph.get_chunk_id(layer=3, x=0, y=0, z=0))
root_ids_t0 = list(rr.keys())
assert len(root_ids_t0) == 1
child_ids = []
for root_id in root_ids_t0:
cgraph.logger.debug(("root_id", root_id))
child_ids.extend(cgraph.get_subgraph_nodes(root_id))
new_roots = cgraph.remove_edges("Jane Doe",
to_label(cgraph, 1, 0, 0, 0, 0),
to_label(cgraph, 1, 0, 0, 0, 1),
mincut=False)
assert len(new_roots) == 2
assert cgraph.get_root(to_label(cgraph, 1, 1, 1, 0, 0)) == \
cgraph.get_root(to_label(cgraph, 1, 0, 1, 0, 0))
assert cgraph.get_root(to_label(cgraph, 1, 0, 0, 0, 0)) == \
cgraph.get_root(to_label(cgraph, 1, 1, 0, 0, 0))
assert cgraph.get_root(to_label(cgraph, 1, 0, 0, 0, 0)) != \
cgraph.get_root(to_label(cgraph, 1, 0, 0, 0, 1))
@pytest.mark.timeout(30)
def test_shatter(self, gen_graph):
"""
Create graph with edge between RG supervoxels 1 and 2 (same chunk)
and edge between RG supervoxels 1 and 3 (neighboring chunks)
┌─────┬─────┐
│ A¹ │ B¹ │
│ 2━1━┿━━3 │
│ / │ │
┌─────┬─────┐
│ | │ │
│ 4━━┿━━5 │
│ C¹ │ D¹ │
└─────┴─────┘
"""
cgraph = gen_graph(n_layers=3)
# Chunk A
create_chunk(cgraph,
vertices=[to_label(cgraph, 1, 0, 0, 0, 0), to_label(cgraph, 1, 0, 0, 0, 1)],
edges=[(to_label(cgraph, 1, 0, 0, 0, 0), to_label(cgraph, 1, 0, 0, 0, 1), 0.5),
(to_label(cgraph, 1, 0, 0, 0, 0), to_label(cgraph, 1, 1, 0, 0, 0), inf),
(to_label(cgraph, 1, 0, 0, 0, 0), to_label(cgraph, 1, 0, 1, 0, 0), inf)])
# Chunk B
create_chunk(cgraph,
vertices=[to_label(cgraph, 1, 1, 0, 0, 0)],
edges=[(to_label(cgraph, 1, 1, 0, 0, 0), to_label(cgraph, 1, 0, 0, 0, 0), inf)])
# Chunk C
create_chunk(cgraph,
vertices=[to_label(cgraph, 1, 0, 1, 0, 0)],
edges=[(to_label(cgraph, 1, 0, 1, 0, 0), to_label(cgraph, 1, 1, 1, 0, 0), .1),
(to_label(cgraph, 1, 0, 1, 0, 0), to_label(cgraph, 1, 0, 0, 0, 0), inf)])
# Chunk D
create_chunk(cgraph,
vertices=[to_label(cgraph, 1, 1, 1, 0, 0)],
edges=[(to_label(cgraph, 1, 1, 1, 0, 0), to_label(cgraph, 1, 0, 1, 0, 0), .1)])
cgraph.add_layer(3, np.array([[0, 0, 0], [1, 0, 0], [0, 1, 0], [1, 1, 0]]))
new_root_ids = cgraph.shatter_nodes("Jane Doe", atomic_node_ids=[to_label(cgraph, 1, 0, 0, 0, 0)])
cgraph.logger.debug(new_root_ids)
assert len(new_root_ids) == 3
class TestGraphMergeSplit:
@pytest.mark.timeout(30)
def test_multiple_cuts_and_splits(self, gen_graph_simplequerytest):
"""
┌─────┬─────┬─────┐ L X Y Z S L X Y Z S L X Y Z S L X Y Z S
│ A¹ │ B¹ │ C¹ │ 1: 1 0 0 0 0 ─── 2 0 0 0 1 ─── 3 0 0 0 1 ─── 4 0 0 0 1
│ 1 │ 3━2━┿━━4 │ 2: 1 1 0 0 0 ─┬─ 2 1 0 0 1 ─── 3 0 0 0 2 ─┬─ 4 0 0 0 2
│ │ │ │ 3: 1 1 0 0 1 ─┘ │
└─────┴─────┴─────┘ 4: 1 2 0 0 0 ─── 2 2 0 0 1 ─── 3 1 0 0 1 ─┘
"""
cgraph = gen_graph_simplequerytest
rr = cgraph.range_read_chunk(chunk_id=cgraph.get_chunk_id(layer=4, x=0, y=0, z=0))
root_ids_t0 = list(rr.keys())
child_ids = []
for root_id in root_ids_t0:
cgraph.logger.debug(f"root_id {root_id}")
child_ids.extend(cgraph.get_subgraph_nodes(root_id))
for i in range(10):
cgraph.logger.debug(f"\n\nITERATION {i}/10")
cgraph.logger.debug("\n\nMERGE 1 & 3\n\n")
new_roots = cgraph.add_edges("Jane Doe",
[to_label(cgraph, 1, 0, 0, 0, 0),
to_label(cgraph, 1, 1, 0, 0, 1)],
affinities=.9)
assert len(new_roots) == 1
assert len(cgraph.get_subgraph_nodes(new_roots[0])) == 4
root_ids = []
for child_id in child_ids:
root_ids.append(cgraph.get_root(child_id))
cgraph.logger.debug((child_id, cgraph.get_chunk_coordinates(child_id), root_ids[-1]))
parent_id = cgraph.get_parent(child_id)
cgraph.logger.debug((parent_id, cgraph.read_cross_chunk_edges(parent_id)))
u_root_ids = np.unique(root_ids)
assert len(u_root_ids) == 1
# ------------------------------------------------------------------
cgraph.logger.debug("\n\nSPLIT 2 & 3\n\n")
new_roots = cgraph.remove_edges("John Doe", to_label(cgraph, 1, 1, 0, 0, 0),
to_label(cgraph, 1, 1, 0, 0, 1), mincut=False)
assert len(np.unique(new_roots)) == 2
for root in new_roots:
cgraph.logger.debug(("SUBGRAPH", cgraph.get_subgraph_nodes(root)))
cgraph.logger.debug("test children")
root_ids = []
for child_id in child_ids:
root_ids.append(cgraph.get_root(child_id))
cgraph.logger.debug((child_id, cgraph.get_chunk_coordinates(child_id), cgraph.get_segment_id(child_id), root_ids[-1]))
cgraph.logger.debug((cgraph.get_atomic_node_info(child_id)))
cgraph.logger.debug("test root")
u_root_ids = np.unique(root_ids)
these_child_ids = []
for root_id in u_root_ids:
these_child_ids.extend(cgraph.get_subgraph_nodes(root_id, verbose=False))
cgraph.logger.debug((root_id, cgraph.get_subgraph_nodes(root_id, verbose=False)))
assert len(these_child_ids) == 4
assert len(u_root_ids) == 2
# ------------------------------------------------------------------
cgraph.logger.debug("\n\nSPLIT 1 & 3\n\n")
new_roots = cgraph.remove_edges("Jane Doe",
to_label(cgraph, 1, 0, 0, 0, 0),
to_label(cgraph, 1, 1, 0, 0, 1),
mincut=False)
assert len(new_roots) == 2
root_ids = []
for child_id in child_ids:
root_ids.append(cgraph.get_root(child_id))
cgraph.logger.debug((child_id, cgraph.get_chunk_coordinates(child_id), root_ids[-1]))
parent_id = cgraph.get_parent(child_id)
cgraph.logger.debug((parent_id, cgraph.read_cross_chunk_edges(parent_id)))
u_root_ids = np.unique(root_ids)
assert len(u_root_ids) == 3
# ------------------------------------------------------------------
cgraph.logger.debug("\n\nMERGE 2 & 3\n\n")
new_roots = cgraph.add_edges("Jane Doe",
[to_label(cgraph, 1, 1, 0, 0, 0),
to_label(cgraph, 1, 1, 0, 0, 1)],
affinities=.9)
assert len(new_roots) == 1
root_ids = []
for child_id in child_ids:
root_ids.append(cgraph.get_root(child_id))
cgraph.logger.debug((child_id, cgraph.get_chunk_coordinates(child_id), root_ids[-1]))
parent_id = cgraph.get_parent(child_id)
cgraph.logger.debug((parent_id, cgraph.read_cross_chunk_edges(parent_id)))
u_root_ids = np.unique(root_ids)
assert len(u_root_ids) == 2
for root_id in root_ids:
cross_edge_dict_layers = graph_tests.root_cross_edge_test(root_id, cg=cgraph) # dict: layer -> cross_edge_dict
n_cross_edges_layer = collections.defaultdict(list)
for child_layer in cross_edge_dict_layers.keys():
for layer in cross_edge_dict_layers[child_layer].keys():
n_cross_edges_layer[layer].append(len(cross_edge_dict_layers[child_layer][layer]))
for layer in n_cross_edges_layer.keys():
assert len(np.unique(n_cross_edges_layer[layer])) == 1
class TestGraphMinCut:
# TODO: Ideally, these tests should only check that the mincut retrieves the correct edges;
# the edge-removal machinery itself is tested exhaustively in TestGraphSplit.
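# A focused variant could, for instance, build one weak and one strong edge and only
# check which atomic partners survive the cut. Sketch only (uses the same fixtures and
# helpers as the tests below; supervoxel names, affinities and bbox are illustrative):
#
#   def test_mincut_prefers_weak_edge(self, gen_graph):
#       cgraph = gen_graph(n_layers=3)
#       # ...build two chunks where sv1-sv2 has affinity 0.2 and sv1-sv3 has 0.8...
#       cgraph.remove_edges("Jane Doe", sv1, sv2, [0, 0, 0], bbox_max, mincut=True)
#       partners, _, _ = cgraph.get_atomic_partners(sv1)
#       assert sv2 not in partners and sv3 in partners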
@pytest.mark.timeout(30)
def test_cut_regular_link(self, gen_graph):
"""
Regular link between 1 and 2
┌─────┬─────┐
│ A¹ │ B¹ │
│ 1━━┿━━2 │
│ │ │
└─────┴─────┘
"""
cgraph = gen_graph(n_layers=3)
# Preparation: Build Chunk A
fake_timestamp = datetime.utcnow() - timedelta(days=10)
create_chunk(cgraph,
vertices=[to_label(cgraph, 1, 0, 0, 0, 0)],
edges=[(to_label(cgraph, 1, 0, 0, 0, 0), to_label(cgraph, 1, 1, 0, 0, 0), 0.5)],
timestamp=fake_timestamp)
# Preparation: Build Chunk B
create_chunk(cgraph,
vertices=[to_label(cgraph, 1, 1, 0, 0, 0)],
edges=[(to_label(cgraph, 1, 1, 0, 0, 0), to_label(cgraph, 1, 0, 0, 0, 0), 0.5)],
timestamp=fake_timestamp)
cgraph.add_layer(3, np.array([[0, 0, 0], [1, 0, 0]]), time_stamp=fake_timestamp)
# Mincut
new_root_ids = cgraph.remove_edges(
"Jane Doe", to_label(cgraph, 1, 0, 0, 0, 0), to_label(cgraph, 1, 1, 0, 0, 0),
[0, 0, 0], [2*cgraph.chunk_size[0], 2*cgraph.chunk_size[1], cgraph.chunk_size[2]],
mincut=True)
# Check New State
assert len(new_root_ids) == 2
assert cgraph.get_root(to_label(cgraph, 1, 0, 0, 0, 0)) != cgraph.get_root(to_label(cgraph, 1, 1, 0, 0, 0))
partners, affinities, areas = cgraph.get_atomic_partners(to_label(cgraph, 1, 0, 0, 0, 0))
assert len(partners) == 0
partners, affinities, areas = cgraph.get_atomic_partners(to_label(cgraph, 1, 1, 0, 0, 0))
assert len(partners) == 0
leaves = np.unique(cgraph.get_subgraph_nodes(cgraph.get_root(to_label(cgraph, 1, 0, 0, 0, 0))))
assert len(leaves) == 1 and to_label(cgraph, 1, 0, 0, 0, 0) in leaves
leaves = np.unique(cgraph.get_subgraph_nodes(cgraph.get_root(to_label(cgraph, 1, 1, 0, 0, 0))))
assert len(leaves) == 1 and to_label(cgraph, 1, 1, 0, 0, 0) in leaves
@pytest.mark.timeout(30)
def test_cut_no_link(self, gen_graph):
"""
No connection between 1 and 2
┌─────┬─────┐
│ A¹ │ B¹ │
│ 1 │ 2 │
│ │ │
└─────┴─────┘
"""
cgraph = gen_graph(n_layers=3)
# Preparation: Build Chunk A
fake_timestamp = datetime.utcnow() - timedelta(days=10)
create_chunk(cgraph,
vertices=[to_label(cgraph, 1, 0, 0, 0, 0)],
edges=[],
timestamp=fake_timestamp)
# Preparation: Build Chunk B
create_chunk(cgraph,
vertices=[to_label(cgraph, 1, 1, 0, 0, 0)],
edges=[],
timestamp=fake_timestamp)
cgraph.add_layer(3, np.array([[0, 0, 0], [1, 0, 0]]), time_stamp=fake_timestamp)
res_old = cgraph.table.read_rows()
res_old.consume_all()
# Mincut
with pytest.raises(cg_exceptions.PreconditionError):
cgraph.remove_edges(
"Jane Doe", to_label(cgraph, 1, 0, 0, 0, 0), to_label(cgraph, 1, 1, 0, 0, 0),
[0, 0, 0], [2*cgraph.chunk_size[0], 2*cgraph.chunk_size[1], cgraph.chunk_size[2]],
mincut=True)
res_new = cgraph.table.read_rows()
res_new.consume_all()
assert res_new.rows == res_old.rows
@pytest.mark.timeout(30)
def test_cut_old_link(self, gen_graph):
"""
Link between 1 and 2 got removed previously (aff = 0.0)
┌─────┬─────┐
│ A¹ │ B¹ │
│ 1┅┅╎┅┅2 │
│ │ │
└─────┴─────┘
"""
cgraph = gen_graph(n_layers=3)
# Preparation: Build Chunk A
fake_timestamp = datetime.utcnow() - timedelta(days=10)
create_chunk(cgraph,
vertices=[to_label(cgraph, 1, 0, 0, 0, 0)],
edges=[(to_label(cgraph, 1, 0, 0, 0, 0), to_label(cgraph, 1, 1, 0, 0, 0), 0.5)],
timestamp=fake_timestamp)
# Preparation: Build Chunk B
create_chunk(cgraph,
vertices=[to_label(cgraph, 1, 1, 0, 0, 0)],
edges=[(to_label(cgraph, 1, 1, 0, 0, 0), to_label(cgraph, 1, 0, 0, 0, 0), 0.5)],
timestamp=fake_timestamp)
cgraph.add_layer(3, np.array([[0, 0, 0], [1, 0, 0]]), time_stamp=fake_timestamp)
cgraph.remove_edges("John Doe", to_label(cgraph, 1, 1, 0, 0, 0), to_label(cgraph, 1, 0, 0, 0, 0), mincut=False)
res_old = cgraph.table.read_rows()
res_old.consume_all()
# Mincut
with pytest.raises(cg_exceptions.PreconditionError):
cgraph.remove_edges(
"Jane Doe", to_label(cgraph, 1, 0, 0, 0, 0), to_label(cgraph, 1, 1, 0, 0, 0),
[0, 0, 0], [2*cgraph.chunk_size[0], 2*cgraph.chunk_size[1], cgraph.chunk_size[2]],
mincut=True)
res_new = cgraph.table.read_rows()
res_new.consume_all()
assert res_new.rows == res_old.rows
@pytest.mark.timeout(30)
def test_cut_indivisible_link(self, gen_graph):
"""
Sink: 1, Source: 2
Link between 1 and 2 is set to `inf` and must not be cut.
┌─────┬─────┐
│ A¹ │ B¹ │
│ 1══╪══2 │
│ │ │
└─────┴─────┘
"""
cgraph = gen_graph(n_layers=3)
# Preparation: Build Chunk A
fake_timestamp = datetime.utcnow() - timedelta(days=10)
create_chunk(cgraph,
vertices=[to_label(cgraph, 1, 0, 0, 0, 0)],
edges=[(to_label(cgraph, 1, 0, 0, 0, 0),
to_label(cgraph, 1, 1, 0, 0, 0), inf)],
timestamp=fake_timestamp)
# Preparation: Build Chunk B
create_chunk(cgraph,
vertices=[to_label(cgraph, 1, 1, 0, 0, 0)],
edges=[(to_label(cgraph, 1, 1, 0, 0, 0),
to_label(cgraph, 1, 0, 0, 0, 0), inf)],
timestamp=fake_timestamp)
cgraph.add_layer(3, np.array([[0, 0, 0], [1, 0, 0]]),
time_stamp=fake_timestamp)
original_parents_1 = cgraph.get_all_parents(
to_label(cgraph, 1, 0, 0, 0, 0))
original_parents_2 = cgraph.get_all_parents(
to_label(cgraph, 1, 1, 0, 0, 0))
# Mincut
assert cgraph.remove_edges(
"Jane Doe", to_label(cgraph, 1, 0, 0, 0, 0),
to_label(cgraph, 1, 1, 0, 0, 0),
[0, 0, 0], [2 * cgraph.chunk_size[0], 2 * cgraph.chunk_size[1],
cgraph.chunk_size[2]],
mincut=True) is None
new_parents_1 = cgraph.get_all_parents(to_label(cgraph, 1, 0, 0, 0, 0))
new_parents_2 = cgraph.get_all_parents(to_label(cgraph, 1, 1, 0, 0, 0))
assert np.all(np.array(original_parents_1) == np.array(new_parents_1))
assert np.all(np.array(original_parents_2) == np.array(new_parents_2))
class TestGraphMultiCut:
@pytest.mark.timeout(30)
def test_cut_multi_tree(self, gen_graph):
pass
class TestGraphHistory:
""" These test inadvertantly also test merge and split operations """
@pytest.mark.timeout(30)
def test_cut_merge_history(self, gen_graph):
"""
Regular link between 1 and 2
┌─────┬─────┐
│ A¹ │ B¹ │
│ 1━━┿━━2 │
│ │ │
└─────┴─────┘
(1) Split 1 and 2
(2) Merge 1 and 2
"""
cgraph = gen_graph(n_layers=3)
# Preparation: Build Chunk A
fake_timestamp = datetime.utcnow() - timedelta(days=10)
create_chunk(cgraph,
vertices=[to_label(cgraph, 1, 0, 0, 0, 0)],
edges=[(to_label(cgraph, 1, 0, 0, 0, 0),
to_label(cgraph, 1, 1, 0, 0, 0), 0.5)],
timestamp=fake_timestamp)
# Preparation: Build Chunk B
create_chunk(cgraph,
vertices=[to_label(cgraph, 1, 1, 0, 0, 0)],
edges=[(to_label(cgraph, 1, 1, 0, 0, 0),
to_label(cgraph, 1, 0, 0, 0, 0), 0.5)],
timestamp=fake_timestamp)
cgraph.add_layer(3, np.array([[0, 0, 0], [1, 0, 0]]),
time_stamp=fake_timestamp)
first_root = cgraph.get_root(to_label(cgraph, 1, 0, 0, 0, 0))
assert first_root == cgraph.get_root(to_label(cgraph, 1, 1, 0, 0, 0))
timestamp_before_split = datetime.utcnow()
split_roots = cgraph.remove_edges("Jane Doe",
to_label(cgraph, 1, 0, 0, 0, 0),
to_label(cgraph, 1, 1, 0, 0, 0),
mincut=False)
assert len(split_roots) == 2
timestamp_after_split = datetime.utcnow()
merge_roots = cgraph.add_edges("Jane Doe",
[to_label(cgraph, 1, 0, 0, 0, 0),
to_label(cgraph, 1, 1, 0, 0, 0)],
affinities=.4)
assert len(merge_roots) == 1
merge_root = merge_roots[0]
timestamp_after_merge = datetime.utcnow()
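# lineage sizes checked below: first_root and merge_root each see all 4 ids
# (original root, both split roots, merged root); each split root only sees
# itself plus its past (first_root) and future (merge_root), i.e. 3 ids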
assert len(cgraph.get_root_id_history(first_root,
time_stamp_past=datetime.min,
time_stamp_future=datetime.max)) == 4
assert len(cgraph.get_root_id_history(split_roots[0],
time_stamp_past=datetime.min,
time_stamp_future=datetime.max)) == 3
assert len(cgraph.get_root_id_history(split_roots[1],
time_stamp_past=datetime.min,
time_stamp_future=datetime.max)) == 3
assert len(cgraph.get_root_id_history(merge_root,
time_stamp_past=datetime.min,
time_stamp_future=datetime.max)) == 4
new_roots, old_roots = cgraph.get_delta_roots(timestamp_before_split,
timestamp_after_split)
assert len(old_roots) == 1
assert old_roots[0] == first_root
assert len(new_roots) == 2
assert np.all(np.isin(new_roots, split_roots))
new_roots2, old_roots2 = cgraph.get_delta_roots(timestamp_after_split,
timestamp_after_merge)
assert len(new_roots2) == 1
assert new_roots2[0] == merge_root
assert len(old_roots2) == 2
assert np.all(np.isin(old_roots2, split_roots))
new_roots3, old_roots3 = cgraph.get_delta_roots(timestamp_before_split,
timestamp_after_merge)
assert len(new_roots3) == 1
assert new_roots3[0] == merge_root
assert len(old_roots3) == 1
assert old_roots3[0] == first_root
class TestGraphLocks:
@pytest.mark.timeout(30)
def test_lock_unlock(self, gen_graph):
"""
No connection between 1, 2 and 3
┌─────┬─────┐
│ A¹ │ B¹ │
│ 1 │ 3 │
│ 2 │ │
└─────┴─────┘
(1) Try lock (opid = 1)
(2) Try lock (opid = 2)
(3) Try unlock (opid = 1)
(4) Try lock (opid = 2)
"""
cgraph = gen_graph(n_layers=3)
# Preparation: Build Chunk A
fake_timestamp = datetime.utcnow() - timedelta(days=10)
create_chunk(cgraph,
vertices=[to_label(cgraph, 1, 0, 0, 0, 1),
to_label(cgraph, 1, 0, 0, 0, 2)],
edges=[],
timestamp=fake_timestamp)
# Preparation: Build Chunk B
create_chunk(cgraph,
vertices=[to_label(cgraph, 1, 1, 0, 0, 1)],
edges=[],
timestamp=fake_timestamp)
cgraph.add_layer(3, np.array([[0, 0, 0], [1, 0, 0]]),
time_stamp=fake_timestamp)
operation_id_1 = cgraph.get_unique_operation_id()
root_id = cgraph.get_root(to_label(cgraph, 1, 0, 0, 0, 1))
assert cgraph.lock_root_loop(root_ids=[root_id],
operation_id=operation_id_1)[0]
operation_id_2 = cgraph.get_unique_operation_id()
assert not cgraph.lock_root_loop(root_ids=[root_id],
operation_id=operation_id_2)[0]
assert cgraph.unlock_root(root_id=root_id,
operation_id=operation_id_1)
assert cgraph.lock_root_loop(root_ids=[root_id],
operation_id=operation_id_2)[0]
@pytest.mark.timeout(30)
def test_lock_expiration(self, gen_graph, lock_expired_timedelta_override):
"""
No connection between 1, 2 and 3
┌─────┬─────┐
│ A¹ │ B¹ │
│ 1 │ 3 │
│ 2 │ │
└─────┴─────┘
(1) Try lock (opid = 1)
(2) Try lock (opid = 2)
(3) Try lock (opid = 2) with retries
"""
cgraph = gen_graph(n_layers=3)
# Preparation: Build Chunk A
fake_timestamp = datetime.utcnow() - timedelta(days=10)
create_chunk(cgraph,
vertices=[to_label(cgraph, 1, 0, 0, 0, 1),
to_label(cgraph, 1, 0, 0, 0, 2)],
edges=[],
timestamp=fake_timestamp)
# Preparation: Build Chunk B
create_chunk(cgraph,
vertices=[to_label(cgraph, 1, 1, 0, 0, 1)],
edges=[],
timestamp=fake_timestamp)
cgraph.add_layer(3, np.array([[0, 0, 0], [1, 0, 0]]),
time_stamp=fake_timestamp)
operation_id_1 = cgraph.get_unique_operation_id()
root_id = cgraph.get_root(to_label(cgraph, 1, 0, 0, 0, 1))
assert cgraph.lock_root_loop(root_ids=[root_id],
operation_id=operation_id_1)[0]
operation_id_2 = cgraph.get_unique_operation_id()
assert not cgraph.lock_root_loop(root_ids=[root_id],
operation_id=operation_id_2)[0]
assert cgraph.lock_root_loop(root_ids=[root_id],
operation_id=operation_id_2,
max_tries=10, waittime_s=.5)[0]
@pytest.mark.timeout(30)
def test_lock_renew(self, gen_graph):
"""
No connection between 1, 2 and 3
┌─────┬─────┐
│ A¹ │ B¹ │
│ 1 │ 3 │
│ 2 │ │
└─────┴─────┘
(1) Try lock (opid = 1)
(2) Renew lock (opid = 1)
"""
cgraph = gen_graph(n_layers=3)
# Preparation: Build Chunk A
fake_timestamp = datetime.utcnow() - timedelta(days=10)
create_chunk(cgraph,
vertices=[to_label(cgraph, 1, 0, 0, 0, 1),
to_label(cgraph, 1, 0, 0, 0, 2)],
edges=[],
timestamp=fake_timestamp)
# Preparation: Build Chunk B
create_chunk(cgraph,
vertices=[to_label(cgraph, 1, 1, 0, 0, 1)],
edges=[],
timestamp=fake_timestamp)
cgraph.add_layer(3, np.array([[0, 0, 0], [1, 0, 0]]),
time_stamp=fake_timestamp)
operation_id_1 = cgraph.get_unique_operation_id()
root_id = cgraph.get_root(to_label(cgraph, 1, 0, 0, 0, 1))
assert cgraph.lock_root_loop(root_ids=[root_id],
operation_id=operation_id_1)[0]
assert cgraph.check_and_renew_root_locks(root_ids=[root_id],
operation_id=operation_id_1)
@pytest.mark.timeout(30)
def test_lock_merge_lock_old_id(self, gen_graph):
"""
No connection between 1, 2 and 3
┌─────┬─────┐
│ A¹ │ B¹ │
│ 1 │ 3 │
│ 2 │ │
└─────┴─────┘
(1) Merge (includes lock opid 1)
(2) Try lock opid 2 --> should be successful and return new root id
"""
cgraph = gen_graph(n_layers=3)
# Preparation: Build Chunk A
fake_timestamp = datetime.utcnow() - timedelta(days=10)
create_chunk(cgraph,
vertices=[to_label(cgraph, 1, 0, 0, 0, 1),
to_label(cgraph, 1, 0, 0, 0, 2)],
edges=[],
timestamp=fake_timestamp)
# Preparation: Build Chunk B
create_chunk(cgraph,
vertices=[to_label(cgraph, 1, 1, 0, 0, 1)],
edges=[],
timestamp=fake_timestamp)
cgraph.add_layer(3, np.array([[0, 0, 0], [1, 0, 0]]),
time_stamp=fake_timestamp)
root_id = cgraph.get_root(to_label(cgraph, 1, 0, 0, 0, 1))
new_root_ids = cgraph.add_edges("Chuck Norris", [to_label(cgraph, 1, 0, 0, 0, 1),
to_label(cgraph, 1, 0, 0, 0, 2)], affinities=1.)
assert new_root_ids is not None
operation_id_2 = cgraph.get_unique_operation_id()
success, new_root_id = cgraph.lock_root_loop(root_ids=[root_id],
operation_id=operation_id_2,
max_tries=10, waittime_s=.5)
cgraph.logger.debug(new_root_id)
assert success
assert new_root_ids[0] == new_root_id
| 45.863683 | 162 | 0.555526 | 132,284 | 0.94341 | 0 | 0 | 133,032 | 0.948744 | 0 | 0 | 27,177 | 0.193818 |
593862c08f86b1ec3350fd994c6a0a23e0d407ad | 202 | py | Python | remote_works/graphql/delivery/resolvers.py | tetyanaloskutova/saleor | b3bb51e9c0c4c2febf4aa1e2a7d893e77c331e89 | ["BSD-3-Clause"] | 7 | 2019-05-17T14:27:13.000Z | 2021-12-17T22:52:40.000Z | remote_works/graphql/delivery/resolvers.py | tetyanaloskutova/saleor | b3bb51e9c0c4c2febf4aa1e2a7d893e77c331e89 | ["BSD-3-Clause"] | 9 | 2019-04-13T09:24:28.000Z | 2019-09-09T15:35:05.000Z | remote_works/graphql/delivery/resolvers.py | tetyanaloskutova/remote-works | b3bb51e9c0c4c2febf4aa1e2a7d893e77c331e89 | ["BSD-3-Clause"] | null | null | null |
import graphene_django_optimizer as gql_optimizer
from ...delivery import models
def resolve_delivery_zones(info):
qs = models.DeliveryZone.objects.all()
return gql_optimizer.query(qs, info)
| 22.444444
| 49
| 0.787129
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
593db3c128dcad16c4059d93406558fd51b30469
| 5,617
|
py
|
Python
|
wark.py
|
rcorre/wark
|
fe4fe4789cb63bb2738265c3a008dc3dadb8ddaa
|
[
"MIT"
] | 1
|
2017-05-24T00:25:39.000Z
|
2017-05-24T00:25:39.000Z
|
wark.py
|
rcorre/wark
|
fe4fe4789cb63bb2738265c3a008dc3dadb8ddaa
|
[
"MIT"
] | null | null | null |
wark.py
|
rcorre/wark
|
fe4fe4789cb63bb2738265c3a008dc3dadb8ddaa
|
[
"MIT"
] | null | null | null |
import os
import time
import json
import uuid
import shlex
import weechat
import requests
from ciscosparkapi import CiscoSparkAPI
from ws4py.client.threadedclient import WebSocketClient
SCRIPT_NAME = "spark"
FULL_NAME = "plugins.var.python.{}".format(SCRIPT_NAME)
SPARK_SOCKET_URL = 'https://wdm-a.wbx2.com/wdm/api/v1/devices'
api = None
listener = None
rooms = None
buffers = []
def unixtime(msg):
"""Get the unix timestamp from a spark message object"""
t = time.strptime(msg.created, '%Y-%m-%dT%H:%M:%S.%fZ')
return int(time.mktime(t))
class Buffer():
"""Represents a weechat buffer connected to a spark room."""
def __init__(self, buf, room, api):
self.buf = buf
self.room = room
self.api = api
def show(self, msg):
"""Display a message in the buffer."""
weechat.prnt_date_tags(self.buf, unixtime(msg), "", msg.text)
def send(self, txt):
"""Send a message to the room."""
self.api.messages.create(roomId=self.room.id, markdown=txt)
# Cisco Spark has a websocket interface to listen for message events
# It isn't documented, I found it here:
# https://github.com/marchfederico/ciscospark-websocket-events
class EventListener(WebSocketClient):
"""Listens to the cisco spark web socket."""
def __init__(self, buffers):
self.buffers = buffers
spec = {
"deviceName": "weechat",
"deviceType": "DESKTOP",
"localizedModel": "python2",
"model": "python2",
"name": "weechat",
"systemName": "weechat",
"systemVersion": "0.1"
}
self.bearer = 'Bearer ' + os.getenv("SPARK_ACCESS_TOKEN")
self.headers = {'Authorization': self.bearer}
resp = requests.post(SPARK_SOCKET_URL, headers=self.headers, json=spec,
timeout=10.0)
if resp.status_code != 200:
print("Failed to register device {}: {}".format(name, resp.json()))
info = resp.json()
self.dev_url = info['url']
super(EventListener, self).__init__(
info['webSocketUrl'], protocols=['http-only', 'chat'])
def opened(self):
# authentication handshake
self.send(json.dumps({
'id': str(uuid.uuid4()),
'type': 'authorization',
'data': { 'token': self.bearer }
}))
def closed(self, code, reason=None):
resp = requests.delete(self.dev_url, headers=self.headers,
timeout=10.0)
if resp.status_code != 200:
print("Failed to unregister websocket device from Spark")
def received_message(self, m):
try:
j = json.loads(str(m))
except:
print("Failed to parse message {}".format(m))
return
timestamp = j['timestamp']
data = j['data']
name = data.get('actor', {}).get('displayName')
ev = data['eventType']
if ev == 'status.start_typing':
weechat.prnt('', '{} started typing'.format(name))
elif ev == 'status.stop_typing':
weechat.prnt('', '{} stopped typing'.format(name))
elif ev == 'conversation.activity':
act = data['activity']
verb = act['verb']
if verb == 'post':
msg = api.messages.get(act['id'])
for buf in self.buffers:
if buf.room.id == msg.roomId:
buf.show(msg)
else:
print('Unknown event {}'.format(ev))
class CommandException(Exception):
pass
def buffer_input_cb(data, buf, input_data):
weechat.prnt(buf, input_data)
return weechat.WEECHAT_RC_OK
def buffer_close_cb(data, buf):
"""Called on closing a buffer."""
return weechat.WEECHAT_RC_OK
def room_list(buf):
"""Print a list of visible rooms."""
weechat.prnt(buf, '--Rooms--')
weechat.prnt(buf, '\n'.join(rooms.keys()))
weechat.prnt(buf, '---------')
def room_open(buf, name):
"""Open a new buffer connected to a spark room."""
room = rooms[name]
newbuf = weechat.buffer_new("spark." + room.title, "buffer_input_cb", "",
"buffer_close_cb", "")
# track the new room buffer so incoming messages can be routed to it
buffers.append(Buffer(newbuf, room, api))
def rehistory(_buf):
#messages = api.messages.list(roomId=room.id)
#for msg in sorted(messages, key=unixtime):
# text = msg.text.encode('ascii', 'replace') if msg.text else ''
# weechat.prnt_date_tags(newbuf, unixtime(msg), "", text)
pass
COMMANDS = {
'rooms': room_list,
'open': room_open,
}
def spark_command_cb(data, buf, command):
parts = shlex.split(command)
cmd = parts[0]
args = parts[1:]
if not cmd in COMMANDS:
weechat.prnt(buf, "Unknown command " + cmd)
return weechat.WEECHAT_RC_ERROR
try:
COMMANDS[cmd](buf, *args)
return weechat.WEECHAT_RC_OK
except CommandException as ex:
weechat.prnt(buf, 'Error: {}'.format(ex))
return weechat.WEECHAT_RC_ERROR
weechat.register(SCRIPT_NAME, "rcorre", "0.1", "MIT", "Spark Client", "", "")
api = CiscoSparkAPI()
rooms = {room.title: room for room in api.rooms.list()}
listener = EventListener(buffers)
listener.connect()
weechat.hook_command(
# Command name and description
'spark', '',
# Usage
'[command] [command options]',
# Description of arguments
'Commands:\n' +
'\n'.join(['history']) +
'\nUse /spark help [command] to find out more\n',
# Completions
'|'.join(COMMANDS.keys()),
# Function name
'spark_command_cb', '')
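# Hedged usage note (editor's addition, not part of the original script):
# the COMMANDS table above is what /spark dispatches to, with arguments
# split by shlex, so from weechat this looks like:
#
#     /spark rooms            -> room_list(buf)
#     /spark open "Team Room" -> room_open(buf, "Team Room")
#
# Multi-word room names therefore need quoting.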
| 28.226131
| 79
| 0.590707
| 2,874
| 0.511661
| 0
| 0
| 0
| 0
| 0
| 0
| 1,801
| 0.320634
|
593e946792eaa8675d0ba0dfd7b0ef6bf054d411
| 1,869
|
py
|
Python
|
src/app.py
|
jqueguiner/text-to-speech-as-a-service
|
b66b1593a6c669c77edadb38939de30e82e46425
|
[
"MIT"
] | 3
|
2020-03-19T09:49:49.000Z
|
2020-03-30T14:18:00.000Z
|
src/app.py
|
jqueguiner/text-to-speech-as-a-service
|
b66b1593a6c669c77edadb38939de30e82e46425
|
[
"MIT"
] | 2
|
2021-09-28T01:12:37.000Z
|
2022-02-26T06:54:04.000Z
|
src/app.py
|
jqueguiner/text-to-speech-as-a-service
|
b66b1593a6c669c77edadb38939de30e82e46425
|
[
"MIT"
] | null | null | null |
import os
import sys
import subprocess
import requests
import ssl
import random
import string
import json
from flask import jsonify
from flask import Flask
from flask import request
from flask import send_file
import traceback
from uuid import uuid4
from notebook_utils.synthesize import *
try: # Python 3.5+
from http import HTTPStatus
except ImportError:
try: # Python 3
from http import client as HTTPStatus
except ImportError: # Python 2
import httplib as HTTPStatus
app = Flask(__name__)
def generate_random_filename(upload_directory, extension):
filename = str(uuid4())
filename = os.path.join(upload_directory, filename + "." + extension)
return filename
def clean_me(filename):
if os.path.exists(filename):
os.remove(filename)
def create_directory(path):
os.system("mkdir -p %s" % os.path.dirname(path))
@app.route("/process", methods=["POST", "GET"])
def process():
output_path = generate_random_filename(output_directory, "wav")
try:
text = request.json["text"]
synthesize(text, tts_model, voc_model, alpha=1.0, output_file=output_path)
callback = send_file(output_path, mimetype='audio/wav')
return callback, 200
except:
traceback.print_exc()
return {'message': 'input error'}, 400
finally:
clean_me(
output_path
)
if __name__ == '__main__':
global output_directory
global voc_model
global tts_model
output_directory = '/src/output/'
create_directory(output_directory)
init_hparams('notebook_utils/pretrained_hparams.py')
tts_model = get_forward_model('pretrained/forward_100K.pyt')
voc_model = get_wavernn_model('pretrained/wave_800K.pyt')
port = 5000
host = '0.0.0.0'
app.run(host=host, port=port, threaded=True)
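# Hedged client sketch (editor's addition, not part of the service): /process
# expects a JSON body with a "text" field and streams back a WAV file. With
# the host/port configured above, a caller could look like:
#
#     import requests
#     r = requests.post("http://localhost:5000/process",
#                       json={"text": "hello world"}, timeout=120)
#     with open("out.wav", "wb") as f:
#         f.write(r.content)
#
# "localhost" and the timeout are assumptions; only the route, the "text" key
# and the WAV response come from the code above.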
| 21.238636
| 82
| 0.687533
| 0
| 0
| 0
| 0
| 531
| 0.284109
| 0
| 0
| 240
| 0.128411
|
593ea1a84d21ae7ff3a90ce0dfc4e0f0d6b66ac7
| 4,728
|
py
|
Python
|
Leander_Stephen_D'Souza/Joystick/Joystick_Motor_Code_using_PWM_library.py
|
leander-dsouza/MRM-Tenure
|
3f372ffeeb12b04f4c5c636235db61725d47c3c6
|
[
"MIT"
] | 2
|
2020-08-26T04:01:03.000Z
|
2020-09-11T05:21:32.000Z
|
Leander_Stephen_D'Souza/Joystick/Joystick_Motor_Code_using_PWM_library.py
|
leander-dsouza/MRM-Tenure
|
3f372ffeeb12b04f4c5c636235db61725d47c3c6
|
[
"MIT"
] | null | null | null |
Leander_Stephen_D'Souza/Joystick/Joystick_Motor_Code_using_PWM_library.py
|
leander-dsouza/MRM-Tenure
|
3f372ffeeb12b04f4c5c636235db61725d47c3c6
|
[
"MIT"
] | null | null | null |
import RPi.GPIO as GPIO
import time
import pygame
from pygame import locals
import pygame.display
GPIO.setmode(GPIO.BOARD)
GPIO.setwarnings(False)
speedA = 0.000
speedB = 0.000
x = 512.00
y = 512.00
# frequency=100Hz
t_on = 0.00
t_off = 0.00
ledpin1 =35 # left_fwd
ledpin2 =36 # right_fwd
ledpin3 =37 # left_bck
ledpin4 =38 # right_bck
GPIO.setup(ledpin1, GPIO.OUT)
GPIO.setup(ledpin2, GPIO.OUT)
GPIO.setup(ledpin3, GPIO.OUT)
GPIO.setup(ledpin4, GPIO.OUT)
GPIO.output(ledpin1, False)
GPIO.output(ledpin2, False)
GPIO.output(ledpin3, False)
GPIO.output(ledpin4, False)
p=GPIO.PWM(ledpin1,100)
q=GPIO.PWM(ledpin2,100)
r=GPIO.PWM(ledpin3,100)
s=GPIO.PWM(ledpin4,100)
p.start(0.00)
q.start(0.00)
r.start(0.00)
s.start(0.00)
def arduino_map(x, in_min, in_max, out_min, out_max):
return ((x - in_min) * (out_max - out_min) / (in_max - in_min)) + out_min
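# Worked example (editor's comment): arduino_map is a straight linear rescale
# of x from [in_min, in_max] onto [out_min, out_max], e.g.
#     arduino_map(5, 0, 10, 0, 100)      -> 50.0
#     arduino_map(1023, 0, 1023, 0, 255) -> 255.0
# The octant helpers below use it to turn joystick coordinates into duty cycles.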
def oct1(x, y):
speedA = arduino_map(y, 1023, 512, 255, 0)
speedB = arduino_map(x + y, 1535, 1023, 255, 0)
p.ChangeDutyCycle(speedA*(100.000000/255.000000))
q.ChangeDutyCycle(speedB*(100.000000/255.000000))
r.ChangeDutyCycle(0)
s.ChangeDutyCycle(0)
def oct2(x, y):
speedA = arduino_map(x, 512, 0, 0, 255)
speedB = arduino_map(x + y, 1023, 512, 0, 255)
p.ChangeDutyCycle(speedA*(100.000000/255.000000))
s.ChangeDutyCycle(speedB*(100.000000/255.000000))
q.ChangeDutyCycle(0)
r.ChangeDutyCycle(0)
def oct3(x, y):
speedA = arduino_map(y - x, 512, 0, 255, 0)
speedB = arduino_map(x, 512, 0, 0, 255)
p.ChangeDutyCycle(speedA*(100.000000/255.000000))
s.ChangeDutyCycle(speedB*(100.000000/255.000000))
r.ChangeDutyCycle(0)
q.ChangeDutyCycle(0)
def oct4(x, y):
speedA = arduino_map(x - y, 512, 0, 255, 0)
speedB = arduino_map(y, 512, 0, 0, 255)
r.ChangeDutyCycle(speedA*(100.000000/255.000000))
s.ChangeDutyCycle(speedB*(100.000000/255.000000))
p.ChangeDutyCycle(0)
q.ChangeDutyCycle(0)
def oct5(x, y):
speedA = arduino_map(y, 512, 0, 0, 255)
speedB = arduino_map(x + y, 1023, 512, 0, 255)
r.ChangeDutyCycle(speedA*(100.000000/255.000000))
s.ChangeDutyCycle(speedB*(100.000000/255.000000))
p.ChangeDutyCycle(0)
q.ChangeDutyCycle(0)
def oct6(x, y):
speedA = arduino_map(x, 1023, 512, 255, 0)
speedB = arduino_map(x + y, 1535, 1023, 255, 0)
r.ChangeDutyCycle(speedA*(100.000000/255.000000))
q.ChangeDutyCycle(speedB*(100.000000/255.000000))
p.ChangeDutyCycle(0)
s.ChangeDutyCycle(0)
def oct7(x, y):
speedA = arduino_map(x - y, 0, 512, 0, 255)
speedB = arduino_map(x, 1023, 512, 255, 0)
r.ChangeDutyCycle(speedA*(100.000000/255.000000))
q.ChangeDutyCycle(speedB*(100.000000/255.000000))
p.ChangeDutyCycle(0)
s.ChangeDutyCycle(0)
def oct8(x, y):
speedA = arduino_map(y - x, 0, 512, 0, 255)
speedB = arduino_map(y, 1023, 512, 255, 0)
p.ChangeDutyCycle(speedA*(100.000000/255.000000))
q.ChangeDutyCycle(speedB*(100.000000/255.000000))
r.ChangeDutyCycle(0)
s.ChangeDutyCycle(0)
pygame.init()
pygame.display.init()
pygame.joystick.init() # main joystick device system
try:
j = pygame.joystick.Joystick(0) # create a joystick instance
j.init() # init instance
print("Enabled joystick:")
except pygame.error:
print("no joystick found.")
while 1:
for e in pygame.event.get(): # iterate over event stack
if e.type == pygame.locals.JOYAXISMOTION:
x, y = j.get_axis(0), j.get_axis(1)
x = round(arduino_map(x, -1, 1, 1023, 0))
y = round(arduino_map(y, 1, -1, 0, 1023))
print("X=", x)
print("Y=", y)
# QUAD 1
if (x <= 512) & ((y >= 512) & (y <= 1023)):
if (x + y) >= 1023: # OCT1
oct1(x, y)
if (x + y) < 1023: # OCT2
oct2(x, y)
# QUAD 2
if (x <= 512) & (y <= 512):
if (x - y) <= 0: # OCT3
oct3(x, y)
if (x - y) > 0: # OCT4
oct4(x, y)
# QUAD 3
if ((x >= 512) & (x <= 1023)) & (y <= 512):
if (x + y) <= 1023: # OCT5
oct5(x, y)
if (x + y) > 1023: # OCT6
oct6(x, y)
# QUAD 4
if ((x >= 512) & (x <= 1023)) & ((y >= 512) & (y <= 1023)):
if (y - x) <= 0: # OCT7
oct7(x, y)
if (y - x) > 0: # OCT8
oct8(x, y)
| 27.172414
| 78
| 0.556684
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 305
| 0.064509
|
593f2fd2545bc28f967b04b9e6d7e99629ac3a94
| 8,548
|
py
|
Python
|
rest_helpers/type_serializers.py
|
WillFr/restlax
|
ec47617d915094137077f641427976f04acd8d47
|
[
"Apache-2.0"
] | 1
|
2019-07-03T16:29:05.000Z
|
2019-07-03T16:29:05.000Z
|
rest_helpers/type_serializers.py
|
WillFr/restlax
|
ec47617d915094137077f641427976f04acd8d47
|
[
"Apache-2.0"
] | null | null | null |
rest_helpers/type_serializers.py
|
WillFr/restlax
|
ec47617d915094137077f641427976f04acd8d47
|
[
"Apache-2.0"
] | null | null | null |
"""
This module contains functions that are geared toward serializing objects,
in particular JSON API objects.
"""
import decimal
from collections.abc import Iterable
from rest_helpers.jsonapi_objects import Resource, Response, Link, JsonApiObject, Relationship
def to_jsonable(obj, no_empty_field=False, is_private=None):
"""
This is a low level function to transform any object into a json
serializable (jsonable) object based on its __dict__.
Arguments:
obj {any type} -- the object to be transformed.
Keyword Arguments:
no_empty_field {bool} -- if set to true, the empty field (empty
string or None) will be removed from the resulting jsonable object
(default: {False})
is_private -- callback/function can be passed through to define what
does or does not surface in json payload.
Returns:
dict -- A dictionary that can be used by json.dumps
"""
if is_private is None:
is_private = lambda k: True if str(k)[0] != '_' else False
if isinstance(obj, list):
return [to_jsonable(r, no_empty_field, is_private) for r in obj]
dic = obj if isinstance(obj, dict) else \
obj.__dict__ if hasattr(obj, "__dict__") else \
None
if dic is None:
if isinstance(obj, decimal.Decimal):
str_rep = str(obj)
return int(obj) if '.' not in str_rep else str_rep
return obj
return {str(k): to_jsonable(v, no_empty_field, is_private)for k, v in dic.items() if is_private(k) and (not no_empty_field or v is not None and v != "")}
def response_to_jsonable(response, generate_self_links=True, id_only=False,is_private=None):
"""
Transform a response object into a json serializable (jsonable) object that
matches the jsonapi requirements.
Arguments:
resource {Response} -- The response to be serialized
Keyword Arguments:
generate_self_links {bool} -- If set to true "self" links will be added appropriately
where they do not exist. (default: {True})
Returns:
dict -- a dictionary that can be used by json.dumps to serialize the Response object.
"""
assert isinstance(response, Response)
# Data is a resource object (or a list of resource object,
# hence it needs some special serialization logic)
dic = response.__dict__.copy()
dic.pop("data")
return_value = to_jsonable(dic, no_empty_field=True,is_private=is_private)
if response.data is not None:
jsonable_data = resource_to_jsonable(response.data, generate_self_links,is_private=is_private)
if id_only:
jsonable_data = jsonable_data["id"] if not isinstance(jsonable_data, Iterable) else [x["id"] for x in jsonable_data]
return_value["data"] = jsonable_data
return return_value
def resource_to_jsonable(resource, generate_self_links=True,is_private=None):
"""
Transform a resource object or a resource object list into
a json serializable (jsonable) object that matches the jsonapi
requirements.
Arguments:
resource {Resource|list<Resource>} -- The resource or list of resources
to be serialized
Keyword Arguments:
generate_self_links {bool} -- If set to true "self" links will be added appropriately
where they do not exist. (default: {True})
Returns:
dict -- a dictionary that can be used by json.dumps to serialize the Resource object.
"""
if isinstance(resource, list):
return [resource_to_jsonable(x, generate_self_links, is_private=is_private) for x in resource]
assert isinstance(resource, Resource)
json_resource = resource.to_primitive() if (hasattr(resource, "to_primitive") and callable(resource.to_primitive)) else to_jsonable(resource, is_private=is_private)
special = ["id", "type", "relationships", "links", "meta"]
for key in special:
json_resource.pop(key, None)
relationships = relationships_to_jsonable(
resource.relationships, "{0}?json_path=/{1}".format(resource.id, "relationships"),
generate_self_links)
resource_links = resource.links
if generate_self_links and "self" not in resource_links:
resource_links = resource.links.copy()
resource_links["self"] = Link(resource.id)
links = links_to_jsonable(resource_links)
return_value = {
"id" : resource.id,
"type" : resource.type,
"relationships" : relationships,
"links" : links,
"meta" : resource.meta,
"attributes" :json_resource
}
_remove_empty_fields(return_value)
return return_value
def link_to_jsonable(link):
"""
Transforms a json api link object into a dictionary that can be used by json.dumps.
Arguments:
link {Link} -- the link to be serialized.
Returns:
dict -- a dictionary that can be used by json.dumps to serialize the Link object.
"""
assert isinstance(link, Link)
if link.meta is None:
return link.url
else:
return {
"href": link.url,
"meta": to_jsonable(link.meta)
}
def links_to_jsonable(links):
"""
Transform a json api Link object dictionary into a dictionary that can be used
by json dumps.
Arguments:
links {dict<Link>} -- the dictionary of Link objects to be serialized.
Returns:
dict -- a dictionary that can be used by json.dumps to serialize the dictionary of link
objects.
"""
if links is None:
return None
assert isinstance(links, dict)
return {k: link_to_jsonable(v) for k, v in links.items()}
def jsonapiobject_to_jsonable(jsonapiobject):
"""
Transforms a jsonapi json api objects into a dictionary that can be used by json dumps
Arguments:
jsonapiobject {JsonApiObject} -- The jsonapiobject to be serialized.
Returns:
dict -- a dictionary that can be used by json.dumps to serialize the JsonApiObject object.
"""
assert isinstance(jsonapiobject, JsonApiObject)
return to_jsonable(jsonapiobject, no_empty_field=True)
def relationship_to_jsonable(relationship, self_link=None):
"""
Transform a json api relationship object into a json serializable object that matches
the json api specification.
Arguments:
relationship {Relationship} -- a relationship object to be serialized.
Keyword Arguments:
self_link {string} -- link to the relationship to be serialized. If not None, a link
json api object will be created based on this value and added to the links of the
relationship object to be serialized (default: {None}).
Returns:
dict -- a dictionary that can be used by json.dumps to serialize the relationship object.
"""
assert isinstance(relationship, Relationship)
return_value = dict()
links = relationship.links.copy() if relationship.links is not None else dict()
if self_link is not None:
links["self"] = Link(self_link)
if any(links):
return_value["links"] = links_to_jsonable(links)
if relationship.data is not None:
return_value["data"] = {"type": relationship.data.type, "id": relationship.data.id}
return return_value
def relationships_to_jsonable(relationships, self_link_prefix=None, generate_self_link=False):
"""
Transform a dictionary of json api relationship objects into a json
serializable object that matches the json api specification.
Arguments:
relationships {dict<Relationships>} -- a dict of
relationship objects to be serialized.
Keyword Arguments:
self_link_prefix {string} -- prefix to be used as the link prefix when generate_self_link
is set to true. (default: {None})
generate_self_link {bool} -- when set to true, a self link will be autogenerated when
serializing the relationship object (default: {False}).
Returns:
dict -- a dictionary that can be used by json.dumps to serialize the relationship
dictionary.
"""
if relationships is None:
return None
assert isinstance(relationships, dict)
if generate_self_link:
return {k: relationship_to_jsonable(v, "{0}/{1}".format(self_link_prefix, k))
for k, v in relationships.items()}
else:
return {k: relationship_to_jsonable(v) for k, v in relationships.items()}
#region private
def _remove_empty_fields(dic):
for key in [k for k, v in dic.items() if v is None or v == ""]:
dic.pop(key)
#endregion
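# Hedged usage sketch (editor's addition, not part of the library): to_jsonable
# recursively flattens dicts, lists and plain objects (via __dict__) into
# json.dumps-friendly values. Note that, despite the name, is_private(k) must
# return True for a key to be *kept* -- the default keeps non-underscore keys.
#
#     class Point:
#         def __init__(self):
#             self.x = 1
#             self.y = None
#             self._secret = "hidden"
#
#     to_jsonable(Point())                       # {'x': 1, 'y': None}
#     to_jsonable(Point(), no_empty_field=True)  # {'x': 1}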
| 33.786561
| 168
| 0.681797
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 4,370
| 0.511231
|
59424eac730e7540dbabc57af91b0ddacf577089
| 458
|
py
|
Python
|
hackerearth/Algorithms/Fredo and Sums/solution.py
|
ATrain951/01.python-com_Qproject
|
c164dd093954d006538020bdf2e59e716b24d67c
|
[
"MIT"
] | 4
|
2020-07-24T01:59:50.000Z
|
2021-07-24T15:14:08.000Z
|
hackerearth/Algorithms/Fredo and Sums/solution.py
|
ATrain951/01.python-com_Qproject
|
c164dd093954d006538020bdf2e59e716b24d67c
|
[
"MIT"
] | null | null | null |
hackerearth/Algorithms/Fredo and Sums/solution.py
|
ATrain951/01.python-com_Qproject
|
c164dd093954d006538020bdf2e59e716b24d67c
|
[
"MIT"
] | null | null | null |
"""
# Sample code to perform I/O:
name = input() # Reading input from STDIN
print('Hi, %s.' % name) # Writing output to STDOUT
# Warning: Printing unwanted or ill-formatted data to output will cause the test cases to fail
"""
# Write your code here
t = int(input())
for _ in range(t):
n = int(input())
a = sorted(map(int, input().strip().split()))
print(sum(a[1::2]) - sum(a[0::2]), sum(a[n // 2:]) - sum(a[:n // 2]))
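# Worked example (editor's comment): for n = 4 and input sorted to [1, 2, 3, 4]
#     sum(a[1::2]) - sum(a[0::2])    = (2 + 4) - (1 + 3) = 2
#     sum(a[n//2:]) - sum(a[:n//2])  = (3 + 4) - (1 + 2) = 4
# so the program prints "2 4" for that test case.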
| 28.625
| 94
| 0.576419
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 274
| 0.598253
|
5942ff8661f94ed3c33e9cd05d6389cd70d923f4
| 1,753
|
py
|
Python
|
Wizard Battle App/wizardbattle.py
|
rayjustinhuang/PythonApps
|
ba5572fbff38de71f806558c5d0be5827962aebb
|
[
"MIT"
] | null | null | null |
Wizard Battle App/wizardbattle.py
|
rayjustinhuang/PythonApps
|
ba5572fbff38de71f806558c5d0be5827962aebb
|
[
"MIT"
] | null | null | null |
Wizard Battle App/wizardbattle.py
|
rayjustinhuang/PythonApps
|
ba5572fbff38de71f806558c5d0be5827962aebb
|
[
"MIT"
] | null | null | null |
import random
import time
from characters import Wizard, Creature
def main():
game_header()
game_loop()
def game_header():
print('------------------------------')
print(' WIZARD TEXT GAME APP')
print('------------------------------')
def game_loop():
creatures = [
Creature('Toad', 1),
Creature('Tiger', 12),
Creature('Bat', 3),
Creature('Dragon', 50),
Creature('Evil Wizard', 1000),
]
# print(creatures)
hero = Wizard('Gandalf', 75)
while True:
active_creature = random.choice(creatures)
print('A {} of level {} has appeared from a dark and foggy forest...'
.format(active_creature.name, active_creature.level))
print()
cmd = input('Do you [a]ttack, [r]un away, or [l]ook around? ')
if cmd == 'a':
# print('attack')
if hero.attack(active_creature):
creatures.remove(active_creature)
else:
print('The wizard retreats to recover...')
time.sleep(5)
print('The wizard returns revitalized')
elif cmd == 'r':
# print('run away')
print('The wizard has become unsure of himself and flees...')
elif cmd == 'l':
# print('look around')
print('The wizard {} takes a look around and sees...'.format(hero.name))
for c in creatures:
print(' * A {} of level {}'.format(c.name, c.level))
else:
print('exiting game...')
break
if not creatures:
print("You've defeated all the creatures!!! You win!")
break
print()
if __name__ == '__main__':
main()
| 25.405797
| 84
| 0.50599
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 599
| 0.3417
|
5943869d3d4d2e30ae0802900ea733c4c32ec043
| 2,581
|
py
|
Python
|
xmastreegame/ThreadedTree.py
|
martinohanlon/GPIOXmasTreeGame
|
0d32ff7ca4fe3c2b536f5fa4490d09c1caf54b3a
|
[
"MIT"
] | 2
|
2015-01-21T22:13:53.000Z
|
2017-12-13T17:57:37.000Z
|
xmastreegame/ThreadedTree.py
|
martinohanlon/GPIOXmasTreeGame
|
0d32ff7ca4fe3c2b536f5fa4490d09c1caf54b3a
|
[
"MIT"
] | null | null | null |
xmastreegame/ThreadedTree.py
|
martinohanlon/GPIOXmasTreeGame
|
0d32ff7ca4fe3c2b536f5fa4490d09c1caf54b3a
|
[
"MIT"
] | null | null | null |
import threading
from time import sleep
import RPi.GPIO as GPIO
illumination_time_default = 0.001
class XmasTree(threading.Thread):
#Pins
#Model B+ or A+
#A, B, C, D = 21, 19, 26, 20
#Other model, probably Model A or Model B
#A, B, C, D = 7, 9, 11, 8
def __init__(self, A = 21,B = 19, C = 26, D = 20):
#setup threading
threading.Thread.__init__(self)
#setup properties
self.running = False
self.stopped = False
self.leds = 0
self.A, self.B, self.C, self.D = A, B, C, D
def run(self):
self.running = True
#loop until its stopped
while not self.stopped:
for i in range(8):
self._single_led_on(self.leds & (1<<i))
sleep(illumination_time_default)
#once stopped turn the leds off
self.leds_on(0)
self.running = False
def stop(self):
self.stopped = True
#wait for it to stop running
while self.running:
sleep(0.01)
def leds_on(self, leds):
self.leds = leds
def _single_led_on(self, n):
A, B, C, D = self.A, self.B, self.C, self.D
# First, set all the nodes to be input (effectively
# 'disconnecting' them from the Raspberry Pi)
GPIO.setup(A, GPIO.IN)
GPIO.setup(B, GPIO.IN)
GPIO.setup(C, GPIO.IN)
GPIO.setup(D, GPIO.IN)
# Now determine which nodes are connected to the anode
# and cathode for this LED
if (n==1): anode, cathode = C, A
elif (n==2): anode, cathode = C, D
elif (n==4): anode, cathode = D, C
elif (n==8): anode, cathode = D, B
elif (n==16): anode, cathode = B, D
elif (n==32): anode, cathode = A, B
elif (n==64): anode, cathode = B, A
elif (n==128): anode, cathode = A, C
else: return # invalid LED number
# Configure the anode and cathode nodes to be outputs
GPIO.setup(anode, GPIO.OUT)
GPIO.setup(cathode, GPIO.OUT)
# Make the anode high (+3.3v) and the cathode low (0v)
GPIO.output(anode, GPIO.HIGH)
GPIO.output(cathode, GPIO.LOW)
#test
if __name__ == "__main__":
L0 = 1
L1 = 2
L2 = 4
L3 = 8
L4 = 16
L5 = 32
L6 = 64
ALL = 1+2+4+8+16+32+64
GPIO.setmode(GPIO.BCM)
try:
tree = XmasTree()
tree.start()
tree.leds_on(ALL)
while(True):
sleep(0.1)
finally:
tree.stop()
GPIO.cleanup()
| 25.058252
| 62
| 0.533514
| 2,100
| 0.813638
| 0
| 0
| 0
| 0
| 0
| 0
| 553
| 0.214258
|
59445fc42f57f15739274fff9371a3ae622d87a7
| 1,962
|
py
|
Python
|
cap7/ex5.py
|
felipesch92/livroPython
|
061b1c095c3ec2d25fb1d5fdfbf9e9dbe10b3307
|
[
"MIT"
] | null | null | null |
cap7/ex5.py
|
felipesch92/livroPython
|
061b1c095c3ec2d25fb1d5fdfbf9e9dbe10b3307
|
[
"MIT"
] | null | null | null |
cap7/ex5.py
|
felipesch92/livroPython
|
061b1c095c3ec2d25fb1d5fdfbf9e9dbe10b3307
|
[
"MIT"
] | null | null | null |
jogo = [[], [], []], [[], [], []], [[], [], []]
cont = 0
contx = conto = contxc = contoc = 0
while True:
l = int(input('Informe a linha: '))
c = int(input('Informe a coluna: '))
if l < 4 and c < 4:
if cont % 2 == 0:
jogo[l-1][c-1] = 'X'
else:
jogo[l-1][c-1] = 'O'
cont += 1
for x in range(0, 3):
for j in jogo[x]:
if j == 'X':
contx += 1
if j == 'O':
conto +=1
for k in range(0, 3):
if jogo[k][x] == 'X':
contxc += 1
if jogo[k][x] == 'O':
contoc += 1
print(jogo[x])
if jogo[0][0] == 'X' and jogo[1][1] == 'X' and jogo[2][2] == 'X':
print(jogo[x + 1])
print(jogo[x + 2])
print(f'Parabéns, X venceu!')
break
if jogo[0][0] == 'O' and jogo[1][1] == 'O' and jogo[2][2] == 'O':
print(jogo[x + 1])
print(jogo[x + 2])
print(f'Parabéns, O venceu!')
break
if jogo[0][2] == 'X' and jogo[1][1] == 'X' and jogo[2][0] == 'X':
print(jogo[x + 1])
print(jogo[x + 2])
print(f'Parabéns, X venceu!')
break
if jogo[0][2] == 'O' and jogo[1][1] == 'O' and jogo[2][0] == 'O':
print(jogo[x + 1])
print(jogo[x + 2])
print(f'Parabéns, O venceu!')
break
if contx == 3 or contxc == 3:
print(jogo[x+1])
print(f'Parabéns, X venceu!')
break
if conto == 3 or contoc == 3:
print(jogo[x+1])
print(f'Parabéns, O venceu!')
break
contx = conto = contxc = contoc = 0
else:
print('Posição já preenchida')
| 35.035714
| 77
| 0.35474
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 257
| 0.130391
|
5944d36b482e6230d5854a8d2998c95179d5d03e
| 23,625
|
py
|
Python
|
lib/intercom_test/framework.py
|
rtweeks/intercom_test
|
a682088af93d280297764b639f4727ec4716673f
|
[
"Apache-2.0"
] | null | null | null |
lib/intercom_test/framework.py
|
rtweeks/intercom_test
|
a682088af93d280297764b639f4727ec4716673f
|
[
"Apache-2.0"
] | null | null | null |
lib/intercom_test/framework.py
|
rtweeks/intercom_test
|
a682088af93d280297764b639f4727ec4716673f
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2018 PayTrace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from enum import Enum
import functools
from io import StringIO
import json
import logging
import os.path
import shutil
import yaml
from .cases import (
IdentificationListReader as CaseIdListReader,
hash_from_fields as _hash_from_fields,
)
from .exceptions import MultipleAugmentationEntriesError, NoAugmentationError
from .augmentation.compact_file import (
augment_dict_from,
case_keys as case_keys_in_compact_file,
TestCaseAugmenter as CompactFileAugmenter,
Updater as CompactAugmentationUpdater,
)
from .augmentation import update_file
from .utils import (
FilteredDictView as _FilteredDictView,
open_temp_copy,
)
from .yaml_tools import (
YAML_EXT,
content_events as _yaml_content_events,
get_load_all_fn as _get_yaml_load_all,
)
logger = logging.getLogger(__name__)
class InterfaceCaseProvider:
"""Test case data manager
Use an instance of this class to:
* Generate test case data :class:`dict`\ s
* Decorate the case runner function (if auto-updating of compact
augmentation data files is desired)
* Merge extension test case files to the main test case file
* Other case augmentation management tasks
Setting :attr:`use_body_type_magic` to ``True`` automatically parses the
``"request body"`` value as JSON if ``"request type"`` in the same test
case is ``"json"``, and similarly for ``"response body"`` and
``"response type"``.
.. automethod:: __init__
"""
use_body_type_magic = False
safe_yaml_loading = True
class _UpdateState(Enum):
not_requested = '-'
requested = '?'
aborted = '!'
def __repr__(self, ):
return "<{}.{}>".format(type(self).__name__, self.name)
_case_augmenter = None
def __init__(self, spec_dir, group_name, *, case_augmenter=None):
"""Constructing an instance
:param spec_dir: File system directory for test case specifications
:param group_name: Name of the group of tests to load
:keyword case_augmenter:
*optional* An object providing the interface of a
:class:`.CaseAugmenter`
The main test case file of the group is located in *spec_dir* and is
named for *group_name* with the '.yml' extension added. Extension
test case files are found in the *group_name* subdirectory of
*spec_dir* and all have '.yml' extensions.
"""
super().__init__()
self._spec_dir = spec_dir
self._group_name = group_name
self._compact_files_update = self._UpdateState.not_requested
if case_augmenter:
self._case_augmenter = case_augmenter
self._augmented_case = case_augmenter.augmented_test_case
@property
def spec_dir(self):
"""The directory containing the test specification files for this instance"""
return self._spec_dir
@property
def group_name(self):
"""Name of group of test cases to load for this instance"""
return self._group_name
@property
def case_augmenter(self):
"""The :class:`.CaseAugmenter` instance used by this object, if any"""
return self._case_augmenter
@property
def main_group_test_file(self):
"""Path to the main test file of the group for this instance"""
return os.path.join(self.spec_dir, self.group_name + YAML_EXT)
def extension_files(self, ):
"""Get an iterable of the extension files of this instance"""
return extension_files(self.spec_dir, self.group_name)
def cases(self, ):
"""Generates :class:`dict`\ s of test case data
This method reads test cases from the group's main test case file
and auxiliary files, possibly extending them with augmented data (if
*case_augmentations* was given in the constructor).
"""
yield from self._cases_from_file(self.main_group_test_file)
for ext_file in sorted(self.extension_files()):
yield from self._cases_from_file(ext_file)
if self._compact_files_update is self._UpdateState.requested:
self.update_compact_files()
def update_compact_augmentation_on_success(self, fn):
"""Decorator for activating compact data file updates
Using this decorator around the test functions tidies up the logic
around whether to propagate test case augmentation data from update
files to compact files. The compact files will be updated if all
interface tests succeed and not if any of them fail.
The test runner function can be automatically wrapped with this
functionality through :meth:`case_runners`.
"""
CFUpdate = self._UpdateState
@functools.wraps(fn)
def wrapper(*args, **kwargs):
if self._compact_files_update is not CFUpdate.aborted:
self._compact_files_update = CFUpdate.requested
try:
return fn(*args, **kwargs)
except:
self._compact_files_update = CFUpdate.aborted
raise
return wrapper
def case_runners(self, fn, *, do_compact_updates=True):
"""Generates runner callables from a callable
The callables in the returned iterable each call *fn* with all the
positional arguments they are given, the test case :class:`dict` as an
additional positional argument, and all keyword arguments passed to
the case runner.
Using this method rather than :meth:`cases` directly for running tests
has two advantages:
* The default of *do_compact_updates* automatically applies
:meth:`update_compact_augmentation_on_success` to *fn*
* Each returned runner callable will log the test case as YAML prior
to invoking *fn*, which is helpful when updating the augmenting data
for the case becomes necessary
Each callable generated will also have the case data available via
an :attr:`case` on the callable.
"""
if do_compact_updates and self._case_augmenter is not None:
fn = self.update_compact_augmentation_on_success(fn)
for case in self.cases():
@functools.wraps(fn)
def wrapper(*args, **kwargs):
logger.info("{}\n{}".format(
" CASE TESTED ".center(40, '*'),
yaml.dump([case]),
))
return fn(*args, case, **kwargs)
wrapper.case = case
yield wrapper
def update_compact_files(self, ):
"""Calls the :class:`CaseAugmenter` to apply compact data file updates
:raises NoAugmentationError:
when no case augmentation data was specified during construction
of this object
"""
if self._case_augmenter is None:
raise NoAugmentationError("No augmentation data specified")
return self._case_augmenter.update_compact_files()
def merge_test_extensions(self, ):
"""Merge the extension files of the target group into the group's main file"""
ext_files = sorted(self.extension_files())
with open(self.main_group_test_file, 'ab') as fixed_version_specs:
for ext_file in ext_files:
ext_file_ref = os.path.relpath(ext_file, os.path.join(self.spec_dir, self.group_name))
print("---\n# From {}\n".format(ext_file_ref).encode('utf8'), file=fixed_version_specs)
with open(ext_file, 'rb') as ext_specs:
shutil.copyfileobj(ext_specs, fixed_version_specs)
for ext_file in ext_files:
os.remove(ext_file)
def _augmented_case(self, x):
"""This method is defined to be overwritten on the instance level when augmented data is used"""
return x
def _cases_from_file(self, filepath):
with open(filepath, 'rb') as file:
load_all_yaml = _get_yaml_load_all(safe=self.safe_yaml_loading)
for test_case in (
tc
for case_set in load_all_yaml(file)
for tc in case_set
):
if self.use_body_type_magic:
_parse_json_bodies(test_case)
yield self._augmented_case(test_case)
def extension_files(spec_dir, group_name):
"""Iterator of file paths for extensions of a test case group
:param spec_dir: Directory in which specifications live
:param group_name: Name of the group to iterate
"""
yield from data_files(os.path.join(spec_dir, group_name))
def data_files(dir_path):
"""Generate data file paths from the given directory"""
try:
dir_listing = os.listdir(dir_path)
except FileNotFoundError:
return
for entry in dir_listing:
entry = os.path.join(dir_path, entry)
if not os.path.isfile(entry):
continue
if not entry.endswith(YAML_EXT):
continue
yield entry
def _parse_json_bodies(test_case):
if test_case.get('request type') == 'json':
test_case['request body'] = json.loads(test_case['request body'])
if test_case.get('response type') == 'json':
test_case['response body'] = json.loads(test_case['response body'])
class CaseAugmenter:
"""Base class of case augmentation data managers
This class uses and manages files in a case augmentation directory. The
data files are intended to either end in '.yml' or '.update.yml'.
The version control system should, typically, be set up to ignore files
with the '.update.yml' extension. These two kinds of files have a different
"data shape".
Update files (ending in '.update.yml') are convenient for manual editing
because they look like the test case file from which the case came, but
with additional entries in the case data :class:`dict`. The problems with
long term use of this file format are A) it is inefficient for correlation
to test cases, and B) it duplicates data from the test case, possibly
leading to confusion when modifying the .update.yml file does not change
the test case.
Compact data files (other files ending in '.yml') typically are generated
through this package. The format is difficult to manually correlate with
the test file, but does not duplicate all of the test case data as does the
update file data format. Instead, the relevant keys of the test case are
hashed and the hash value is used to index the additional augmentation
value entries.
It is an error for a test case to have multiple augmentations defined
within .yml files (excluding .update.yml files), whether in the same or
different files. It is also an error for multiple files with the
.update.yml extension to specify augmentation for the same case, though
within the same file the last specification is taken. When augmentations
for a case exist within both one .update.yml and one .yml file, the
.update.yml is used (with the goal of updating the .yml file with the
new augmentation values).
Methods of this class depend on the class-level presence of
:const:`CASE_PRIMARY_KEYS`, which is not provided in this class. To use
this class's functionality, derive from it and define this constant in
the subclass. Two basic subclasses are defined in this module:
:class:`HTTPCaseAugmenter` and :class:`RPCCaseAugmenter`.
.. automethod:: __init__
"""
UPDATE_FILE_EXT = ".update" + YAML_EXT
# Set this to False to allow arbitrary object instantiation and code
# execution from loaded YAML
safe_loading = True
def __init__(self, augmentation_data_dir):
"""Constructing an instance
:param augmentation_data_dir:
path to directory holding the augmentation data
"""
super().__init__()
# Initialize info on extension data location
self._case_augmenters = {}
self._updates = {} # compact_file_path -> dict of update readers
working_files = []
self._augmentation_data_dir = augmentation_data_dir
for file_path in data_files(augmentation_data_dir):
if file_path.endswith(self.UPDATE_FILE_EXT):
working_files.append(file_path)
else:
self._load_compact_refs(file_path)
self._index_working_files(working_files)
@property
def augmentation_data_dir(self):
return self._augmentation_data_dir
def _load_compact_refs(self, file_path):
for case_key, start_byte in case_keys_in_compact_file(file_path):
if case_key in self._case_augmenters:
self._excessive_augmentation_data(case_key, self._case_augmenters[case_key].file_path, file_path)
self._case_augmenters[case_key] = CompactFileAugmenter(file_path, start_byte, case_key, safe_loading=self.safe_loading)
self._case_augmenters[case_key].safe_loading = self.safe_loading
def _excessive_augmentation_data(self, case_key, file1, file2):
if file1 == file2:
error_msg = "Test case key \"{}\" has multiple augmentation entries in {}".format(
case_key,
file1,
)
else:
error_msg = "Test case key \"{}\" has augmentation entries in {} and {}".format(
case_key,
file1,
file2,
)
raise MultipleAugmentationEntriesError(error_msg)
def _index_working_files(self, working_files):
for case_key, augmenter in update_file.index(working_files, self.CASE_PRIMARY_KEYS, safe_loading=self.safe_loading).items():
existing_augmenter = self._case_augmenters.get(case_key)
if isinstance(existing_augmenter, CompactFileAugmenter):
if augmenter.deposit_file_path != existing_augmenter.file_path:
raise MultipleAugmentationEntriesError(
"case {} conflicts with case \"{}\" in {}; if present, this case must be in {}".format(
augmenter.case_reference,
case_key,
existing_augmenter.file_path,
os.path.basename(existing_augmenter.file_path).replace(
YAML_EXT,
self.UPDATE_FILE_EXT
),
)
)
elif existing_augmenter is not None:
raise MultipleAugmentationEntriesError(
"case {} conflicts with case {}".format(
augmenter.case_reference,
existing_augmenter.case_reference,
)
)
self._updates.setdefault(augmenter.deposit_file_path, {})[case_key] = augmenter
self._case_augmenters[case_key] = augmenter
@classmethod
def key_of_case(cls, test_case):
"""Compute the key (hash) value of the given test case"""
if hasattr(test_case, 'items'):
test_case = test_case.items()
return _hash_from_fields(
(k, v) for k, v in test_case
if k in cls.CASE_PRIMARY_KEYS
)
def augmented_test_case(self, test_case):
"""Add key/value pairs to *test_case* per the stored augmentation data
:param dict test_case: The test case to augment
:returns: Test case with additional key/value pairs
:rtype: dict
"""
case_key = self.key_of_case(test_case)
augment_case = self._case_augmenters.get(case_key)
if not augment_case:
return test_case
aug_test_case = dict(test_case)
augment_case(aug_test_case)
return aug_test_case
def augmented_test_case_events(self, case_key, case_id_events):
"""Generate YAML events for a test case
:param str case_key:
The case key for augmentation
:param case_id_events:
An iterable of YAML events representing the key/value pairs of the
test case identity
This is used internally when extending an updates file with the existing
data from a case, given the ID of the case as YAML.
"""
case_augmenter = self._case_augmenters.get(case_key)
yield yaml.MappingStartEvent(None, None, True, flow_style=False)
yield from case_id_events
if case_augmenter is not None:
yield from case_augmenter.case_data_events()
yield yaml.MappingEndEvent()
def update_compact_files(self, ):
"""Update compact data files from update data files"""
for file_path, updates in self._updates.items():
if os.path.exists(file_path):
with open_temp_copy(file_path, binary=True) as instream, open(file_path, 'wb') as outstream:
updated_events = self._updated_compact_events(
yaml.parse(instream),
updates
)
yaml.emit(updated_events, outstream)
else:
with open(file_path, 'wb') as outstream:
yaml.emit(self._fresh_content_events(updates.items()), outstream)
def extend_updates(self, file_name_base):
"""Create an object for extending a particular update file
The idea is::
case_augmenter.extend_updates('foo').with_current_augmentation(sys.stdin)
"""
return UpdateExtender(file_name_base, self, safe_loading=self.safe_loading)
def _updated_compact_events(self, events, updates):
mutator = CompactAugmentationUpdater(
_FilteredDictView(
updates,
value_transform=self._full_yaml_mapping_events_from_update_augmentation
),
self.CASE_PRIMARY_KEYS
)
yield from (
output_event
for input_event in events
for output_event in mutator.filter(input_event)
)
@classmethod
def _full_yaml_mapping_events_from_update_augmentation(cls, augmenter):
yield yaml.MappingStartEvent(None, None, True, flow_style=False)
yield from augmenter.case_data_events()
yield yaml.MappingEndEvent()
def _fresh_content_events(self, content_iterable):
# Header events
yield yaml.StreamStartEvent()
yield yaml.DocumentStartEvent()
yield yaml.MappingStartEvent(None, None, True, flow_style=False)
# Content events
for key, value in content_iterable:
yield yaml.ScalarEvent(None, None, (True, False), key)
if isinstance(value, dict):
yield from _yaml_content_events(dict(
(k, v)
for k, v in value.items()
if k not in self.CASE_PRIMARY_KEYS
))
elif callable(getattr(value, 'case_data_events', None)):
yield yaml.MappingStartEvent(None, None, True, flow_style=False)
yield from value.case_data_events()
yield yaml.MappingEndEvent()
else:
yield yaml.MappingStartEvent(None, None, True, flow_style=False)
yield from value
yield yaml.MappingEndEvent()
# Tail events
yield yaml.MappingEndEvent()
yield yaml.DocumentEndEvent()
yield yaml.StreamEndEvent()
class HTTPCaseAugmenter(CaseAugmenter):
"""A :class:`.CaseAugmenter` subclass for augmenting HTTP test cases"""
CASE_PRIMARY_KEYS = frozenset((
'url', 'method', 'request body',
))
class RPCCaseAugmenter(CaseAugmenter):
"""A :class:`.CaseAugmenter` subclass for augmenting RPC test cases"""
CASE_PRIMARY_KEYS = frozenset((
'endpoint', 'request parameters',
))
class UpdateExtender:
safe_loading = True
def __init__(self, file_name_base, case_augmenter, *, safe_loading=None):
super().__init__()
if safe_loading is not None and safe_loading is not self.safe_loading:
self.safe_loading = safe_loading
self._file_name = os.path.join(
case_augmenter.augmentation_data_dir,
file_name_base + case_augmenter.UPDATE_FILE_EXT
)
self._case_augmenter = case_augmenter
@property
def file_name(self):
return self._file_name
def with_current_augmentation(self, stream):
"""Append the full test case with its current augmentation data to the target file
:param stream:
A file-like object (which could be passed to :func:`yaml.parse`)
The *stream* contains YAML identifying the test case in question. The
identifying YAML from the test case _plus_ the augmentative key/value
pairs as currently defined in the augmenting data files will be written
to the file :attr:`file_name`.
"""
if stream.isatty():
print("Input test cases from interface, ending with a line containing only '...':")
buffered_input = StringIO()
for line in stream:
if line.rstrip() == "...":
break
buffered_input.write(line)
buffered_input.seek(0)
stream = buffered_input
id_list_reader = CaseIdListReader(self._case_augmenter.CASE_PRIMARY_KEYS, safe_loading=self.safe_loading)
for event in yaml.parse(stream):
test_case = id_list_reader.read(event)
if test_case is None:
continue
# Look up augmentation for case_id
case_as_currently_augmented_events = (
self._case_augmenter.augmented_test_case_events(*test_case)
)
# Append augmentation case to self.file_name
with open(self.file_name, 'ab') as outstream:
yaml.emit(
self._case_yaml_events(case_as_currently_augmented_events),
outstream,
)
def _case_yaml_events(self, content_events):
yield yaml.StreamStartEvent()
yield yaml.DocumentStartEvent(explicit=True)
yield yaml.SequenceStartEvent(None, None, implicit=True, flow_style=False)
yield from content_events
yield yaml.SequenceEndEvent()
yield yaml.DocumentEndEvent()
yield yaml.StreamEndEvent()
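# Hedged usage sketch (editor's addition, not part of the library). A test
# module typically wires the classes above together like this -- the paths,
# group name and run_one_case() are placeholders, not library names:
#
#     augmenter = HTTPCaseAugmenter("specs/augmentation")
#     provider = InterfaceCaseProvider("specs", "payments",
#                                      case_augmenter=augmenter)
#
#     def run_one_case(case):
#         ...  # exercise the interface described by `case`, assert on it
#
#     for runner in provider.case_runners(run_one_case):
#         runner()   # logs the case as YAML, then calls run_one_case(case)
#
# Because case_runners() wraps the function with
# update_compact_augmentation_on_success() by default, the compact
# augmentation files are only rewritten if every case passes.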
| 40.247019
| 132
| 0.634201
| 21,216
| 0.898032
| 6,315
| 0.267302
| 1,970
| 0.083386
| 0
| 0
| 9,349
| 0.395725
|
5945f3b8e933ce01f957d7f582aa80cb9b902687
| 1,283
|
py
|
Python
|
2020/03/day3.py
|
AlbertVeli/AdventOfCode
|
3d3473695318a0686fac720a1a21dd3629f09e33
|
[
"Unlicense"
] | null | null | null |
2020/03/day3.py
|
AlbertVeli/AdventOfCode
|
3d3473695318a0686fac720a1a21dd3629f09e33
|
[
"Unlicense"
] | null | null | null |
2020/03/day3.py
|
AlbertVeli/AdventOfCode
|
3d3473695318a0686fac720a1a21dd3629f09e33
|
[
"Unlicense"
] | 1
|
2021-12-04T10:37:09.000Z
|
2021-12-04T10:37:09.000Z
|
#!/usr/bin/env python3
# Day 3, with some speed optimizations
# Not really necessary for day 3, but probably later
import sys
import typing
import array
if len(sys.argv) != 2:
print('Usage:', sys.argv[0], '<input.txt>')
sys.exit(1)
width = 0
heigth = 0
# Use 1-d array of bytes to keep pixels
def read_input(fname: str) -> array.array[int]:
global width
global heigth
a = array.array('b')
width = len(open(fname).readline().rstrip())
for line in open(fname).read().splitlines():
heigth += 1
for c in line:
# Each pixel is True or False
a.append(c == '#')
return a
a = read_input(sys.argv[1])
# for faster x,y lookup in a
ytab = array.array('I')
for y in range(heigth):
ytab.append(y * width)
def get_pixel(x: int, y: int) -> int:
return a[(x % width) + ytab[y]]
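# Editor's comment: the map repeats horizontally, so get_pixel wraps x with
# x % width and adds the precomputed row offset from ytab -- i.e. it reads
# a[(x % width) + y * width] without recomputing y * width on every call.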
def slope(dx: int, dy: int) -> int:
x = 0
y = 0
trees = 0
while True:
x += dx
y += dy
if y >= heigth:
break
if get_pixel(x, y) == True:
trees += 1
return trees
# part 1
print(slope(3, 1))
# part 2
slopes = [
(1,1),
(3,1),
(5,1),
(7,1),
(1,2)
]
f = 1
for s in slopes:
f *= slope(s[0], s[1])
print(f)
| 18.070423
| 52
| 0.533125
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 254
| 0.197973
|
59470f4e50387be73fea566efd45c232849a6813
| 226
|
py
|
Python
|
Introduction to Computer Science and Programing Using Python/Exercises/Week 2 - Function, Strings and Alogorithms/Bisection Search.py
|
Dittz/Learning_Python
|
4c0c97075ef5e1717f82e2cf24b0587f0c8504f5
|
[
"MIT"
] | null | null | null |
Introduction to Computer Science and Programing Using Python/Exercises/Week 2 - Function, Strings and Alogorithms/Bisection Search.py
|
Dittz/Learning_Python
|
4c0c97075ef5e1717f82e2cf24b0587f0c8504f5
|
[
"MIT"
] | null | null | null |
Introduction to Computer Science and Programing Using Python/Exercises/Week 2 - Function, Strings and Alogorithms/Bisection Search.py
|
Dittz/Learning_Python
|
4c0c97075ef5e1717f82e2cf24b0587f0c8504f5
|
[
"MIT"
] | null | null | null |
x = 23
epsilon = 0.001
guess = x/2
tries = 0
while abs(guess**2- x) >= epsilon:
if guess**2 > x:
guess /=2
else:
guess *=1.5
tries +=1
print(f'Number of tries: {tries}')
print(f'Guess = {guess}')
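# Worked trace (editor's comment), x = 23, epsilon = 0.001:
#     guess = 11.5   -> 11.5**2  = 132.25 > 23, halve to 5.75
#     guess = 5.75   -> 5.75**2  = 33.06  > 23, halve to 2.875
#     guess = 2.875  -> 2.875**2 = 8.27   < 23, grow by 1.5x to 4.3125
#     ... and so on toward sqrt(23) ~ 4.7958.
# Note this is a halve-or-grow search rather than a true bisection on a
# shrinking [low, high] interval, so it can take many iterations to get
# within epsilon.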
| 15.066667
| 34
| 0.535398
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 45
| 0.199115
|
5949927fb2326eb76fbf268aa983df1f7b22c9a8
| 6,223
|
py
|
Python
|
SB_Admin_2/templates/dashboard.py
|
Softyy/sb-admin-2-dash
|
c57d46fd7f1703696fdd96a7b834beb32ab8a4aa
|
[
"MIT"
] | null | null | null |
SB_Admin_2/templates/dashboard.py
|
Softyy/sb-admin-2-dash
|
c57d46fd7f1703696fdd96a7b834beb32ab8a4aa
|
[
"MIT"
] | null | null | null |
SB_Admin_2/templates/dashboard.py
|
Softyy/sb-admin-2-dash
|
c57d46fd7f1703696fdd96a7b834beb32ab8a4aa
|
[
"MIT"
] | null | null | null |
import dash_core_components as dcc
import dash_html_components as html
from .layouts.info_card import render as info_card
from .layouts.graph_wrapper import render as graph_wrapper
from .layouts.project_bar import render as project_bar
from .layouts.color_card import render as color_card
from ..data_retrievers.dummy import create_bar_figure, create_line_figure
from ..consts import COLOR, DUMMY_PROJECTS
def render():
return html.Div(className="container-fluid", children=[
# Page Heading
html.Div([
html.H1("Dashboard", className="h3 mb-0 text-gray-800"),
html.A([html.I(className="fas fa-download fa-sm text-white-50"), "Generate Report"],
className="d-none d-sm-inline-block btn btn-sm btn-primary shadow-sm", href="#")
],
className="d-sm-flex align-items-center justify-content-between mb-4"
),
# Content Row
html.Div(className="row", children=[
info_card("Earnings (Monthly)", "$40,000",
"fa-calendar", "primary"),
info_card("Earnings (Annual)", "$215,000",
"fa-dollar-sign", "success"),
info_card("Tasks", display_value=50, icon="fa-clipboard-list",
color="info", progress_bar=True),
info_card("Pending Requests", display_value=18, icon="fa-comments",
color="warning"),
]),
html.Div(className="row", children=[
graph_wrapper("Earnings Overview", 8, 7, "lineGraphDropdownMenuLink",
dcc.Graph(figure=create_line_figure(),
config={'displayModeBar': False})),
graph_wrapper("Revenue Sources", 4, 5, 'pieChartDropdownMenuLink',
dcc.Graph(figure=create_bar_figure(),
config={'displayModeBar': False}))
]),
html.Div(className="row", children=[
# Content Column
html.Div(
className="col-lg-6 mb-4",
children=[
# Project Card Example
html.Div(
className="card shadow mb-4",
children=[
html.Div(
className="card-header py-3",
children=html.H6(
"Projects", className="m-0 font-weight-bold text-primary")
),
html.Div(
className="card-body",
children=[project_bar(p[0], p[1])
for p in DUMMY_PROJECTS]
)
]
),
# Color System
html.Div(
className="row",
children=[color_card(color, color, COLOR[color])
for color in COLOR]
)
]
),
html.Div(
className="col-lg-6 mb-4",
children=[
# Illustrations
html.Div(
className="card shadow mb-4",
children=[
html.Div(
className="card-header py-3",
children=html.H6(
"Illustrations", className="m-0 font-weight-bold text-primary")
),
html.Div(
className="card-body",
children=[
html.Div(
className="text-center",
children=html.Img(
className="img-fluid px-3 px-sm-4 mt-3 mb-4",
style={"width": "25rem"},
src="/assets/img/undraw_posting_photo.svg"
)
),
html.P([
"Add some quality, svg illustrations to your project courtesy of ",
html.A(
"unDraw", target="_blank", rel="nofollow", href="https://undraw.co/"),
" a constantly updated collection of beautiful svg images that you can use completely free and without attribution!"]),
html.A("Browse Illustrations on unDraw", target="_blank",
rel="nofollow", href="https://undraw.co/")
]
)
]
),
# Approach
html.Div(
className="card shadow mb-4",
children=[
html.Div(
className="card-header py-3",
children=html.H6(
"Development Approach", className="m-0 font-weight-bold text-primary")
),
html.Div(
className="card-body",
children=[
html.P("SB Admin 2 makes extensive use of Bootstrap 4 utility classes in order to reduce CSS bloat and poor page performance. Custom CSS classes are used to create custom components and custom utility classes."),
html.P(
"Before working with this theme, you should become familiar with the Bootstrap framework, especially the utility classes.", className="mb-0")
]
)
]
)
]
)
])
])
| 44.134752
| 248
| 0.415877
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,699
| 0.273019
|
594b9e391b71aa4e58f65f8b436f15f1fdaebd0a
| 2,440
|
py
|
Python
|
tests/unit/test_refresh_utils.py
|
anukaal/cloud-sql-python-connector
|
e8799c7de46dbe11a91a9a29173a5cfd279a561d
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/test_refresh_utils.py
|
anukaal/cloud-sql-python-connector
|
e8799c7de46dbe11a91a9a29173a5cfd279a561d
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/test_refresh_utils.py
|
anukaal/cloud-sql-python-connector
|
e8799c7de46dbe11a91a9a29173a5cfd279a561d
|
[
"Apache-2.0"
] | null | null | null |
""""
Copyright 2021 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from typing import Any
import aiohttp
import google.auth
import pytest # noqa F401 Needed to run the tests
from google.cloud.sql.connector.refresh_utils import _get_ephemeral, _get_metadata
from google.cloud.sql.connector.utils import generate_keys
@pytest.mark.asyncio
async def test_get_ephemeral(connect_string: str) -> None:
"""
Test to check whether _get_ephemeral runs without problems given a valid
connection string.
"""
project = connect_string.split(":")[0]
instance = connect_string.split(":")[2]
credentials, project = google.auth.default(
scopes=[
"https://www.googleapis.com/auth/sqlservice.admin",
"https://www.googleapis.com/auth/cloud-platform",
]
)
_, pub_key = await generate_keys()
async with aiohttp.ClientSession() as client_session:
result: Any = await _get_ephemeral(
client_session, credentials, project, instance, pub_key
)
result = result.split("\n")
assert (
result[0] == "-----BEGIN CERTIFICATE-----"
and result[len(result) - 1] == "-----END CERTIFICATE-----"
)
@pytest.mark.asyncio
async def test_get_metadata(connect_string: str) -> None:
"""
Test to check whether _get_metadata runs without problems given a valid
connection string.
"""
project = connect_string.split(":")[0]
instance = connect_string.split(":")[2]
credentials, project = google.auth.default(
scopes=[
"https://www.googleapis.com/auth/sqlservice.admin",
"https://www.googleapis.com/auth/cloud-platform",
]
)
async with aiohttp.ClientSession() as client_session:
result = await _get_metadata(client_session, credentials, project, instance)
assert result["ip_addresses"] is not None and isinstance(
result["server_ca_cert"], str
)
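# Hedged note (editor's addition): both tests split connect_string on ":" and
# take indices 0 and 2, i.e. the connect_string fixture is expected to be a
# full Cloud SQL instance connection name of the form
# "project:region:instance-name".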
| 30.123457
| 84
| 0.690164
| 0
| 0
| 0
| 0
| 1,623
| 0.665164
| 1,581
| 0.647951
| 1,112
| 0.455738
|
594bfb1a451c5278cb6eb0568922591b031e3438
| 105
|
py
|
Python
|
office365/sharepoint/search/query/popularTenantQuery.py
|
wreiner/Office365-REST-Python-Client
|
476bbce4f5928a140b4f5d33475d0ac9b0783530
|
[
"MIT"
] | 544
|
2016-08-04T17:10:16.000Z
|
2022-03-31T07:17:20.000Z
|
office365/sharepoint/search/query/popularTenantQuery.py
|
wreiner/Office365-REST-Python-Client
|
476bbce4f5928a140b4f5d33475d0ac9b0783530
|
[
"MIT"
] | 438
|
2016-10-11T12:24:22.000Z
|
2022-03-31T19:30:35.000Z
|
office365/sharepoint/search/query/popularTenantQuery.py
|
wreiner/Office365-REST-Python-Client
|
476bbce4f5928a140b4f5d33475d0ac9b0783530
|
[
"MIT"
] | 202
|
2016-08-22T19:29:40.000Z
|
2022-03-30T20:26:15.000Z
|
from office365.runtime.client_value import ClientValue
class PopularTenantQuery(ClientValue):
pass
| 17.5
| 54
| 0.828571
| 47
| 0.447619
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
594c04608e796ac9b1ce2395563d5fd38205ff2d
| 1,320
|
py
|
Python
|
blog/urls.py
|
MaryamKia/blog
|
5274fda9fa67d20f48b0554bd9659f54221ae423
|
[
"MIT"
] | null | null | null |
blog/urls.py
|
MaryamKia/blog
|
5274fda9fa67d20f48b0554bd9659f54221ae423
|
[
"MIT"
] | 10
|
2020-02-12T00:42:03.000Z
|
2022-01-13T01:20:37.000Z
|
blog/urls.py
|
PilaPont/blog
|
61eb5cf30fe9937b4d0c85eb319854946df69a27
|
[
"MIT"
] | null | null | null |
"""blog URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.urls import path
from django.contrib import admin
from django.conf.urls import include
from rest_framework_jwt.views import obtain_jwt_token
urlpatterns = [
path('admin/', admin.site.urls),
path('posts/', include(('posts.urls', 'posts'), namespace='posts')),
path('api/auth/login/', obtain_jwt_token, name='api-login'),
path('api/posts/', include(('posts.api.urls', 'posts'), namespace='api-posts')),
path('api/comments/', include(('comments.api.urls', 'comments'), namespace='comments-api')),
path('api/accounts/', include(('accounts.api.urls', 'accounts'), namespace='accounts-api')),
path('api-auth/', include('rest_framework.urls')),
]
| 42.580645
| 96
| 0.696212
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 901
| 0.682576
|