Dataset columns (⌀ marks nullable fields):

| column | dtype | range |
|---|---|---|
| hexsha | string | lengths 40..40 |
| size | int64 | 4..1.02M |
| ext | string | 8 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | lengths 4..209 |
| max_stars_repo_name | string | lengths 5..121 |
| max_stars_repo_head_hexsha | string | lengths 40..40 |
| max_stars_repo_licenses | list | lengths 1..10 |
| max_stars_count | int64 | 1..191k ⌀ |
| max_stars_repo_stars_event_min_datetime | string | lengths 24..24 ⌀ |
| max_stars_repo_stars_event_max_datetime | string | lengths 24..24 ⌀ |
| max_issues_repo_path | string | lengths 4..209 |
| max_issues_repo_name | string | lengths 5..121 |
| max_issues_repo_head_hexsha | string | lengths 40..40 |
| max_issues_repo_licenses | list | lengths 1..10 |
| max_issues_count | int64 | 1..67k ⌀ |
| max_issues_repo_issues_event_min_datetime | string | lengths 24..24 ⌀ |
| max_issues_repo_issues_event_max_datetime | string | lengths 24..24 ⌀ |
| max_forks_repo_path | string | lengths 4..209 |
| max_forks_repo_name | string | lengths 5..121 |
| max_forks_repo_head_hexsha | string | lengths 40..40 |
| max_forks_repo_licenses | list | lengths 1..10 |
| max_forks_count | int64 | 1..105k ⌀ |
| max_forks_repo_forks_event_min_datetime | string | lengths 24..24 ⌀ |
| max_forks_repo_forks_event_max_datetime | string | lengths 24..24 ⌀ |
| content | string | lengths 4..1.02M |
| avg_line_length | float64 | 1.07..66.1k |
| max_line_length | int64 | 4..266k |
| alphanum_fraction | float64 | 0.01..1 |

hexsha: f47ed8fe6020022d7b1589221f9e2222859d8ce5 | size: 4,785 | ext: py | lang: Python
max_stars_repo: sagas/ofbiz/schema_testing.py @ samlet/stack (head 47db17fd4fdab264032f224dca31a4bb1d19b754), licenses ["Apache-2.0"], stars: 3 (2020-01-11T13:55:38.000Z to 2020-08-25T22:34:15.000Z)
max_issues_repo: sagas/ofbiz/schema_testing.py @ samlet/stack (head 47db17fd4fdab264032f224dca31a4bb1d19b754), licenses ["Apache-2.0"], issues: null
max_forks_repo: sagas/ofbiz/schema_testing.py @ samlet/stack (head 47db17fd4fdab264032f224dca31a4bb1d19b754), licenses ["Apache-2.0"], forks: 1 (2021-01-01T05:21:44.000Z to 2021-01-01T05:21:44.000Z)
content:
import json
import graphene
from sagas.ofbiz.schema_queries_g import *
from sagas.ofbiz.connector import OfbizConnector
from sagas.ofbiz.finder import Finder
from sagas.ofbiz.util import QueryHelper
oc=OfbizConnector()
finder=Finder(oc)
helper=QueryHelper(oc, finder)
class Query(graphene.ObjectType):
testing = graphene.List(lambda: Testing)
testing_node = graphene.List(lambda: TestingNode)
def resolve_testing(self, info):
entity_name = "Testing"
recs = oc.all(entity_name)
ent = oc.delegator.getModelEntity(entity_name)
result = helper.fill_records(ent, Testing, recs)
return result
def resolve_testing_node(self, info):
# print("query testing_node")
entity_name = "TestingNode"
recs = oc.all(entity_name)
ent = oc.delegator.getModelEntity(entity_name)
result = helper.fill_records(ent, TestingNode, recs)
return result
schema = graphene.Schema(query=Query)
q1 = '''
{
testing {
testingId
testingName
testingTypeId
testingType{
lastUpdatedTxStamp
description
}
}
}
'''.strip()
q2 = '''
{
testingNode {
testingNodeId
testingNodeMember{
testingNodeId
testingId
}
}
}
'''.strip()
def clear_all():
oc.delegator.removeAll("TestingNodeMember")
oc.delegator.removeAll("TestingNode")
oc.delegator.removeAll("Testing")
oc.delegator.removeAll("TestingType")
def prepare():
create = finder.create
UtilDateTime = finder.oc.j.UtilDateTime
nowTimestamp = finder.now()
create("TestingType", "testingTypeId", "PERFOMFINDTEST")
create("Testing", "testingId", "PERF_TEST_1", "testingTypeId", "PERFOMFINDTEST", "testingName", "nice name one")
create("Testing", "testingId", "PERF_TEST_2", "testingTypeId", "PERFOMFINDTEST", "testingName",
"nice other name two")
create("Testing", "testingId", "PERF_TEST_3", "testingTypeId", "PERFOMFINDTEST", "testingName", "medium name three")
create("Testing", "testingId", "PERF_TEST_4", "testingTypeId", "PERFOMFINDTEST", "testingName", "bad nme four")
create("Testing", "testingId", "PERF_TEST_5", "testingTypeId", "PERFOMFINDTEST", "testingName", "nice name one")
create("Testing", "testingId", "PERF_TEST_6", "testingTypeId", "PERFOMFINDTEST")
create("Testing", "testingId", "PERF_TEST_7", "testingTypeId", "PERFOMFINDTEST")
create("Testing", "testingId", "PERF_TEST_8", "testingTypeId", "PERFOMFINDTEST")
create("Testing", "testingId", "PERF_TEST_9", "testingTypeId", "PERFOMFINDTEST")
create("TestingNode", "testingNodeId", "NODE_1", "description", "Date Node")
create("TestingNodeMember", "testingNodeId", "NODE_1", "testingId", "PERF_TEST_5",
"fromDate", UtilDateTime.addDaysToTimestamp(nowTimestamp, 1),
"thruDate", UtilDateTime.addDaysToTimestamp(nowTimestamp, 3),
"extendFromDate", UtilDateTime.addDaysToTimestamp(nowTimestamp, -1),
"extendThruDate", UtilDateTime.addDaysToTimestamp(nowTimestamp, 3))
create("TestingNodeMember", "testingNodeId", "NODE_1", "testingId", "PERF_TEST_6",
"fromDate", UtilDateTime.addDaysToTimestamp(nowTimestamp, -1),
"thruDate", UtilDateTime.addDaysToTimestamp(nowTimestamp, 1),
"extendFromDate", UtilDateTime.addDaysToTimestamp(nowTimestamp, -1),
"extendThruDate", UtilDateTime.addDaysToTimestamp(nowTimestamp, 3))
create("TestingNodeMember", "testingNodeId", "NODE_1", "testingId", "PERF_TEST_7",
"fromDate", UtilDateTime.addDaysToTimestamp(nowTimestamp, -1),
"thruDate", UtilDateTime.addDaysToTimestamp(nowTimestamp, 1),
"extendFromDate", UtilDateTime.addDaysToTimestamp(nowTimestamp, -1),
"extendThruDate", UtilDateTime.addDaysToTimestamp(nowTimestamp, 3))
create("TestingNodeMember", "testingNodeId", "NODE_1", "testingId", "PERF_TEST_8",
"fromDate", UtilDateTime.addDaysToTimestamp(nowTimestamp, -3),
"thruDate", UtilDateTime.addDaysToTimestamp(nowTimestamp, 1),
"extendFromDate", UtilDateTime.addDaysToTimestamp(nowTimestamp, -1),
"extendThruDate", UtilDateTime.addDaysToTimestamp(nowTimestamp, 3))
create("TestingNodeMember", "testingNodeId", "NODE_1", "testingId", "PERF_TEST_9",
"fromDate", UtilDateTime.addDaysToTimestamp(nowTimestamp, -3),
"thruDate", UtilDateTime.addDaysToTimestamp(nowTimestamp, -1),
"extendFromDate", UtilDateTime.addDaysToTimestamp(nowTimestamp, -1),
"extendThruDate", UtilDateTime.addDaysToTimestamp(nowTimestamp, -3))
if __name__ == '__main__':
clear_all()
prepare()
result = schema.execute(q2)
print(json.dumps(result.data, indent=2, ensure_ascii=False))
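
The companion query string q1 can be run the same way; a minimal sketch, assuming the same OFBiz backend used above is reachable:

```python
# Execute the Testing query defined above and print either data or errors.
result = schema.execute(q1)
if result.errors:
    print(result.errors)
else:
    print(json.dumps(result.data, indent=2, ensure_ascii=False))
```
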
avg_line_length: 40.550847 | max_line_length: 120 | alphanum_fraction: 0.694671

hexsha: 495eff15908c618c61d080dde8bbabe3ef2c3139 | size: 1,582 | ext: py | lang: Python
max_stars_repo: speedo_server/database/tables.py @ nokx5/speedo (head 23d42bd6148eef845d7f11c992b17ae1bc2fbd6d), licenses ["MIT"], stars: null
max_issues_repo: speedo_server/database/tables.py @ nokx5/speedo (head 23d42bd6148eef845d7f11c992b17ae1bc2fbd6d), licenses ["MIT"], issues: null
max_forks_repo: speedo_server/database/tables.py @ nokx5/speedo (head 23d42bd6148eef845d7f11c992b17ae1bc2fbd6d), licenses ["MIT"], forks: null
content:
# mypy: ignore-errors
from sqlalchemy import MetaData, Table, Column, ForeignKey, DateTime, String, Integer
from sqlalchemy import UniqueConstraint
from sqlalchemy.sql.functions import now
metadata = MetaData()
User = Table(
"speedo_user",
metadata,
Column("id", Integer, primary_key=True),
Column("name", String(64), unique=True, nullable=False),
)
Project = Table(
"project",
metadata,
Column("id", Integer, primary_key=True),
Column("name", String(128), unique=True, nullable=False),
)
Tag = Table(
"tag",
metadata,
Column("id", Integer, primary_key=True),
Column("name", String(256), unique=True, nullable=False),
)
Submission = Table(
"submission",
metadata,
Column("id", Integer, primary_key=True),
Column("uid", String, unique=True, index=True),
Column(
"timestamp",
DateTime(timezone=True),
index=True,
server_default=now(),
nullable=False,
),
Column("project_id", None, ForeignKey("project.id"), nullable=False),
Column("user_id", None, ForeignKey("speedo_user.id"), nullable=False),
Column("configuration", String(256), nullable=True),
Column("message", String, nullable=False),
UniqueConstraint("uid", "project_id", "user_id"),
)
TagRelation = Table(
"relation_tag_submission",
metadata,
Column("id", Integer, primary_key=True),
Column("tag_id", None, ForeignKey("tag.id"), nullable=False, unique=False),
Column(
"submission_id", None, ForeignKey("submission.id"), nullable=False, unique=False
),
)
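
A minimal usage sketch for these Core table definitions; the in-memory SQLite URL and the SQLAlchemy 1.4+ style calls below are assumptions for illustration, not part of the module:

```python
from sqlalchemy import create_engine, insert, select

engine = create_engine("sqlite:///:memory:")
metadata.create_all(engine)  # creates speedo_user, project, tag, submission, relation_tag_submission

with engine.begin() as conn:
    conn.execute(insert(User).values(name="alice"))
    print(conn.execute(select(User.c.name)).scalars().all())  # ['alice']
```
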
avg_line_length: 26.813559 | max_line_length: 88 | alphanum_fraction: 0.66182

hexsha: bc7e6b08e087ade969eb9b21b4c4b55be082e45d | size: 1,668 | ext: py | lang: Python
max_stars_repo: xpresso/binders/_openapi_providers/params/query.py @ graingert/xpresso (head 217ae3ca0e1f2d1d69bbb0376e8aab6decd64d6c), licenses ["MIT"], stars: null
max_issues_repo: xpresso/binders/_openapi_providers/params/query.py @ graingert/xpresso (head 217ae3ca0e1f2d1d69bbb0376e8aab6decd64d6c), licenses ["MIT"], issues: null
max_forks_repo: xpresso/binders/_openapi_providers/params/query.py @ graingert/xpresso (head 217ae3ca0e1f2d1d69bbb0376e8aab6decd64d6c), licenses ["MIT"], forks: null
content:
from pydantic.schema import field_schema, get_flat_models_from_field, get_model_name_map
from xpresso.binders._openapi_providers.api import ModelNameMap, Schemas
from xpresso.binders._openapi_providers.params.base import (
OpenAPIParameterBase,
OpenAPIParameterMarkerBase,
)
from xpresso.openapi import models as openapi_models
from xpresso.openapi.constants import REF_PREFIX
class OpenAPIQueryParameter(OpenAPIParameterBase):
def get_openapi(
self, model_name_map: ModelNameMap, schemas: Schemas
) -> openapi_models.ConcreteParameter:
model_name_map.update(
get_model_name_map(
get_flat_models_from_field(
self.field,
model_name_map.keys(), # type: ignore[arg-type]
)
)
)
schema, refs, _ = field_schema(
self.field,
by_alias=True,
ref_prefix=REF_PREFIX,
model_name_map=model_name_map,
)
schemas.update(refs)
return openapi_models.Query(
description=self.description or self.field.field_info.description, # type: ignore[arg-type]
required=self.required,
deprecated=self.deprecated,
style=self.style, # type: ignore[arg-type]
explode=self.explode,
schema=openapi_models.Schema(
**schema, nullable=self.field.allow_none or None
),
examples=self.examples, # type: ignore[arg-type]
name=self.name,
)
class OpenAPIQueryParameterMarker(OpenAPIParameterMarkerBase):
cls = OpenAPIQueryParameter
    in_ = "query"
avg_line_length: 34.75 | max_line_length: 104 | alphanum_fraction: 0.648082

hexsha: 684a12b8d278b7fdf8787369bccbe080e6ba8c53 | size: 137 | ext: py | lang: Python
max_stars_repo: tests/__init__.py @ hawkeone/PlexTraktSync (head cc5505c2035244d4b0ef83b8488d9ab45c007ac0), licenses ["MIT"], stars: null
max_issues_repo: tests/__init__.py @ hawkeone/PlexTraktSync (head cc5505c2035244d4b0ef83b8488d9ab45c007ac0), licenses ["MIT"], issues: 24 (2021-08-11T08:19:09.000Z to 2022-03-31T08:23:56.000Z)
max_forks_repo: tests/__init__.py @ 7a6163/PlexTraktSync (head 74319e4750343ef37107e96d92ff8bdb3aac95cc), licenses ["MIT"], forks: null
content:
import sys
from os.path import dirname, abspath
# Add our module to system path
sys.path.insert(0, dirname(dirname(abspath(__file__))))
avg_line_length: 22.833333 | max_line_length: 55 | alphanum_fraction: 0.773723

hexsha: 5290b1c725f5250df8c1285abd33e81faa6905a0 | size: 295 | ext: py | lang: Python
max_stars_repo: app/auth/serializers.py @ AuFeld/COAG (head 3874a9c1c6ceb908a6bbabfb49e2c701d8e54f20), licenses ["MIT"], stars: 1 (2021-06-03T10:29:12.000Z to 2021-06-03T10:29:12.000Z)
max_issues_repo: app/auth/serializers.py @ AuFeld/COAG (head 3874a9c1c6ceb908a6bbabfb49e2c701d8e54f20), licenses ["MIT"], issues: 45 (2021-06-05T14:47:09.000Z to 2022-03-30T06:16:44.000Z)
max_forks_repo: app/auth/serializers.py @ AuFeld/COAG (head 3874a9c1c6ceb908a6bbabfb49e2c701d8e54f20), licenses ["MIT"], forks: null
content:
from app.auth.models import Token
from app.serializers.common import ModelSerializer
class TokenSerializer(ModelSerializer):
"""
Serializer for the default `Token` model.
Use it if you use default model.
"""
class Meta:
model = Token
exclude = {"user_id"}
avg_line_length: 21.071429 | max_line_length: 50 | alphanum_fraction: 0.674576

hexsha: 6b3b12eccc299d4ecf872ef389337441d725f401 | size: 16,671 | ext: py | lang: Python
max_stars_repo: Lib/test/test_zlib.py @ deadsnakes/python2.5 (head d5dbcd8556f1e45094bd383b50727e248d9de1bf), licenses ["PSF-2.0"], stars: null
max_issues_repo: Lib/test/test_zlib.py @ deadsnakes/python2.5 (head d5dbcd8556f1e45094bd383b50727e248d9de1bf), licenses ["PSF-2.0"], issues: 1 (2021-04-11T15:01:12.000Z to 2021-04-11T15:01:12.000Z)
max_forks_repo: Lib/test/test_zlib.py @ deadsnakes/python2.5 (head d5dbcd8556f1e45094bd383b50727e248d9de1bf), licenses ["PSF-2.0"], forks: 2 (2016-04-04T14:31:33.000Z to 2018-03-09T22:39:02.000Z)
content:
import unittest
from test import test_support
import zlib
import random
# print test_support.TESTFN
def getbuf():
# This was in the original. Avoid non-repeatable sources.
# Left here (unused) in case something wants to be done with it.
import imp
try:
t = imp.find_module('test_zlib')
file = t[0]
except ImportError:
file = open(__file__)
buf = file.read() * 8
file.close()
return buf
class ChecksumTestCase(unittest.TestCase):
# checksum test cases
def test_crc32start(self):
self.assertEqual(zlib.crc32(""), zlib.crc32("", 0))
self.assert_(zlib.crc32("abc", 0xffffffff))
def test_crc32empty(self):
self.assertEqual(zlib.crc32("", 0), 0)
self.assertEqual(zlib.crc32("", 1), 1)
self.assertEqual(zlib.crc32("", 432), 432)
def test_adler32start(self):
self.assertEqual(zlib.adler32(""), zlib.adler32("", 1))
self.assert_(zlib.adler32("abc", 0xffffffff))
def test_adler32empty(self):
self.assertEqual(zlib.adler32("", 0), 0)
self.assertEqual(zlib.adler32("", 1), 1)
self.assertEqual(zlib.adler32("", 432), 432)
def assertEqual32(self, seen, expected):
# 32-bit values masked -- checksums on 32- vs 64- bit machines
# This is important if bit 31 (0x08000000L) is set.
self.assertEqual(seen & 0x0FFFFFFFFL, expected & 0x0FFFFFFFFL)
def test_penguins(self):
self.assertEqual32(zlib.crc32("penguin", 0), 0x0e5c1a120L)
self.assertEqual32(zlib.crc32("penguin", 1), 0x43b6aa94)
self.assertEqual32(zlib.adler32("penguin", 0), 0x0bcf02f6)
self.assertEqual32(zlib.adler32("penguin", 1), 0x0bd602f7)
self.assertEqual(zlib.crc32("penguin"), zlib.crc32("penguin", 0))
self.assertEqual(zlib.adler32("penguin"),zlib.adler32("penguin",1))
class ExceptionTestCase(unittest.TestCase):
# make sure we generate some expected errors
def test_bigbits(self):
# specifying total bits too large causes an error
self.assertRaises(zlib.error,
zlib.compress, 'ERROR', zlib.MAX_WBITS + 1)
def test_badcompressobj(self):
# verify failure on building compress object with bad params
self.assertRaises(ValueError, zlib.compressobj, 1, zlib.DEFLATED, 0)
def test_baddecompressobj(self):
# verify failure on building decompress object with bad params
self.assertRaises(ValueError, zlib.decompressobj, 0)
def test_decompressobj_badflush(self):
# verify failure on calling decompressobj.flush with bad params
self.assertRaises(ValueError, zlib.decompressobj().flush, 0)
self.assertRaises(ValueError, zlib.decompressobj().flush, -1)
class CompressTestCase(unittest.TestCase):
# Test compression in one go (whole message compression)
def test_speech(self):
x = zlib.compress(HAMLET_SCENE)
self.assertEqual(zlib.decompress(x), HAMLET_SCENE)
def test_speech128(self):
# compress more data
data = HAMLET_SCENE * 128
x = zlib.compress(data)
self.assertEqual(zlib.decompress(x), data)
class CompressObjectTestCase(unittest.TestCase):
# Test compression object
def test_pair(self):
# straightforward compress/decompress objects
data = HAMLET_SCENE * 128
co = zlib.compressobj()
x1 = co.compress(data)
x2 = co.flush()
self.assertRaises(zlib.error, co.flush) # second flush should not work
dco = zlib.decompressobj()
y1 = dco.decompress(x1 + x2)
y2 = dco.flush()
self.assertEqual(data, y1 + y2)
def test_compressoptions(self):
# specify lots of options to compressobj()
level = 2
method = zlib.DEFLATED
wbits = -12
memlevel = 9
strategy = zlib.Z_FILTERED
co = zlib.compressobj(level, method, wbits, memlevel, strategy)
x1 = co.compress(HAMLET_SCENE)
x2 = co.flush()
dco = zlib.decompressobj(wbits)
y1 = dco.decompress(x1 + x2)
y2 = dco.flush()
self.assertEqual(HAMLET_SCENE, y1 + y2)
def test_compressincremental(self):
# compress object in steps, decompress object as one-shot
data = HAMLET_SCENE * 128
co = zlib.compressobj()
bufs = []
for i in range(0, len(data), 256):
bufs.append(co.compress(data[i:i+256]))
bufs.append(co.flush())
combuf = ''.join(bufs)
dco = zlib.decompressobj()
y1 = dco.decompress(''.join(bufs))
y2 = dco.flush()
self.assertEqual(data, y1 + y2)
def test_decompinc(self, flush=False, source=None, cx=256, dcx=64):
# compress object in steps, decompress object in steps
source = source or HAMLET_SCENE
data = source * 128
co = zlib.compressobj()
bufs = []
for i in range(0, len(data), cx):
bufs.append(co.compress(data[i:i+cx]))
bufs.append(co.flush())
combuf = ''.join(bufs)
self.assertEqual(data, zlib.decompress(combuf))
dco = zlib.decompressobj()
bufs = []
for i in range(0, len(combuf), dcx):
bufs.append(dco.decompress(combuf[i:i+dcx]))
self.assertEqual('', dco.unconsumed_tail, ########
"(A) uct should be '': not %d long" %
len(dco.unconsumed_tail))
if flush:
bufs.append(dco.flush())
else:
while True:
chunk = dco.decompress('')
if chunk:
bufs.append(chunk)
else:
break
self.assertEqual('', dco.unconsumed_tail, ########
"(B) uct should be '': not %d long" %
len(dco.unconsumed_tail))
self.assertEqual(data, ''.join(bufs))
# Failure means: "decompressobj with init options failed"
def test_decompincflush(self):
self.test_decompinc(flush=True)
def test_decompimax(self, source=None, cx=256, dcx=64):
# compress in steps, decompress in length-restricted steps
source = source or HAMLET_SCENE
# Check a decompression object with max_length specified
data = source * 128
co = zlib.compressobj()
bufs = []
for i in range(0, len(data), cx):
bufs.append(co.compress(data[i:i+cx]))
bufs.append(co.flush())
combuf = ''.join(bufs)
self.assertEqual(data, zlib.decompress(combuf),
'compressed data failure')
dco = zlib.decompressobj()
bufs = []
cb = combuf
while cb:
#max_length = 1 + len(cb)//10
chunk = dco.decompress(cb, dcx)
self.failIf(len(chunk) > dcx,
'chunk too big (%d>%d)' % (len(chunk), dcx))
bufs.append(chunk)
cb = dco.unconsumed_tail
bufs.append(dco.flush())
self.assertEqual(data, ''.join(bufs), 'Wrong data retrieved')
def test_decompressmaxlen(self, flush=False):
# Check a decompression object with max_length specified
data = HAMLET_SCENE * 128
co = zlib.compressobj()
bufs = []
for i in range(0, len(data), 256):
bufs.append(co.compress(data[i:i+256]))
bufs.append(co.flush())
combuf = ''.join(bufs)
self.assertEqual(data, zlib.decompress(combuf),
'compressed data failure')
dco = zlib.decompressobj()
bufs = []
cb = combuf
while cb:
max_length = 1 + len(cb)//10
chunk = dco.decompress(cb, max_length)
self.failIf(len(chunk) > max_length,
'chunk too big (%d>%d)' % (len(chunk),max_length))
bufs.append(chunk)
cb = dco.unconsumed_tail
if flush:
bufs.append(dco.flush())
else:
while chunk:
chunk = dco.decompress('', max_length)
self.failIf(len(chunk) > max_length,
'chunk too big (%d>%d)' % (len(chunk),max_length))
bufs.append(chunk)
self.assertEqual(data, ''.join(bufs), 'Wrong data retrieved')
def test_decompressmaxlenflush(self):
self.test_decompressmaxlen(flush=True)
def test_maxlenmisc(self):
# Misc tests of max_length
dco = zlib.decompressobj()
self.assertRaises(ValueError, dco.decompress, "", -1)
self.assertEqual('', dco.unconsumed_tail)
def test_flushes(self):
# Test flush() with the various options, using all the
# different levels in order to provide more variations.
sync_opt = ['Z_NO_FLUSH', 'Z_SYNC_FLUSH', 'Z_FULL_FLUSH']
sync_opt = [getattr(zlib, opt) for opt in sync_opt
if hasattr(zlib, opt)]
data = HAMLET_SCENE * 8
for sync in sync_opt:
for level in range(10):
obj = zlib.compressobj( level )
a = obj.compress( data[:3000] )
b = obj.flush( sync )
c = obj.compress( data[3000:] )
d = obj.flush()
self.assertEqual(zlib.decompress(''.join([a,b,c,d])),
data, ("Decompress failed: flush "
"mode=%i, level=%i") % (sync, level))
del obj
def test_odd_flush(self):
# Test for odd flushing bugs noted in 2.0, and hopefully fixed in 2.1
import random
if hasattr(zlib, 'Z_SYNC_FLUSH'):
# Testing on 17K of "random" data
# Create compressor and decompressor objects
co = zlib.compressobj(zlib.Z_BEST_COMPRESSION)
dco = zlib.decompressobj()
# Try 17K of data
# generate random data stream
try:
# In 2.3 and later, WichmannHill is the RNG of the bug report
gen = random.WichmannHill()
except AttributeError:
try:
# 2.2 called it Random
gen = random.Random()
except AttributeError:
# others might simply have a single RNG
gen = random
gen.seed(1)
data = genblock(1, 17 * 1024, generator=gen)
# compress, sync-flush, and decompress
first = co.compress(data)
second = co.flush(zlib.Z_SYNC_FLUSH)
expanded = dco.decompress(first + second)
# if decompressed data is different from the input data, choke.
self.assertEqual(expanded, data, "17K random source doesn't match")
def test_empty_flush(self):
# Test that calling .flush() on unused objects works.
# (Bug #1083110 -- calling .flush() on decompress objects
# caused a core dump.)
co = zlib.compressobj(zlib.Z_BEST_COMPRESSION)
self.failUnless(co.flush()) # Returns a zlib header
dco = zlib.decompressobj()
self.assertEqual(dco.flush(), "") # Returns nothing
if hasattr(zlib.compressobj(), "copy"):
def test_compresscopy(self):
# Test copying a compression object
data0 = HAMLET_SCENE
data1 = HAMLET_SCENE.swapcase()
c0 = zlib.compressobj(zlib.Z_BEST_COMPRESSION)
bufs0 = []
bufs0.append(c0.compress(data0))
c1 = c0.copy()
bufs1 = bufs0[:]
bufs0.append(c0.compress(data0))
bufs0.append(c0.flush())
s0 = ''.join(bufs0)
bufs1.append(c1.compress(data1))
bufs1.append(c1.flush())
s1 = ''.join(bufs1)
self.assertEqual(zlib.decompress(s0),data0+data0)
self.assertEqual(zlib.decompress(s1),data0+data1)
def test_badcompresscopy(self):
# Test copying a compression object in an inconsistent state
c = zlib.compressobj()
c.compress(HAMLET_SCENE)
c.flush()
self.assertRaises(ValueError, c.copy)
if hasattr(zlib.decompressobj(), "copy"):
def test_decompresscopy(self):
# Test copying a decompression object
data = HAMLET_SCENE
comp = zlib.compress(data)
d0 = zlib.decompressobj()
bufs0 = []
bufs0.append(d0.decompress(comp[:32]))
d1 = d0.copy()
bufs1 = bufs0[:]
bufs0.append(d0.decompress(comp[32:]))
s0 = ''.join(bufs0)
bufs1.append(d1.decompress(comp[32:]))
s1 = ''.join(bufs1)
self.assertEqual(s0,s1)
self.assertEqual(s0,data)
def test_baddecompresscopy(self):
# Test copying a compression object in an inconsistent state
data = zlib.compress(HAMLET_SCENE)
d = zlib.decompressobj()
d.decompress(data)
d.flush()
self.assertRaises(ValueError, d.copy)
def genblock(seed, length, step=1024, generator=random):
"""length-byte stream of random data from a seed (in step-byte blocks)."""
if seed is not None:
generator.seed(seed)
randint = generator.randint
if length < step or step < 2:
step = length
blocks = []
for i in range(0, length, step):
blocks.append(''.join([chr(randint(0,255))
for x in range(step)]))
return ''.join(blocks)[:length]
def choose_lines(source, number, seed=None, generator=random):
"""Return a list of number lines randomly chosen from the source"""
if seed is not None:
generator.seed(seed)
sources = source.split('\n')
return [generator.choice(sources) for n in range(number)]
HAMLET_SCENE = """
LAERTES
O, fear me not.
I stay too long: but here my father comes.
Enter POLONIUS
A double blessing is a double grace,
Occasion smiles upon a second leave.
LORD POLONIUS
Yet here, Laertes! aboard, aboard, for shame!
The wind sits in the shoulder of your sail,
And you are stay'd for. There; my blessing with thee!
And these few precepts in thy memory
See thou character. Give thy thoughts no tongue,
Nor any unproportioned thought his act.
Be thou familiar, but by no means vulgar.
Those friends thou hast, and their adoption tried,
Grapple them to thy soul with hoops of steel;
But do not dull thy palm with entertainment
Of each new-hatch'd, unfledged comrade. Beware
Of entrance to a quarrel, but being in,
Bear't that the opposed may beware of thee.
Give every man thy ear, but few thy voice;
Take each man's censure, but reserve thy judgment.
Costly thy habit as thy purse can buy,
But not express'd in fancy; rich, not gaudy;
For the apparel oft proclaims the man,
And they in France of the best rank and station
Are of a most select and generous chief in that.
Neither a borrower nor a lender be;
For loan oft loses both itself and friend,
And borrowing dulls the edge of husbandry.
This above all: to thine ownself be true,
And it must follow, as the night the day,
Thou canst not then be false to any man.
Farewell: my blessing season this in thee!
LAERTES
Most humbly do I take my leave, my lord.
LORD POLONIUS
The time invites you; go; your servants tend.
LAERTES
Farewell, Ophelia; and remember well
What I have said to you.
OPHELIA
'Tis in my memory lock'd,
And you yourself shall keep the key of it.
LAERTES
Farewell.
"""
def test_main():
test_support.run_unittest(
ChecksumTestCase,
ExceptionTestCase,
CompressTestCase,
CompressObjectTestCase
)
if __name__ == "__main__":
test_main()
def test(tests=''):
if not tests: tests = 'o'
testcases = []
if 'k' in tests: testcases.append(ChecksumTestCase)
if 'x' in tests: testcases.append(ExceptionTestCase)
if 'c' in tests: testcases.append(CompressTestCase)
if 'o' in tests: testcases.append(CompressObjectTestCase)
test_support.run_unittest(*testcases)
if False:
import sys
sys.path.insert(1, '/Py23Src/python/dist/src/Lib/test')
import test_zlib as tz
ts, ut = tz.test_support, tz.unittest
su = ut.TestSuite()
su.addTest(ut.makeSuite(tz.CompressTestCase))
ts.run_suite(su)
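
For reference, a minimal sketch (in Python 3 syntax, separate from the 2.5-era test harness above) of the incremental-compress / one-shot-decompress round trip that test_compressincremental exercises:

```python
import zlib

data = b"The quick brown fox jumps over the lazy dog. " * 64

# Feed the compressor in fixed-size steps, then flush the remainder.
co = zlib.compressobj()
chunks = [co.compress(data[i:i + 256]) for i in range(0, len(data), 256)]
chunks.append(co.flush())
compressed = b"".join(chunks)

# Decompress in one shot and verify the round trip.
dco = zlib.decompressobj()
assert dco.decompress(compressed) + dco.flush() == data
```
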
avg_line_length: 34.232033 | max_line_length: 79 | alphanum_fraction: 0.589707

hexsha: 2ec9d30b34678f43aaf792500e05455e545ca98b | size: 8,509 | ext: py | lang: Python
max_stars_repo: utils.py @ xxchenxx/classifier-balancing (head 9ffa6046a64bcac4b8b98ec93f2095900ec6b03b), licenses ["BSD-3-Clause"], stars: 734 (2020-03-19T20:25:47.000Z to 2022-03-30T03:37:28.000Z)
max_issues_repo: utils.py @ xxchenxx/classifier-balancing (head 9ffa6046a64bcac4b8b98ec93f2095900ec6b03b), licenses ["BSD-3-Clause"], issues: 21 (2020-03-25T07:00:40.000Z to 2022-01-11T11:52:07.000Z)
max_forks_repo: utils.py @ xxchenxx/classifier-balancing (head 9ffa6046a64bcac4b8b98ec93f2095900ec6b03b), licenses ["BSD-3-Clause"], forks: 112 (2020-03-20T02:05:52.000Z to 2022-03-28T06:15:27.000Z)
content:
"""Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
Portions of the source code are from the OLTR project which
notice below and in LICENSE in the root directory of
this source tree.
Copyright (c) 2019, Zhongqi Miao
All rights reserved.
"""
import numpy as np
import matplotlib.pyplot as plt
import torch
from sklearn.metrics import f1_score
import torch.nn.functional as F
import importlib
import pdb
def source_import(file_path):
"""This function imports python module directly from source code using importlib"""
spec = importlib.util.spec_from_file_location('', file_path)
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
return module
def batch_show(inp, title=None):
"""Imshow for Tensor."""
inp = inp.numpy().transpose((1, 2, 0))
mean = np.array([0.485, 0.456, 0.406])
std = np.array([0.229, 0.224, 0.225])
inp = std * inp + mean
inp = np.clip(inp, 0, 1)
plt.figure(figsize=(20,20))
plt.imshow(inp)
if title is not None:
plt.title(title)
def print_write(print_str, log_file):
print(*print_str)
if log_file is None:
return
with open(log_file, 'a') as f:
print(*print_str, file=f)
def init_weights(model, weights_path, caffe=False, classifier=False):
"""Initialize weights"""
print('Pretrained %s weights path: %s' % ('classifier' if classifier else 'feature model',
weights_path))
weights = torch.load(weights_path)
if not classifier:
if caffe:
weights = {k: weights[k] if k in weights else model.state_dict()[k]
for k in model.state_dict()}
else:
weights = weights['state_dict_best']['feat_model']
weights = {k: weights['module.' + k] if 'module.' + k in weights else model.state_dict()[k]
for k in model.state_dict()}
else:
weights = weights['state_dict_best']['classifier']
weights = {k: weights['module.fc.' + k] if 'module.fc.' + k in weights else model.state_dict()[k]
for k in model.state_dict()}
model.load_state_dict(weights)
return model
def shot_acc (preds, labels, train_data, many_shot_thr=100, low_shot_thr=20, acc_per_cls=False):
if isinstance(train_data, np.ndarray):
training_labels = np.array(train_data).astype(int)
else:
training_labels = np.array(train_data.dataset.labels).astype(int)
if isinstance(preds, torch.Tensor):
preds = preds.detach().cpu().numpy()
labels = labels.detach().cpu().numpy()
elif isinstance(preds, np.ndarray):
pass
else:
raise TypeError('Type ({}) of preds not supported'.format(type(preds)))
train_class_count = []
test_class_count = []
class_correct = []
for l in np.unique(labels):
train_class_count.append(len(training_labels[training_labels == l]))
test_class_count.append(len(labels[labels == l]))
class_correct.append((preds[labels == l] == labels[labels == l]).sum())
many_shot = []
median_shot = []
low_shot = []
for i in range(len(train_class_count)):
if train_class_count[i] > many_shot_thr:
many_shot.append((class_correct[i] / test_class_count[i]))
elif train_class_count[i] < low_shot_thr:
low_shot.append((class_correct[i] / test_class_count[i]))
else:
median_shot.append((class_correct[i] / test_class_count[i]))
if len(many_shot) == 0:
many_shot.append(0)
if len(median_shot) == 0:
median_shot.append(0)
if len(low_shot) == 0:
low_shot.append(0)
if acc_per_cls:
class_accs = [c / cnt for c, cnt in zip(class_correct, test_class_count)]
return np.mean(many_shot), np.mean(median_shot), np.mean(low_shot), class_accs
else:
return np.mean(many_shot), np.mean(median_shot), np.mean(low_shot)
def weighted_shot_acc (preds, labels, ws, train_data, many_shot_thr=100, low_shot_thr=20):
training_labels = np.array(train_data.dataset.labels).astype(int)
if isinstance(preds, torch.Tensor):
preds = preds.detach().cpu().numpy()
labels = labels.detach().cpu().numpy()
elif isinstance(preds, np.ndarray):
pass
else:
raise TypeError('Type ({}) of preds not supported'.format(type(preds)))
train_class_count = []
test_class_count = []
class_correct = []
for l in np.unique(labels):
train_class_count.append(len(training_labels[training_labels == l]))
test_class_count.append(ws[labels==l].sum())
class_correct.append(((preds[labels==l] == labels[labels==l]) * ws[labels==l]).sum())
many_shot = []
median_shot = []
low_shot = []
for i in range(len(train_class_count)):
if train_class_count[i] > many_shot_thr:
many_shot.append((class_correct[i] / test_class_count[i]))
elif train_class_count[i] < low_shot_thr:
low_shot.append((class_correct[i] / test_class_count[i]))
else:
median_shot.append((class_correct[i] / test_class_count[i]))
return np.mean(many_shot), np.mean(median_shot), np.mean(low_shot)
def F_measure(preds, labels, openset=False, theta=None):
if openset:
# f1 score for openset evaluation
true_pos = 0.
false_pos = 0.
false_neg = 0.
for i in range(len(labels)):
true_pos += 1 if preds[i] == labels[i] and labels[i] != -1 else 0
false_pos += 1 if preds[i] != labels[i] and labels[i] != -1 and preds[i] != -1 else 0
false_neg += 1 if preds[i] != labels[i] and labels[i] == -1 else 0
precision = true_pos / (true_pos + false_pos)
recall = true_pos / (true_pos + false_neg)
return 2 * ((precision * recall) / (precision + recall + 1e-12))
else:
# Regular f1 score
return f1_score(labels.detach().cpu().numpy(), preds.detach().cpu().numpy(), average='macro')
def mic_acc_cal(preds, labels):
if isinstance(labels, tuple):
assert len(labels) == 3
targets_a, targets_b, lam = labels
acc_mic_top1 = (lam * preds.eq(targets_a.data).cpu().sum().float() \
+ (1 - lam) * preds.eq(targets_b.data).cpu().sum().float()) / len(preds)
else:
acc_mic_top1 = (preds == labels).sum().item() / len(labels)
return acc_mic_top1
def weighted_mic_acc_cal(preds, labels, ws):
acc_mic_top1 = ws[preds == labels].sum() / ws.sum()
return acc_mic_top1
def class_count (data):
labels = np.array(data.dataset.labels)
class_data_num = []
for l in np.unique(labels):
class_data_num.append(len(labels[labels == l]))
return class_data_num
# def dataset_dist (in_loader):
# """Example, dataset_dist(data['train'][0])"""
# label_list = np.array([x[1] for x in in_loader.dataset.samples])
# total_num = len(data_list)
# distribution = []
# for l in np.unique(label_list):
# distribution.append((l, len(label_list[label_list == l])/total_num))
# return distribution
# New Added
def torch2numpy(x):
if isinstance(x, torch.Tensor):
return x.detach().cpu().numpy()
elif isinstance(x, (list, tuple)):
return tuple([torch2numpy(xi) for xi in x])
else:
return x
def logits2score(logits, labels):
scores = F.softmax(logits, dim=1)
score = scores.gather(1, labels.view(-1, 1))
score = score.squeeze().cpu().numpy()
return score
def logits2entropy(logits):
scores = F.softmax(logits, dim=1)
scores = scores.cpu().numpy() + 1e-30
ent = -scores * np.log(scores)
ent = np.sum(ent, 1)
return ent
def logits2CE(logits, labels):
scores = F.softmax(logits, dim=1)
score = scores.gather(1, labels.view(-1, 1))
score = score.squeeze().cpu().numpy() + 1e-30
ce = -np.log(score)
return ce
def get_priority(ptype, logits, labels):
if ptype == 'score':
ws = 1 - logits2score(logits, labels)
elif ptype == 'entropy':
ws = logits2entropy(logits)
elif ptype == 'CE':
ws = logits2CE(logits, labels)
return ws
def get_value(oldv, newv):
if newv is not None:
return newv
else:
return oldv
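
A small illustration of the accuracy and priority helpers above; the tensor shapes are arbitrary and chosen only for this example:

```python
import torch

logits = torch.randn(8, 5)              # 8 samples, 5 classes
labels = torch.randint(0, 5, (8,))
preds = logits.argmax(dim=1)

print(mic_acc_cal(preds, labels))                # micro top-1 accuracy
print(get_priority('entropy', logits, labels))   # per-sample entropy weights
print(torch2numpy((preds, labels)))              # tensors converted to numpy arrays
```
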
avg_line_length: 34.172691 | max_line_length: 106 | alphanum_fraction: 0.624868

hexsha: bbcd73ff8eecaa1d1ad8dc5acfadc55c89c176d0 | size: 3,197 | ext: py | lang: Python
max_stars_repo: poet/modules/exec.py @ packetgeek/poet (head f76e8a0e289c18edc410d64f1bb31d9a3da4765f), licenses ["MIT"], stars: 189 (2015-02-12T08:42:38.000Z to 2019-10-21T07:12:58.000Z)
max_issues_repo: Post-Exploitation/poet/poet/modules/exec.py @ bhattsameer/TID3xploits (head b57d8bae454081a3883a5684679e2a329e72d6e5), licenses ["MIT"], issues: 37 (2015-01-22T02:41:45.000Z to 2016-03-20T03:27:33.000Z)
max_forks_repo: Post-Exploitation/poet/poet/modules/exec.py @ bhattsameer/TID3xploits (head b57d8bae454081a3883a5684679e2a329e72d6e5), licenses ["MIT"], forks: 51 (2015-02-12T08:42:26.000Z to 2019-05-20T14:30:17.000Z)
content:
import module
import config as CFG
import re
import zlib
REGEX = re.compile('^exec(\s+-o(\s+[\w.]+)?)?\s+(("[^"]+")\s+)+$')
EXEC = 'exec'
RECON = 'recon'
USAGE = """Execute commands on target.
usage: exec [-o [filename]] "cmd1" ["cmd2" "cmd3" ...]
\nExecute given commands and optionally log to file with optional filename.
\noptions:
-h\t\tshow help
-o filename\twrite results to file in {}/'.""".format(CFG.ARCHIVE_DIR)
@module.server_handler(EXEC)
def server_exec(server, argv):
# extra space is for regex
if len(argv) < 2 or argv[1] in ('-h', '--help') or not REGEX.match(' '.join(argv) + ' '):
print USAGE
return
try:
preproc = preprocess(argv)
except Exception:
print USAGE
return
server.generic(*preproc)
@module.client_handler(EXEC)
def client_exec(client, inp):
"""Handle server `exec' command.
Execute specially formatted input string and return specially formatted
response.
"""
client.s.send(execute(client, ' '.join(inp.split()[1:])))
@module.server_handler(RECON)
def server_recon(server, argv):
if '-h' in argv or '--help' in argv:
print USAGE
return
argc = len(argv)
if argc == 1:
server.generic(RECON)
elif '-o' in argv:
if argc == 2:
server.generic(RECON, True)
elif argc == 3:
server.generic(RECON, True, argv[2])
else:
print USAGE
else:
print USAGE
@module.client_handler(RECON)
def client_recon(client, inp):
ipcmd = 'ip addr' if 'no' in client.cmd_exec('which ifconfig') else 'ifconfig'
exec_str = '"whoami" "id" "uname -a" "lsb_release -a" "{}" "w" "who -a"'.format(ipcmd)
client.s.send(zlib.compress(execute(client, exec_str)))
def execute(client, exec_str):
out = ''
cmds = parse_exec_cmds(exec_str)
for cmd in cmds:
cmd_out = client.cmd_exec(cmd)
out += '='*20 + '\n\n$ {}\n{}\n'.format(cmd, cmd_out)
return out
def preprocess(argv):
"""Parse posh `exec' command line.
Args:
inp: raw `exec' command line
Returns:
Tuple suitable for expansion into as self.generic() parameters.
"""
write_file = None
write_flag = argv[1] == '-o'
if write_flag:
if len(argv) == 2:
# it was just "exec -o"
raise Exception
if '"' not in argv[2]:
write_file = argv[2]
del argv[2]
del argv[1]
argv = ' '.join(argv)
return argv, write_flag, write_file
def parse_exec_cmds(inp):
"""Parse string provided by server `exec' command.
Convert space delimited string with commands to execute in quotes, for
example ("ls -l" "cat /etc/passwd") into list with commands as strings.
Returns:
List of commands to execute.
"""
if inp.count('"') == 2:
return [inp[1:-1]]
else:
# server side regex guarantees that these quotes will be in the
# correct place -- the space between two commands
third_quote = inp.find('" "') + 2
first_cmd = inp[:third_quote-1]
rest = inp[third_quote:]
return [first_cmd[1:-1]] + parse_exec_cmds(rest)
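
A short illustration of the recursive parser above (the surrounding module is Python 2, but parse_exec_cmds itself behaves the same either way):

```python
# Two quoted commands are split into a two-element list.
assert parse_exec_cmds('"ls -l" "cat /etc/passwd"') == ['ls -l', 'cat /etc/passwd']
# A single quoted command simply loses its surrounding quotes.
assert parse_exec_cmds('"whoami"') == ['whoami']
```
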
avg_line_length: 25.99187 | max_line_length: 93 | alphanum_fraction: 0.598686

hexsha: 76046511be47f584603ab1de9fa2a3294d4ec2b8 | size: 14,346 | ext: py | lang: Python
max_stars_repo: tests/test_session_manager.py @ yabov/hermes_fix (head 0a5e89fd15903a7ee0929e82b39879362e2e1008), licenses ["Apache-2.0"], stars: 2 (2020-02-20T15:00:35.000Z to 2020-02-21T19:27:53.000Z)
max_issues_repo: tests/test_session_manager.py @ yabov/hermes_fix (head 0a5e89fd15903a7ee0929e82b39879362e2e1008), licenses ["Apache-2.0"], issues: 3 (2020-02-21T03:25:35.000Z to 2020-02-21T18:37:42.000Z)
max_forks_repo: tests/test_session_manager.py @ yabov/hermes_fix (head 0a5e89fd15903a7ee0929e82b39879362e2e1008), licenses ["Apache-2.0"], forks: null
content:
import asyncio
import logging
import queue
import time
import unittest
from datetime import datetime, timedelta
from dateutil.tz import gettz
import hermes_fix as fix
from hermes_fix import fix_engine, fix_errors
from hermes_fix.message_lib.FIX_4_2 import \
fix_messages as fix_messages_4_2_0_base
logging.basicConfig(level=logging.DEBUG,
format='%(levelname)s-%(asctime)s-%(thread)d-%(filename)s:%(lineno)d - %(message)s')
SERVER_QUEUE = queue.Queue()
CLIENT_QUEUE = queue.Queue()
class FIXTestAppServer(fix.Application):
def on_register_callbacks(self, session_name):
self.register_callback(session_name, None, self.on_queue_msg)
def on_queue_msg(self, session_name, msg):
SERVER_QUEUE.put(msg)
def on_error(self, session_name, error):
SERVER_QUEUE.put(error)
class FIXTestAppClient(fix.Application):
def on_register_callbacks(self, session_name):
self.register_callback(session_name, None, self.on_queue_msg)
def on_queue_msg(self, session_name, msg):
CLIENT_QUEUE.put(msg)
def on_error(self, session_name, error):
CLIENT_QUEUE.put(error)
class Test(unittest.TestCase):
def setUp(self):
self.store = fix.FileStoreFactory()
self.settings = fix.SessionSettings([])
self.settings.read_dict({self._testMethodName: {'ConnectionType': 'acceptor',
'BeginString': 'FIX.4.2',
'SenderCompID': 'HOST',
'TargetCompID': self._testMethodName, # 'CLIENT',
'SocketAcceptPort': '5001',
'StorageConnectionString': f'sqlite:///store/{self._testMethodName}.server.db?check_same_thread=False',
'ConnectionStartTime': datetime.utcnow().time().strftime('%H:%M:%S'),
'ConnectionEndTime': (datetime.utcnow() + timedelta(seconds=10)).time().strftime('%H:%M:%S'),
'LogonTime': (datetime.utcnow() - timedelta(seconds=10)).time().strftime('%H:%M:%S'),
'LogoutTime': (datetime.utcnow() + timedelta(seconds=10)).time().strftime('%H:%M:%S')}})
self.settings_client = fix.SessionSettings([])
self.settings_client.read_dict({self._testMethodName: {'ConnectionType': 'initiator',
'BeginString': 'FIX.4.2',
'SenderCompID': self._testMethodName, # 'CLIENT',
'TargetCompID': 'HOST',
'SocketConnectPort': '5001',
'SocketConnectHost': 'localhost',
'StorageConnectionString': f'sqlite:///store/{self._testMethodName}.client.db?check_same_thread=False',
'ConnectionStartTime': datetime.utcnow().time().strftime('%H:%M:%S'),
'ConnectionEndTime': (datetime.utcnow() + timedelta(seconds=10)).time().strftime('%H:%M:%S'),
'LogonTime': (datetime.utcnow() - timedelta(seconds=10)).time().strftime('%H:%M:%S'),
'LogoutTime': (datetime.utcnow() + timedelta(seconds=10)).time().strftime('%H:%M:%S')}})
self.client_app = FIXTestAppClient()
self.client = fix.SocketConnection(
self.client_app, self.store, self.settings_client)
self.server_app = FIXTestAppServer()
self.server = fix.SocketConnection(
self.server_app, self.store, self.settings)
def do_logout(self, client_app):
client_app.engines[self._testMethodName].logout()
self.assertIsInstance(SERVER_QUEUE.get(timeout=3),
fix_messages_4_2_0_base.TestRequest)
self.assertIsInstance(SERVER_QUEUE.get(timeout=3),
fix_messages_4_2_0_base.Logout)
self.assertIsInstance(CLIENT_QUEUE.get(timeout=3),
fix_messages_4_2_0_base.Heartbeat)
self.assertIsInstance(CLIENT_QUEUE.get(timeout=3),
fix_messages_4_2_0_base.Logout)
""" Reconnect after Logout """
def test_reconnect(self):
self.server.start()
self.client.start()
resp_logon = SERVER_QUEUE.get(timeout=3)
sent_logon = CLIENT_QUEUE.get(timeout=3)
self.assertIsInstance(resp_logon, fix_messages_4_2_0_base.Logon)
self.assertIsInstance(sent_logon, fix_messages_4_2_0_base.Logon)
self.do_logout(self.client_app)
resp_logon = SERVER_QUEUE.get(timeout=3)
sent_logon = CLIENT_QUEUE.get(timeout=3)
self.assertIsInstance(resp_logon, fix_messages_4_2_0_base.Logon)
self.assertIsInstance(sent_logon, fix_messages_4_2_0_base.Logon)
self.do_logout(self.client_app)
""" Start connection when scheduled """
def test_logon_timer(self):
self.settings_client = fix.SessionSettings([])
self.settings_client.read_dict({self._testMethodName: {'ConnectionType': 'initiator',
'BeginString': 'FIX.4.2',
'SenderCompID': self._testMethodName, # 'CLIENT',
'TargetCompID': 'HOST',
'SocketConnectPort': '5001',
'SocketConnectHost': 'localhost',
'StorageConnectionString': f'sqlite:///store/{self._testMethodName}.client.db?check_same_thread=False',
'ConnectionStartTime': datetime.utcnow().time().strftime('%H:%M:%S'),
'ConnectionEndTime': (datetime.utcnow() + timedelta(seconds=10)).time().strftime('%H:%M:%S'),
'LogonTime': (datetime.utcnow() + timedelta(seconds=1)).time().strftime('%H:%M:%S'),
'LogoutTime': (datetime.utcnow() + timedelta(seconds=10)).time().strftime('%H:%M:%S')}})
self.client_app = FIXTestAppClient()
self.client = fix.SocketConnection(
self.client_app, self.store, self.settings_client)
self.server.start()
self.client.start()
resp_logon = SERVER_QUEUE.get(timeout=3)
sent_logon = CLIENT_QUEUE.get(timeout=3)
self.assertIsInstance(resp_logon, fix_messages_4_2_0_base.Logon)
self.assertIsInstance(sent_logon, fix_messages_4_2_0_base.Logon)
self.do_logout(self.client_app)
""" End connection when scheduled """
def test_logout_timer(self):
self.settings_client = fix.SessionSettings([])
self.settings_client.read_dict({self._testMethodName: {'ConnectionType': 'initiator',
'BeginString': 'FIX.4.2',
'SenderCompID': self._testMethodName, # 'CLIENT',
'TargetCompID': 'HOST',
'SocketConnectPort': '5001',
'SocketConnectHost': 'localhost',
'StorageConnectionString': f'sqlite:///store/{self._testMethodName}.client.db?check_same_thread=False',
'ConnectionStartTime': datetime.utcnow().time().strftime('%H:%M:%S'),
'ConnectionEndTime': (datetime.utcnow() + timedelta(seconds=10)).time().strftime('%H:%M:%S'),
'LogonTime': (datetime.utcnow() + timedelta(seconds=0)).time().strftime('%H:%M:%S'),
'LogoutTime': (datetime.utcnow() + timedelta(seconds=3)).time().strftime('%H:%M:%S')}})
self.client_app = FIXTestAppClient()
self.client = fix.SocketConnection(
self.client_app, self.store, self.settings_client)
self.server.start()
self.client.start()
resp_logon = SERVER_QUEUE.get(timeout=3)
sent_logon = CLIENT_QUEUE.get(timeout=3)
self.assertIsInstance(resp_logon, fix_messages_4_2_0_base.Logon)
self.assertIsInstance(sent_logon, fix_messages_4_2_0_base.Logon)
self.assertIsInstance(SERVER_QUEUE.get(timeout=5),
fix_messages_4_2_0_base.TestRequest)
self.assertIsInstance(SERVER_QUEUE.get(timeout=3),
fix_messages_4_2_0_base.Logout)
self.assertIsInstance(CLIENT_QUEUE.get(timeout=3),
fix_messages_4_2_0_base.Heartbeat)
self.assertIsInstance(CLIENT_QUEUE.get(timeout=3),
fix_messages_4_2_0_base.Logout)
""" Reject when connection is not outside of logon hours"""
def test_reject_out_of_window(self):
self.settings = fix.SessionSettings([])
self.settings.read_dict({self._testMethodName: {'ConnectionType': 'acceptor',
'BeginString': 'FIX.4.2',
'SenderCompID': 'HOST',
'TargetCompID': self._testMethodName, # 'CLIENT',
'SocketAcceptPort': '5001',
'StorageConnectionString': f'sqlite:///store/{self._testMethodName}.server.db?check_same_thread=False',
'ConnectionStartTime': datetime.utcnow().time().strftime('%H:%M:%S'),
'ConnectionEndTime': (datetime.utcnow() + timedelta(seconds=10)).time().strftime('%H:%M:%S'),
'LogonTime': (datetime.utcnow() + timedelta(seconds=8)).time().strftime('%H:%M:%S'),
'LogoutTime': (datetime.utcnow() + timedelta(seconds=10)).time().strftime('%H:%M:%S')}})
self.server_app = FIXTestAppServer()
self.server = fix.SocketConnection(
self.server_app, self.store, self.settings)
self.server.start()
self.client.start()
self.assertIsInstance(SERVER_QUEUE.get(
timeout=3), fix_errors.FIXRejectError)
self.assertIsInstance(CLIENT_QUEUE.get(timeout=3),
fix_errors.FIXInvalidFirstMessage)
self.assertIsInstance(SERVER_QUEUE.get(timeout=3),
fix_errors.FIXDropMessageError)
""" Logon with New_York timezone"""
def test_timezone_logon(self):
self.settings = fix.SessionSettings([])
self.settings.read_dict({self._testMethodName: {'ConnectionType': 'acceptor',
'BeginString': 'FIX.4.2',
'SenderCompID': 'HOST',
'TargetCompID': self._testMethodName, # 'CLIENT',
'SocketAcceptPort': '5001',
'StorageConnectionString': f'sqlite:///store/{self._testMethodName}.server.db?check_same_thread=False',
'SessionTimeZone' : 'America/New_York',
'ConnectionStartTime': datetime.now(gettz('America/New_York')).time().strftime('%H:%M:%S'),
'ConnectionEndTime': (datetime.now(gettz('America/New_York')) + timedelta(seconds=10)).time().strftime('%H:%M:%S'),
'LogonTime': (datetime.now(gettz('America/New_York')) + timedelta(seconds=0)).time().strftime('%H:%M:%S'),
'LogoutTime': (datetime.now(gettz('America/New_York')) + timedelta(seconds=3)).time().strftime('%H:%M:%S')}})
self.server_app = FIXTestAppServer()
self.server = fix.SocketConnection(
self.server_app, self.store, self.settings)
self.server.start()
self.client.start()
resp_logon = SERVER_QUEUE.get(timeout=3)
sent_logon = CLIENT_QUEUE.get(timeout=3)
self.assertIsInstance(resp_logon, fix_messages_4_2_0_base.Logon)
self.assertIsInstance(sent_logon, fix_messages_4_2_0_base.Logon)
self.do_logout(self.client_app)
def tearDown(self):
self.server_app.close_connection(self._testMethodName)
self.client_app.close_connection(self._testMethodName)
self.client.stop_all()
self.server.stop_all()
try:
self.assertTrue(SERVER_QUEUE.empty())
self.assertTrue(CLIENT_QUEUE.empty())
finally:
while not SERVER_QUEUE.empty():
SERVER_QUEUE.get()
while not CLIENT_QUEUE.empty():
CLIENT_QUEUE.get()
self.server_app.engines[self._testMethodName].store.clean_up()
self.client_app.engines[self._testMethodName].store.clean_up()
if __name__ == "__main__":
unittest.main()
avg_line_length: 54.340909 | max_line_length: 171 | alphanum_fraction: 0.51659

hexsha: c29ba62541bb2a86d1de9a3636faa6afc9546869 | size: 1,057 | ext: py | lang: Python
max_stars_repo: server/utils.py @ githubalvin/pandorabox (head 4bca09ea6df9c91fd2344a4346d037f5a1be643b), licenses ["MIT"], stars: null
max_issues_repo: server/utils.py @ githubalvin/pandorabox (head 4bca09ea6df9c91fd2344a4346d037f5a1be643b), licenses ["MIT"], issues: null
max_forks_repo: server/utils.py @ githubalvin/pandorabox (head 4bca09ea6df9c91fd2344a4346d037f5a1be643b), licenses ["MIT"], forks: null
content:
if "_SINGLE_OBJ" not in globals():
_SINGLE_OBJ = {}
class SingletonMeta(type):
def __new__(cls, name ,bases, attrs):
init_func = attrs.get("__init__", None)
if init_func is None and bases:
init_func = getattr(bases[0], "__init__", None)
if init_func is not None:
def __myinit__(obj, *args, **kwargs):
if obj.__class__.single_inited:
return
_SINGLE_OBJ[obj.__class__.__name__] = obj
init_func(obj, *args, **kwargs)
obj.__class__.single_inited = True
attrs["__init__"] = __myinit__
return super(SingletonMeta, cls).__new__(cls, name ,bases, attrs)
class Singleton(metaclass=SingletonMeta):
    """Singleton base class."""
def __new__(cls, *args, **kwargs):
if cls.__name__ not in _SINGLE_OBJ:
            # Do not forward constructor args to object.__new__ (raises TypeError on Python 3).
            obj = super(Singleton, cls).__new__(cls)
obj.__class__.single_inited = False
_SINGLE_OBJ[cls.__name__] = obj
return _SINGLE_OBJ[cls.__name__]
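
A minimal sketch of how the singleton base class behaves; Registry is a hypothetical subclass used only for illustration:

```python
class Registry(Singleton):
    def __init__(self):
        self.items = []

a = Registry()
b = Registry()
assert a is b               # the same instance is returned every time
a.items.append("x")
assert b.items == ["x"]     # state is shared because only one object exists
```
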
avg_line_length: 31.088235 | max_line_length: 73 | alphanum_fraction: 0.591296

hexsha: 306feb44c1d6e62d79092d6c5d931fe32d4887ff | size: 20,099 | ext: py | lang: Python
max_stars_repo: meetings/flask_main.py @ pahunter90/proj8-FreeTimes (head 2812d16b6c1839a681a42e90b5539f10d7df24f5), licenses ["Artistic-2.0"], stars: null
max_issues_repo: meetings/flask_main.py @ pahunter90/proj8-FreeTimes (head 2812d16b6c1839a681a42e90b5539f10d7df24f5), licenses ["Artistic-2.0"], issues: null
max_forks_repo: meetings/flask_main.py @ pahunter90/proj8-FreeTimes (head 2812d16b6c1839a681a42e90b5539f10d7df24f5), licenses ["Artistic-2.0"], forks: null
content:
import flask
from flask import render_template
from flask import request
from flask import url_for
import uuid
import heapq
from available import Available
from event import Event
import json
import logging
# Date handling
import arrow # Replacement for datetime, based on moment.js
# import datetime # But we still need time
from dateutil import tz # For interpreting local times
# OAuth2 - Google library implementation for convenience
from oauth2client import client
import httplib2 # used in oauth2 flow
# Google API for services
from apiclient import discovery
###
# Globals
###
import config
if __name__ == "__main__":
CONFIG = config.configuration()
else:
CONFIG = config.configuration(proxied=True)
app = flask.Flask(__name__)
app.debug=CONFIG.DEBUG
app.logger.setLevel(logging.DEBUG)
app.secret_key=CONFIG.SECRET_KEY
SCOPES = 'https://www.googleapis.com/auth/calendar.readonly'
CLIENT_SECRET_FILE = CONFIG.GOOGLE_KEY_FILE ## You'll need this
APPLICATION_NAME = 'MeetMe class project'
CALENDARS = [] #Global container to hold calendars, eventually will be Database
EVENTS = [] #Global container to hold events, eventually will be Database
#############################
#
# Pages (routed from URLs)
#
#############################
@app.route("/")
@app.route("/index")
def index():
app.logger.debug("Entering index")
if 'begin_date' not in flask.session:
init_session_values()
return render_template('index.html')
@app.route("/choose")
def choose():
global CALENDARS
## We'll need authorization to list calendars
## I wanted to put what follows into a function, but had
## to pull it back here because the redirect has to be a
## 'return'
app.logger.debug("Checking credentials for Google calendar access")
credentials = valid_credentials()
if not credentials:
app.logger.debug("Redirecting to authorization")
return flask.redirect(flask.url_for('oauth2callback'))
service = get_gcal_service(credentials)
app.logger.debug("Returned from get_gcal_service")
CALENDARS = []
flask.g.calendars = list_calendars(service)
return render_template('choose_cals.html')
@app.route("/choose_events", methods=['GET', 'POST'])
def choose_events():
global CALENDARS
global EVENTS
## For each calendar, print the events in date and time order
app.logger.debug("Finding Events for each Calendar")
## Make sure we still have access to the account
app.logger.debug("Checking credentials for Google calendar access")
credentials = valid_credentials()
if not credentials:
app.logger.debug("Redirecting to authorization")
return flask.redirect(flask.url_for('oauth2callback'))
service = get_gcal_service(credentials)
# Get the list of calendars to include from the html form
if CALENDARS == []:
CALENDARS = flask.request.form.getlist('include')
# Returns a list of dateTime ranges to look through for overlap
day_ranges = get_dateTime_list()
time_min = arrow.get(flask.session['begin_date']).floor('day')
time_max = arrow.get(flask.session['end_date']).ceil('day')
EVENTS = []
flask.g.events = []
for calendar in CALENDARS:
# Calls a function that returns a list of events
calendar = service.calendars().get(calendarId=calendar).execute()
list_events = service.events().list(calendarId=calendar['id'],
singleEvents=True,
timeMin=time_min, timeMax=time_max).execute()['items']
for i in range(len(list_events)):
transparent = True
# Check if event is marked as available
if 'transparency' not in list_events[i]:
transparent = False
elif list_events[i]['transparency'] == 'opaque':
transparent = False
# Only do this if 'busy' event
if not transparent:
# 'date' only there if all day event
if 'date' in list_events[i]['start']:
# all day event
event_start_time = arrow.get(list_events[i]['start']['date']).floor('day')
event_end_time = arrow.get(list_events[i]['start']['date']).ceil('day')
else:
# normal event
event_start_time = arrow.get(list_events[i]['start']['dateTime'])
event_end_time = arrow.get(list_events[i]['end']['dateTime'])
for date_range in day_ranges:
# Check if any part of an event overlaps
# Note: date/time range is not inclusive (using strict inequalities)
if date_range[0] < event_start_time < date_range[1] or \
date_range[0] < event_end_time < date_range[1] or \
(date_range[0] >= event_start_time and date_range[1] <= event_end_time):
# make sure it's not being added twice
if list_events[i] in EVENTS:
continue
else:
EVENTS.append(list_events[i])
# call a function that sorts the entire list of events by start date and time
# and returns a printable string for the html page
sort_events()
flask.g.events = EVENTS
# render a new html page "show_events" that lists the events in order
# I did this instead of rendering on the index page. I thought it was cleaner
return render_template('choose_events.html')
@app.route("/show_available", methods=['POST'])
def show_available():
"""
Shows times the user is available within the given date time range
"""
global EVENTS
## Make sure we still have access to the account
app.logger.debug("Checking credentials for Google calendar access")
credentials = valid_credentials()
if not credentials:
app.logger.debug("Redirecting to authorization")
return flask.redirect(flask.url_for('oauth2callback'))
service = get_gcal_service(credentials)
flask.g.available = []
A = Available(flask.session['begin_date'], flask.session['end_date'],
get_flask_times())
ignore_events = flask.request.form.getlist('ignore')
for event in EVENTS:
if not event.id in ignore_events:
for i in range(len(A.time)):
if event.start <= A.time[i] < event.end:
A.available[i] = False
i = 0
started = False
while i < len(A.time):
if i == len(A.time)-1:
if started:
end_range = A.time[i]
started = False
flask.g.available.append([start_range.format("MM-DD: h:mma"), end_range.format("MM-DD: h:mma")])
else:
if not started:
if A.time[i].shift(minutes=+15) == A.time[i+1] and A.available[i]:
start_range = A.time[i]
started = True
else:
if not A.time[i].shift(minutes=+15) == A.time[i+1] or not A.available[i]:
end_range = A.time[i]
started = False
flask.g.available.append([start_range.format("MM-DD: h:mma"), end_range.format("MM-DD: h:mma")])
i+=1
return render_template('free_times.html')
def get_dateTime_list():
"""
Returns a list of tuples that are start and end times for
each acceptable chunk in the date range
"""
b_hour, b_minute, e_hour, e_minute = get_flask_times()
start_day = arrow.get(flask.session['begin_date'])
end_day = arrow.get(flask.session['end_date']).ceil('day')
start_day = start_day.replace(tzinfo='US/Pacific')
end_day = end_day.replace(tzinfo='US/Pacific')
#Set the first time range
start_time = start_day.replace(hour=b_hour, minute=b_minute)
end_time = start_day.replace(hour=e_hour, minute=e_minute)
#Set the ultimate end day and time
end_day = end_day.replace(hour=e_hour, minute=e_minute)
day_ranges = []
if start_time >= end_time:
end_time = end_time.shift(days=+1)
end_day = end_day.shift(days=+1)
while start_time < end_day:
day_ranges.append((start_time, end_time))
start_time = start_time.shift(days=+1)
end_time = end_time.shift(days=+1)
return day_ranges
def get_flask_times():
"""
Returns the integer versions of the time session objects as hour and minute
"""
b_hour = int(to_24(flask.session['begin_time'])[:2])
b_minute = int(to_24(flask.session['begin_time'])[-2:])
e_hour = int(to_24(flask.session['end_time'])[:2])
e_minute = int(to_24(flask.session['end_time'])[-2:])
return [b_hour, b_minute, e_hour, e_minute]
####
#
# Google calendar authorization:
# Returns us to the main /choose screen after inserting
# the calendar_service object in the session state. May
# redirect to OAuth server first, and may take multiple
# trips through the oauth2 callback function.
#
# Protocol for use ON EACH REQUEST:
# First, check for valid credentials
# If we don't have valid credentials
# Get credentials (jump to the oauth2 protocol)
# (redirects back to /choose, this time with credentials)
# If we do have valid credentials
# Get the service object
#
# The final result of successful authorization is a 'service'
# object. We use a 'service' object to actually retrieve data
# from the Google services. Service objects are NOT serializable ---
# we can't stash one in a cookie. Instead, on each request we
# get a fresh service object from our credentials, which are
# serializable.
#
# Note that after authorization we always redirect to /choose;
# If this is unsatisfactory, we'll need a session variable to use
# as a 'continuation' or 'return address' to use instead.
#
####
def valid_credentials():
"""
Returns OAuth2 credentials if we have valid
credentials in the session. This is a 'truthy' value.
Return None if we don't have credentials, or if they
have expired or are otherwise invalid. This is a 'falsy' value.
"""
if 'credentials' not in flask.session:
return None
credentials = client.OAuth2Credentials.from_json(
flask.session['credentials'])
if (credentials.invalid or
credentials.access_token_expired):
return None
return credentials
def get_gcal_service(credentials):
"""
We need a Google calendar 'service' object to obtain
list of calendars, busy times, etc. This requires
authorization. If authorization is already in effect,
we'll just return with the authorization. Otherwise,
control flow will be interrupted by authorization, and we'll
end up redirected back to /choose *without a service object*.
Then the second call will succeed without additional authorization.
"""
app.logger.debug("Entering get_gcal_service")
http_auth = credentials.authorize(httplib2.Http())
service = discovery.build('calendar', 'v3', http=http_auth)
app.logger.debug("Returning service")
return service
def sort_events():
"""
Sort events using a priority queue
"""
global EVENTS
H = []
for event in EVENTS:
if 'date' in event['start']:
E = Event(arrow.get(event['start']['date']).floor('day'),
arrow.get(event['start']['date']).ceil('day'),
event['summary'], event['id'])
else:
E = Event(arrow.get(event['start']['dateTime']),
arrow.get(event['end']['dateTime']),
event['summary'], event['id'])
heapq.heappush(H, E)
EVENTS = []
while H:
EVENTS.append(heapq.heappop(H))
@app.route('/oauth2callback')
def oauth2callback():
"""
The 'flow' has this one place to call back to. We'll enter here
more than once as steps in the flow are completed, and need to keep
track of how far we've gotten. The first time we'll do the first
step, the second time we'll skip the first step and do the second,
and so on.
"""
app.logger.debug("Entering oauth2callback")
flow = client.flow_from_clientsecrets(
CLIENT_SECRET_FILE,
scope= SCOPES,
redirect_uri=flask.url_for('oauth2callback', _external=True))
## Note we are *not* redirecting above. We are noting *where*
## we will redirect to, which is this function.
## The *second* time we enter here, it's a callback
## with 'code' set in the URL parameter. If we don't
## see that, it must be the first time through, so we
## need to do step 1.
app.logger.debug("Got flow")
if 'code' not in flask.request.args:
app.logger.debug("Code not in flask.request.args")
auth_uri = flow.step1_get_authorize_url()
return flask.redirect(auth_uri)
## This will redirect back here, but the second time through
## we'll have the 'code' parameter set
else:
## It's the second time through ... we can tell because
## we got the 'code' argument in the URL.
app.logger.debug("Code was in flask.request.args")
auth_code = flask.request.args.get('code')
credentials = flow.step2_exchange(auth_code)
flask.session['credentials'] = credentials.to_json()
## Now I can build the service and execute the query,
## but for the moment I'll just log it and go back to
## the main screen
app.logger.debug("Got credentials")
return flask.redirect(flask.url_for('choose'))
#####
#
# Option setting: Buttons or forms that add some
# information into session state. Don't do the
# computation here; use of the information might
# depend on what other information we have.
# Setting an option sends us back to the main display
# page, where we may put the new information to use.
#
#####
@app.route('/setrange', methods=['POST'])
def setrange():
"""
User chose a date range with the bootstrap daterange
widget.
"""
app.logger.debug("Entering setrange")
daterange = request.form.get('daterange')
flask.session['begin_time'] = request.form.get('earliest')
flask.session['end_time'] = request.form.get('latest')
flask.session['daterange'] = daterange
daterange_parts = daterange.split()
flask.session['begin_date'] = interpret_date(daterange_parts[0])
flask.session['end_date'] = interpret_date(daterange_parts[2])
app.logger.debug("Setrange parsed {} - {} dates as {} - {}".format(
daterange_parts[0], daterange_parts[1],
flask.session['begin_date'], flask.session['end_date']))
return flask.redirect(flask.url_for("choose"))
#convert from 12hr to 24hr time
def to_24(time):
hour = time[0:2]
minute = time[3:5]
am_pm = time[6:8]
if not hour == '12' and am_pm == 'PM':
hour = str(int(hour)+12)
elif hour == '12' and am_pm == 'AM':
hour='00'
return str(hour) + ":" + minute
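# Example behaviour (sketch, assuming the "hh:mm AM/PM" strings the form produces):
#   to_24("09:00 AM") -> "09:00"
#   to_24("01:30 PM") -> "13:30"
#   to_24("12:15 AM") -> "00:15"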
####
#
# Initialize session variables
#
####
def init_session_values():
"""
Start with some reasonable defaults for date and time ranges.
Note this must be run in app context ... can't call from main.
"""
# Default date span = tomorrow to 1 week from now
now = arrow.now('local') # We really should be using tz from browser
tomorrow = now.shift(days=+1)
nextweek = now.shift(days=+7)
flask.session["begin_date"] = tomorrow.floor('day').isoformat()
flask.session["end_date"] = nextweek.ceil('day').isoformat()
flask.session["daterange"] = "{} - {}".format(
tomorrow.format("MM/DD/YYYY"),
nextweek.format("MM/DD/YYYY"))
# Default time span each day, 9 to 5
flask.session["begin_time"] = "09:00 AM"
flask.session["end_time"] = "05:00 PM"
def interpret_time( text ):
"""
Read time in a human-compatible format and
interpret as ISO format with local timezone.
May throw exception if time can't be interpreted. In that
case it will also flash a message explaining accepted formats.
"""
app.logger.debug("Decoding time '{}'".format(text))
time_formats = ["ha", "h:mma", "h:mm a", "H:mm"]
try:
as_arrow = arrow.get(text, time_formats).replace(tzinfo=tz.tzlocal())
as_arrow = as_arrow.replace(year=2016) #HACK see below
app.logger.debug("Succeeded interpreting time")
except:
app.logger.debug("Failed to interpret time")
flask.flash("Time '{}' didn't match accepted formats 13:30 or 1:30pm"
.format(text))
raise
return as_arrow.isoformat()
#HACK #Workaround
# isoformat() on raspberry Pi does not work for some dates
# far from now. It will fail with an overflow from time stamp out
# of range while checking for daylight savings time. Workaround is
# to force the date-time combination into the year 2016, which seems to
# get the timestamp into a reasonable range. This workaround should be
# removed when Arrow or Dateutil.tz is fixed.
# FIXME: Remove the workaround when arrow is fixed (but only after testing
# on raspberry Pi --- failure is likely due to 32-bit integers on that platform)
def interpret_date( text ):
"""
Convert text of date to ISO format used internally,
with the local time zone.
"""
try:
as_arrow = arrow.get(text, "MM/DD/YYYY").replace(
tzinfo=tz.tzlocal())
except:
flask.flash("Date '{}' didn't fit expected format 12/31/2001")
raise
return as_arrow.isoformat()
def next_day(isotext):
"""
ISO date + 1 day (used in query to Google calendar)
"""
as_arrow = arrow.get(isotext)
return as_arrow.shift(days=+1).isoformat()
####
#
# Functions (NOT pages) that return some information
#
####
def list_calendars(service):
"""
Given a google 'service' object, return a list of
calendars. Each calendar is represented by a dict.
The returned list is sorted to have
the primary calendar first, and selected (that is, displayed in
Google Calendars web app) calendars before unselected calendars.
"""
app.logger.debug("Entering list_calendars")
calendar_list = service.calendarList().list().execute()["items"]
result = [ ]
for cal in calendar_list:
kind = cal["kind"]
id = cal["id"]
if "description" in cal:
desc = cal["description"]
else:
desc = "(no description)"
summary = cal["summary"]
# Optional binary attributes with False as default
selected = ("selected" in cal) and cal["selected"]
primary = ("primary" in cal) and cal["primary"]
result.append(
{ "kind": kind,
"id": id,
"summary": summary,
"selected": selected,
"primary": primary
})
return sorted(result, key=cal_sort_key)
def cal_sort_key( cal ):
"""
Sort key for the list of calendars: primary calendar first,
then other selected calendars, then unselected calendars.
(" " sorts before "X", and tuples are compared piecewise)
"""
if cal["selected"]:
selected_key = " "
else:
selected_key = "X"
if cal["primary"]:
primary_key = " "
else:
primary_key = "X"
return (primary_key, selected_key, cal["summary"])
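# Illustrative ordering (hypothetical calendars): a primary, selected calendar maps
# to (" ", " ", summary) and sorts first; a selected secondary calendar maps to
# ("X", " ", summary); an unselected calendar maps to ("X", "X", summary) and sorts
# last, with ties broken alphabetically by summary.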
#################
#
# Functions used within the templates
#
#################
@app.template_filter( 'fmtdate' )
def format_arrow_date( date ):
try:
normal = arrow.get( date )
return normal.format("ddd MM/DD/YYYY")
except:
return "(bad date)"
@app.template_filter( 'fmttime' )
def format_arrow_time( time ):
try:
normal = arrow.get( time )
return normal.format("HH:mm")
except:
return "(bad time)"
#############
if __name__ == "__main__":
# App is created above so that it will
# exist whether this is 'main' or not
# (e.g., if we are running under green unicorn)
app.run(port=CONFIG.PORT,host="0.0.0.0")
| 35.138112
| 116
| 0.639833
|
0e5b9c9178eb5cfe62e5e608dfc019dd257e6670
| 10,689
|
py
|
Python
|
kubernetes/client/models/io_xk8s_cluster_v1alpha3_machine_deployment_spec_template_metadata_owner_references.py
|
mariusgheorghies/python
|
68ac7e168963d8b5a81dc493b1973d29e903a15b
|
[
"Apache-2.0"
] | null | null | null |
kubernetes/client/models/io_xk8s_cluster_v1alpha3_machine_deployment_spec_template_metadata_owner_references.py
|
mariusgheorghies/python
|
68ac7e168963d8b5a81dc493b1973d29e903a15b
|
[
"Apache-2.0"
] | null | null | null |
kubernetes/client/models/io_xk8s_cluster_v1alpha3_machine_deployment_spec_template_metadata_owner_references.py
|
mariusgheorghies/python
|
68ac7e168963d8b5a81dc493b1973d29e903a15b
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: v1.20.7
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kubernetes.client.configuration import Configuration
class IoXK8sClusterV1alpha3MachineDeploymentSpecTemplateMetadataOwnerReferences(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'api_version': 'str',
'block_owner_deletion': 'bool',
'controller': 'bool',
'kind': 'str',
'name': 'str',
'uid': 'str'
}
attribute_map = {
'api_version': 'apiVersion',
'block_owner_deletion': 'blockOwnerDeletion',
'controller': 'controller',
'kind': 'kind',
'name': 'name',
'uid': 'uid'
}
def __init__(self, api_version=None, block_owner_deletion=None, controller=None, kind=None, name=None, uid=None, local_vars_configuration=None): # noqa: E501
"""IoXK8sClusterV1alpha3MachineDeploymentSpecTemplateMetadataOwnerReferences - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._api_version = None
self._block_owner_deletion = None
self._controller = None
self._kind = None
self._name = None
self._uid = None
self.discriminator = None
self.api_version = api_version
if block_owner_deletion is not None:
self.block_owner_deletion = block_owner_deletion
if controller is not None:
self.controller = controller
self.kind = kind
self.name = name
self.uid = uid
@property
def api_version(self):
"""Gets the api_version of this IoXK8sClusterV1alpha3MachineDeploymentSpecTemplateMetadataOwnerReferences. # noqa: E501
API version of the referent. # noqa: E501
:return: The api_version of this IoXK8sClusterV1alpha3MachineDeploymentSpecTemplateMetadataOwnerReferences. # noqa: E501
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""Sets the api_version of this IoXK8sClusterV1alpha3MachineDeploymentSpecTemplateMetadataOwnerReferences.
API version of the referent. # noqa: E501
:param api_version: The api_version of this IoXK8sClusterV1alpha3MachineDeploymentSpecTemplateMetadataOwnerReferences. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and api_version is None: # noqa: E501
raise ValueError("Invalid value for `api_version`, must not be `None`") # noqa: E501
self._api_version = api_version
@property
def block_owner_deletion(self):
"""Gets the block_owner_deletion of this IoXK8sClusterV1alpha3MachineDeploymentSpecTemplateMetadataOwnerReferences. # noqa: E501
If true, AND if the owner has the \"foregroundDeletion\" finalizer, then the owner cannot be deleted from the key-value store until this reference is removed. Defaults to false. To set this field, a user needs \"delete\" permission of the owner, otherwise 422 (Unprocessable Entity) will be returned. # noqa: E501
:return: The block_owner_deletion of this IoXK8sClusterV1alpha3MachineDeploymentSpecTemplateMetadataOwnerReferences. # noqa: E501
:rtype: bool
"""
return self._block_owner_deletion
@block_owner_deletion.setter
def block_owner_deletion(self, block_owner_deletion):
"""Sets the block_owner_deletion of this IoXK8sClusterV1alpha3MachineDeploymentSpecTemplateMetadataOwnerReferences.
If true, AND if the owner has the \"foregroundDeletion\" finalizer, then the owner cannot be deleted from the key-value store until this reference is removed. Defaults to false. To set this field, a user needs \"delete\" permission of the owner, otherwise 422 (Unprocessable Entity) will be returned. # noqa: E501
:param block_owner_deletion: The block_owner_deletion of this IoXK8sClusterV1alpha3MachineDeploymentSpecTemplateMetadataOwnerReferences. # noqa: E501
:type: bool
"""
self._block_owner_deletion = block_owner_deletion
@property
def controller(self):
"""Gets the controller of this IoXK8sClusterV1alpha3MachineDeploymentSpecTemplateMetadataOwnerReferences. # noqa: E501
If true, this reference points to the managing controller. # noqa: E501
:return: The controller of this IoXK8sClusterV1alpha3MachineDeploymentSpecTemplateMetadataOwnerReferences. # noqa: E501
:rtype: bool
"""
return self._controller
@controller.setter
def controller(self, controller):
"""Sets the controller of this IoXK8sClusterV1alpha3MachineDeploymentSpecTemplateMetadataOwnerReferences.
If true, this reference points to the managing controller. # noqa: E501
:param controller: The controller of this IoXK8sClusterV1alpha3MachineDeploymentSpecTemplateMetadataOwnerReferences. # noqa: E501
:type: bool
"""
self._controller = controller
@property
def kind(self):
"""Gets the kind of this IoXK8sClusterV1alpha3MachineDeploymentSpecTemplateMetadataOwnerReferences. # noqa: E501
Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:return: The kind of this IoXK8sClusterV1alpha3MachineDeploymentSpecTemplateMetadataOwnerReferences. # noqa: E501
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""Sets the kind of this IoXK8sClusterV1alpha3MachineDeploymentSpecTemplateMetadataOwnerReferences.
Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:param kind: The kind of this IoXK8sClusterV1alpha3MachineDeploymentSpecTemplateMetadataOwnerReferences. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and kind is None: # noqa: E501
raise ValueError("Invalid value for `kind`, must not be `None`") # noqa: E501
self._kind = kind
@property
def name(self):
"""Gets the name of this IoXK8sClusterV1alpha3MachineDeploymentSpecTemplateMetadataOwnerReferences. # noqa: E501
Name of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#names # noqa: E501
:return: The name of this IoXK8sClusterV1alpha3MachineDeploymentSpecTemplateMetadataOwnerReferences. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this IoXK8sClusterV1alpha3MachineDeploymentSpecTemplateMetadataOwnerReferences.
Name of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#names # noqa: E501
:param name: The name of this IoXK8sClusterV1alpha3MachineDeploymentSpecTemplateMetadataOwnerReferences. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and name is None: # noqa: E501
raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501
self._name = name
@property
def uid(self):
"""Gets the uid of this IoXK8sClusterV1alpha3MachineDeploymentSpecTemplateMetadataOwnerReferences. # noqa: E501
UID of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#uids # noqa: E501
:return: The uid of this IoXK8sClusterV1alpha3MachineDeploymentSpecTemplateMetadataOwnerReferences. # noqa: E501
:rtype: str
"""
return self._uid
@uid.setter
def uid(self, uid):
"""Sets the uid of this IoXK8sClusterV1alpha3MachineDeploymentSpecTemplateMetadataOwnerReferences.
UID of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#uids # noqa: E501
:param uid: The uid of this IoXK8sClusterV1alpha3MachineDeploymentSpecTemplateMetadataOwnerReferences. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and uid is None: # noqa: E501
raise ValueError("Invalid value for `uid`, must not be `None`") # noqa: E501
self._uid = uid
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, IoXK8sClusterV1alpha3MachineDeploymentSpecTemplateMetadataOwnerReferences):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, IoXK8sClusterV1alpha3MachineDeploymentSpecTemplateMetadataOwnerReferences):
return True
return self.to_dict() != other.to_dict()
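# Minimal usage sketch (not part of the generated module; the field values below are
# made up for illustration):
#   ref = IoXK8sClusterV1alpha3MachineDeploymentSpecTemplateMetadataOwnerReferences(
#       api_version="cluster.x-k8s.io/v1alpha3", kind="Cluster",
#       name="my-cluster", uid="1234-abcd")
#   ref.to_dict()   # -> {'api_version': 'cluster.x-k8s.io/v1alpha3', 'kind': 'Cluster', ...}
# The required fields (api_version, kind, name, uid) raise ValueError if left None
# while client-side validation is enabled.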
| 40.033708
| 322
| 0.67939
|
acf9f02907c9158399c5ea5352441cec5acd7d6e
| 58,647
|
py
|
Python
|
src/bin/visitdiff.py
|
eddieTest/visit
|
ae7bf6f5f16b01cf6b672d34e2d293fa7170616b
|
[
"BSD-3-Clause"
] | null | null | null |
src/bin/visitdiff.py
|
eddieTest/visit
|
ae7bf6f5f16b01cf6b672d34e2d293fa7170616b
|
[
"BSD-3-Clause"
] | null | null | null |
src/bin/visitdiff.py
|
eddieTest/visit
|
ae7bf6f5f16b01cf6b672d34e2d293fa7170616b
|
[
"BSD-3-Clause"
] | 1
|
2020-03-18T23:17:43.000Z
|
2020-03-18T23:17:43.000Z
|
###############################################################################
#
# Purpose: Sets up a 2x2 layout of vis windows and expressions for database
# differencing of scalar variables (vector variables are still to
# be added). The intersection of the scalar variable names is
# calculated and we define CMFE expressions for them to compute
# dbl - dbr. Then, the first scalar in the set of CMFE expressions is
# plotted, along with its source values from dbl, dbr. The ell, 'l'
# and arr, 'r', nomenclature is to indicate left and right operands
# of the differencing operation.
#
# Usage: visit -diff dbl dbr [ -force_pos_cmfe ]
#
# Notes: dbl, dbr are the names of the databases that VisIt will difference.
#
# Future work: It would be nice if the CLI could define callback functions
# to be executed when the user does something such as changing
# the active variable. You can imagine that we would want to
# change the plotted variables from other windows too when
# executing such a callback function.
#
# Programmer: Mark C. Miller (based on original design by Brad Whitlock)
# Date: Wed Jul 18 10:17:11 PDT 2007
#
##############################################################################
import sys, string, os, re, time
###############################################################################
# Function: help
#
# Purpose: Print a useful help message
#
# Programmer: Mark C. Miller
# Date: Wed Jul 18 18:12:28 PDT 2007
#
###############################################################################
def help():
print """
This python script is intended to be used in concert with VisIt's CLI and GUI.
It is invoked using the command 'visit -diff <dbl> <dbr> [ -force_pos_cmfe ]'.
This script will generate the necessary Cross-Mesh Field Evaluation (CMFE)
expressions to facilitate visualization and analysis of the differences between
two databases. VisIt will open windows to display both the left and right
databases as well as their difference. The windows are numbered as follows
Upper Left (1) -- shows Left-Right          Upper Right (2) -- shows Right-Left (a)
Lower Left (3) -- shows Left                Lower Right (4) -- shows Right
(a) only for position-based CMFE expressions.
VisIt uses the Cross-Mesh Field Evaluation (CMFE) expression functions
to generate the differences. A CMFE function creates an instance
of a variable from another (source) mesh on the specified (destination)
mesh. Therefore, window 1 (upper left) shows the difference obtained
when taking a variable from the mesh in the right database, applying the
CMFE expression function to map it onto the mesh in the left database and
then subtracting it from the same variable in the left database.
VisIt can use two variants of CMFE expression functions depending
on how similar the source and destination meshes are; connectivity-based
(conn_cmfe) which assumes the underlying mesh(s) for the left and right
databases have identical connectivity and position-based (pos_cmfe) which
does not make this assumption. VisIt will attempt to automatically select
which variant of CMFE expression to use based on some simple heuristics.
For meshes with identical connectivity, conn_cmfe expressions are
preferable because they are higher performance and do not require VisIt
to perform any interpolation. In fact, the conn_cmfe operation is
perfectly anti-symmetric. That is Left-Right = -(Right-Left).
The same cannot be said for pos_cmfe expressions. For this reason,
window 2 (upper right) is only ever active when using position-based CMFE
expressions. It shows the (possibly different) difference obtained when
taking a variable from the mesh in the left database, applying the CMFE
expression function to map it onto the mesh in the right database and
then subtracting it from the same variable in the right database.
Pos_cmfe expressions will attempt to generate useful results regardless of
the similarity of the underlying meshes. You can force use of pos_cmfe
expressions by adding '-force_pos_cmfe' to the command line when running
'visit -diff'.
Note that the differences VisIt will compute in this mode are single
precision. This is true regardless of whether the input data is itself
double precision. VisIt will convert double precision to single
precision before processing it. Although this is a result of earlier
visualization-specific design requirements and constraints, the intention
is that eventually double precision will be supported.
Expressions for the differences for all scalar variables will be under the
'diffs' submenu. For material volume fractions, the scalar volume fraction
variables will be under the 'matvf_comps' submenu and their differences will
be under 'diffs/matvf_comps' submenu. Likewise for vector variables, their
scalar components will be under the 'vector_comps' submenu and their
differences under the 'diffs/vector_comps' submenu.
'visit -diff' is operated using a combination of VisIt's GUI and CLI.
There are a number of python functions defined in this script. These
are...
ToggleMesh() -- Toggle the mesh plot(s) on/off.
ToggleBoundary() -- Toggle the material boundary plot(s) on/off.
ToggleHidePloti() -- Toggle hiding the ith plot in the plot list(s)
DiffSummary() -- Examine all variables in the database and report a
summary of differences found in each.
ChangeVar("foo") -- Change the variable displayed in all windows to "foo".
ZPick((1,2,3)) -- Perform a zone-pick in all windows for the zone ids 1,2,3
NPick((4,5,6)) -- Perform a node-pick in all windows for the node ids 4,5,6
For the functions described above with no arguments, there are pre-defined macros
in VisIt's GUI that can be found under Controls->Macros. Not all of the convenience
functions available in this script are actionable through the GUI. Only those that
DO NOT require some kind of user input are.
Finally, you should be able to do whatever operations you wish in a given window
and then synchronize all other windows to the same state. To do this, add whatever
operators, plots, as well as adjustments to plot and operator attributes you wish
to a given window. Then use the SyncWindows() method to bring all other windows
into a consistent state. For example, if you add plots and operators to the window
1 (the upper left window where L-R is displayed), then calling SyncWindows(1) will bring
all other windows into an identical state.
SyncWindows(a) -- Synchronize all windows to window 'a', where a is 1...4.
There are buttons defined in Controls->Macros to perform these synchronization
operations. For example, the SyncToL-RDiff button will synchronize all windows
to be consistent with whatever was done in the window where L-R is displayed
(upper left).
Finally, if you move around in time in a given window, use the SyncTimeStates()
method to synchronize all windows to the current time state.
SyncTimeStates(a) -- Synchronize all windows' time state to window 'a'.
Note that 'visit -diff' cannot currently handle differences in databases that
have a different number of time states.
"""
###############################################################################
# Function: GetDiffVarNames
#
# Purpose: Given any variable's name (in diff menu or submenus) return all
# varieties of names for it. If absolute and relative differencing
# is added, this is the place to handle the naming.
#
# Programmer: Mark C. Miller
# Date: Wed Jul 18 18:12:28 PDT 2007
#
###############################################################################
def GetDiffVarNames(name):
retval = ()
varname = re.search("diff/(.*)", name)
if varname != None:
varname = varname.group(1)
retval = (varname, name)
else:
retval = (name, "diff/%s"%name)
return retval
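# Example (sketch): both spellings of a variable name map to the same pair,
#   GetDiffVarNames("pressure")      -> ("pressure", "diff/pressure")
#   GetDiffVarNames("diff/pressure") -> ("pressure", "diff/pressure")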
###############################################################################
# Function: GetNDomains
#
# Purpose: Return number of domains for a given mesh
#
# Programmer: Brad Whitlock
# Date: Wed Jul 18 18:12:28 PDT 2007
#
###############################################################################
def GetNDomains(metadata, meshname):
nd = 1
for mi in range(metadata.GetNumMeshes()):
if metadata.GetMeshes(mi).name == meshname:
nd = metadata.GetMeshes(mi).numBlocks
break
return nd
###############################################################################
# Function: GetMeshType
#
# Purpose: Return type of given mesh
#
# Programmer: Brad Whitlock
# Date: Wed Jul 18 18:12:28 PDT 2007
#
###############################################################################
def GetMeshType(metadata, meshname):
mt = -1
for mi in range(metadata.GetNumMeshes()):
if metadata.GetMeshes(mi).name == meshname:
mt = metadata.GetMeshes(mi).meshType
break
return mt
###############################################################################
# Function: GetVarInfo
#
# Purpose: Return a named portion of a metadata object
#
# Programmer: Mark C. Miller
# Date: Wed Jul 18 18:12:28 PDT 2007
#
###############################################################################
def GetVarInfo(metadata, varname):
for i in range(metadata.GetNumScalars()):
if metadata.GetScalars(i).name == varname:
return metadata.GetScalars(i)
for i in range(metadata.GetNumMeshes()):
if metadata.GetMeshes(i).name == varname:
return metadata.GetMeshes(i)
for i in range(metadata.GetNumMaterials()):
if metadata.GetMaterials(i).name == varname:
return metadata.GetMaterials(i)
for i in range(metadata.GetNumVectors()):
if metadata.GetVectors(i).name == varname:
return metadata.GetVectors(i)
for i in range(metadata.GetNumArrays()):
if metadata.GetArrays(i).name == varname:
return metadata.GetArrays(i)
for i in range(metadata.GetNumCurves()):
if metadata.GetCurves(i).name == varname:
return metadata.GetCurves(i)
for i in range(metadata.GetNumLabels()):
if metadata.GetLabels(i).name == varname:
return metadata.GetLabels(i)
for i in range(metadata.GetNumTensors()):
if metadata.GetTensors(i).name == varname:
return metadata.GetTensors(i)
return 0
###############################################################################
# Function: GetVarType
#
# Purpose: Return a variable's avt type
#
# Programmer: Mark C. Miller
# Date: Wed Jul 18 18:12:28 PDT 2007
#
# Modifications:
#
# Mark C. Miller, Tue Aug 28 16:25:05 PDT 2007
# Added logic to check defined expressions and expressions in metadata
#
###############################################################################
def GetVarType(metadata, varname):
theType = "Unknown"
vInfo = GetVarInfo(metadata, varname)
if vInfo != 0:
tmpType = re.search("<type 'avt([A-Z][a-z]*)MetaData'>", str(type(vInfo)))
if tmpType != None:
theType = tmpType.group(1)
# if we don't have an answer, look at currently defined expressions
if theType == "Unknown":
el = Expressions()
i = 0
while i < len(el) and theType == "Unknown":
exp = el[i]
if exp[0] == varname:
theType = "Scalar" # assume its a scalar
break
i = i + 1
# if we don't have an answer, look at expressions from the database
if theType == "Unknown":
el = metadata.GetExprList()
for i in range(el.GetNumExpressions()):
exp = el.GetExpressions(i)
if exp.name == varname:
tmpType = re.search("\ntype = ([A-Z][a-z]*)MeshVar", str(exp))
if tmpType != None:
theType = tmpType.group(1)
break
return theType
###############################################################################
# Function: MeshForVar
#
# Purpose: Determine the mesh for a given variable
#
# Programmer: Mark C. Miller
# Date: Wed Jul 18 18:12:28 PDT 2007
#
###############################################################################
def MeshForVar(metadata, varname, dontCheckExpressions=0):
meshName = "Unknown"
vInfo = GetVarInfo(metadata, varname)
if vInfo != 0 and hasattr(vInfo, "meshName"):
tmpMeshName = re.search("\nmeshName = \"(.*)\"\n",str(vInfo))
if tmpMeshName != None:
meshName = tmpMeshName.group(1)
else:
# look at meshes themselves
for i in range(metadata.GetNumMeshes()):
if metadata.GetMeshes(i).name == varname:
meshName = varname
break
ttab = string.maketrans("()<>,:","@@@@@@")
# if we don't yet have an answer, look at current expressions
if meshName == "Unknown" and dontCheckExpressions == 0:
exprList = Expressions()
i = 0;
while i < len(exprList) and meshName == "Unknown":
theExpr = exprList[i]
if theExpr[0] == varname:
defnTmp = string.translate(theExpr[1],ttab)
defnFields = defnTmp.split('@')
for f in defnFields:
meshNameTmp = MeshForVar(metadata, f, 1)
if meshNameTmp != "Unknown":
meshName = meshNameTmp
break
i = i + 1
# if we don't yet have an answer, look at expressions from database
if meshName == "Unknown" and dontCheckExpressions == 0:
exprList = metadata.GetExprList()
i = 0;
while i < exprList.GetNumExpressions() and meshName == "Unknown":
theExpr = exprList.GetExpressions(i)
if theExpr.name == varname:
defnTmp = string.translate(theExpr.definition,ttab)
defnFields = defnTmp.split('@')
for f in defnFields:
meshNameTmp = MeshForVar(metadata, f, 1)
if meshNameTmp != "Unknown":
meshName = meshNameTmp
break
i = i + 1
return meshName
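# Behaviour sketch (hypothetical names): for a scalar "pressure" whose metadata
# lists meshName "mesh1", MeshForVar(md, "pressure") returns "mesh1"; for an
# expression variable such as speed = magnitude(velocity) it tokenizes the
# definition and recursively resolves the mesh of "velocity" instead.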
###############################################################################
# Function: MatForMesh
#
# Purpose: Return a material object for a given mesh
#
# Programmer: Mark C. Miller
# Date: Wed Jul 18 18:12:28 PDT 2007
#
###############################################################################
def MatForMesh(metadata, meshname):
for i in range(metadata.GetNumMaterials()):
if metadata.GetMaterials(i).meshName == meshname:
return metadata.GetMaterials(i).name
###############################################################################
# Function: GetVarCentering
#
# Purpose: Return the centering for a given variable
#
# Programmer: Mark C. Miller
# Date: Wed Jul 18 18:12:28 PDT 2007
#
###############################################################################
def GetVarCentering(metadata, varname):
centering = "Unknown"
vInfo = GetVarInfo(metadata, varname)
if vInfo != 0 and hasattr(vInfo, "centering"):
tmpCentering = re.search("\ncentering = (AVT_[A-Z]*) *#.*\n",str(vInfo))
if tmpCentering != None:
centering = tmpCentering.group(1)
return centering
###############################################################################
# Function: IsNotScalarVarPlotType
#
# Purpose: Return whether or not the given plot type supports simple scalar
# variables.
#
# Programmer: Mark C. Miller
# Date: Wed Jul 18 18:12:28 PDT 2007
#
###############################################################################
def IsNotScalarVarPlotType(plotType):
plotTypeName = PlotPlugins()[plotType]
if plotTypeName == "Mesh" or \
plotTypeName == "Boundary" or \
plotTypeName == "FilledBoundary" or \
plotTypeName == "Vector" or \
plotTypeName == "Molecule" or \
plotTypeName == "Subset":
return 1
return 0
###############################################################################
# Function: GetCurrentTimeState
#
# Purpose: Given a window id, return the current time state in that window
#
# Programmer: Mark C. Miller
# Date: Wed Jul 18 18:12:28 PDT 2007
#
###############################################################################
def GetCurrentTimeState(win):
SetActiveWindow(win)
wi = GetWindowInformation()
if wi.activeTimeSlider == -1:
return 0
return wi.timeSliderCurrentStates[wi.activeTimeSlider]
###############################################################################
# Function: SyncTimeStates
#
# Purpose: Ensure that the various data structures of this script are brought up
# to date with the current time state of the specified source window.
# Also, ensure that all windows' time states are brought up to date with
# the specified window's time state.
#
# Programmer: Mark C. Miller
# Date: Wed Jul 18 18:12:28 PDT 2007
#
###############################################################################
def SyncTimeStates(srcWin):
global currentTimeState
global mdl
global mdr
global dbl
global dbr
if currentTimeState != -1:
# no work to do if the current time state is already set
tmpCurrentTimeState = GetCurrentTimeState(srcWin)
if currentTimeState == tmpCurrentTimeState:
print "Time state is up to date"
return
print "Updating time state to state %d"%tmpCurrentTimeState
currentTimeState = tmpCurrentTimeState
else:
print "Updating time state to state 0"
currentTimeState = 0
TimeSliderSetState(currentTimeState)
# There is a bug with correlations when time arg is used to GetMetaData.
# Without it, it turns out we always get state zero.
# mdl = GetMetaData(dbl, currentTimeState)
# mdr = GetMetaData(dbr, currentTimeState)
mdl = GetMetaData(dbl)
mdr = GetMetaData(dbr)
if mdl.numStates != mdr.numStates:
print "Database \"%s\" has %d states"%(dbl, mdl.numStates)
print "Database \"%s\" has %d states"%(dbr, mdr.numStates)
print "Currently, 'visit -diff' is unable to handle databases with different numbers of states"
sys.exit(4)
UpdateExpressions(mdl, mdr)
###############################################################################
# Function: SyncTime...
#
# Purpose: Stubs to register as macros
#
# Programmer: Mark C. Miller
# Date: Wed Jul 18 18:12:28 PDT 2007
#
###############################################################################
def SyncTimeL_R():
SyncTimeStates(1)
def SyncTimeR_L():
SyncTimeStates(2)
def SyncTimeLeft():
SyncTimeStates(3)
def SyncTimeRight():
SyncTimeStates(4)
###############################################################################
# Function: ProcessCLArgs
#
# Purpose: Read the command line arguments
#
# Programmer: Mark C. Miller
# Date: Wed Jul 18 18:12:28 PDT 2007
#
# Modifications:
# Mark C. Miller, Tue Aug 21 11:17:20 PDT 2007
# Added support for difference summary mode
#
# Mark C. Miller, Tue Aug 28 16:25:05 PDT 2007
# Added logic to set noWinMode
#
# Brad Whitlock, Wed Feb 3 17:08:34 PST 2010
# I added an -diff_format argument to compensate for the recent loss
# of --assume_format elsewhere in VisIt.
#
###############################################################################
def ProcessCLArgs():
global dbl
global dbr
global forcePosCMFE
global diffSummaryOnly
global noWinMode
try:
i = 1
while i < len(sys.argv):
if sys.argv[i] == "-vdiff":
dbl = sys.argv[i+1]
dbr = sys.argv[i+2]
i = i + 2
if sys.argv[i] == "-force_pos_cmfe":
forcePosCMFE = 1
if sys.argv[i] == "-summary_only":
diffSummaryOnly = 1
if sys.argv[i] == "-nowin":
noWinMode = 1
if sys.argv[i] == "-diff_format":
SetPreferredFileFormats(sys.argv[i+1])
i = i + 1
i = i + 1
except:
print "The -vdiff flag takes 2 database names.", dbl, dbr
sys.exit(1)
if dbl == "notset" or dbr == "notset":
print "The -vdiff argument was not given."
sys.exit(2)
###############################################################################
# Function: UpdateThisExpression
#
# Purpose: Given the list of currently defined expressions, determine if the
# new expression (exprName, expr) is being added, updated or left
# unchanged.
#
# Programmer: Mark C. Miller
# Date: Wed Jul 18 18:12:28 PDT 2007
#
###############################################################################
def UpdateThisExpression(exprName, expr, currentExpressions, addedExpressions,
updatedExpressions, unchangedExpressions):
# Add or update the expression.
foundExprName = 0
for expr_i in range(len(currentExpressions)):
if currentExpressions[expr_i][0] == exprName:
foundExprName = 1
if currentExpressions[expr_i][1] == expr:
unchangedExpressions.append(exprName)
break
else:
DeleteExpression(exprName)
DefineScalarExpression(exprName, expr)
updatedExpressions.append(exprName)
break
if foundExprName == 0:
DefineScalarExpression(exprName, expr)
addedExpressions.append(exprName)
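# Outcome sketch: a (name, definition) pair already present with the same definition
# is only recorded in unchangedExpressions; the same name with a different definition
# is deleted, re-defined and recorded in updatedExpressions; a name not present at
# all is defined and recorded in addedExpressions.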
###############################################################################
# Function: UpdateExpressions
#
# Purpose: Define various expressions needed to represent the difference
# between corresponding variables in the left and right databases.
#
# First, we get the currently defined expressions and remove any
# that come from the database metadata.
#
# Next, we iterate over all scalar variables defining either conn_
# or pos_ cmfes for their difference. Note: We don't really handle
# the R-PosCMFE(L) case yet.
#
# Next, we iterate over all material objects, defining matvf
# expressions for each material as a scalar and then difference
# expressions for these scalars. Likewise for vector variables.
#
# Finally, we use UpdateThisExpression to ensure we don't re-define
# the same expressions and remove old expressions as we vary time
# states.
#
# Programmer: Brad Whitlock
# Date: Wed Jul 18 18:12:28 PDT 2007
#
# Modifications:
# Mark C. Miller, Wed Jul 18 18:12:28 PDT 2007
# Made it work on material volume fractions and vectors. Made it handle
# changes in timesteps along with adding new expressions for new variables,
# deleting old expressions and leaving unchanged expressions alone.
#
# Mark C. Miller, Thu Jul 19 21:36:47 PDT 2007
# Inverted loops to identify pre-defined expressions coming from md.
#
# Mark C. Miller, Tue Aug 21 11:17:20 PDT 2007
# Added support for difference summary mode
#
# Mark C. Miller, Thu Dec 3 20:53:21 PST 2009
# Apply patch from Cihan Altinay for typo of 'numDims' on vector var
# metadata to 'varDims'
###############################################################################
def UpdateExpressions(mdl, mdr):
global forcePosCMFE
global cmfeMode
global diffVars
if diffSummaryOnly == 0:
print "Defining expressions for state %d"%currentTimeState
cmfeModeNew = 0
diffVarsNew = []
addedExpressions = []
updatedExpressions = []
deletedExpressions = []
unchangedExpressions = []
currentExpressionsTmp = Expressions()
currentExpressionsList = []
# remove any pre-defined expressions in currentExpressions
# coming from the metadata
for expr_i in range(len(currentExpressionsTmp)):
foundIt = 0
# Look for it in the left db's metadata
for expr_j in range(mdl.GetExprList().GetNumExpressions()):
if currentExpressionsTmp[expr_i][0] == \
mdl.GetExprList().GetExpressions(expr_j).name:
foundIt = 1
break
if foundIt == 0:
# Look for it in the right db's metadata
for expr_j in range(mdr.GetExprList().GetNumExpressions()):
if currentExpressionsTmp[expr_i][0] == \
mdr.GetExprList().GetExpressions(expr_j).name:
foundIt = 1
break
# If we didn't find it in either left or right dbs md, it is
# NOT a pre-defined expression. So, we can keep it.
if foundIt == 0:
currentExpressionsList.append(currentExpressionsTmp[expr_i])
currentExpressions = tuple(currentExpressionsList)
# Iterate over all the scalar variables in metadata.
for scalar_i1 in range(mdl.GetNumScalars()):
for scalar_i2 in range(mdr.GetNumScalars()):
valid = mdl.GetScalars(scalar_i1).validVariable and \
mdr.GetScalars(scalar_i2).validVariable
namematch = mdl.GetScalars(scalar_i1).name == \
mdr.GetScalars(scalar_i2).name
if valid and namematch:
# Create the expression name.
if mdl.GetScalars(scalar_i1).name[0] == '/':
exprName = "diff" + mdl.GetScalars(scalar_i1).name
else:
exprName = "diff/" + mdl.GetScalars(scalar_i1).name
# The name of the scalar
sName = mdl.GetScalars(scalar_i1).name
qsName = sName
if string.find(qsName, "/") != -1:
qsName = "<" + qsName + ">"
# Determine some properties about the mesh so we can decide
# Whether we'll use conn_cmfe or pos_cmfe.
m1Name = mdl.GetScalars(scalar_i1).meshName
m2Name = mdr.GetScalars(scalar_i2).meshName
nb1 = GetNDomains(mdl, m1Name)
mt1 = GetMeshType(mdl, m1Name)
nb2 = GetNDomains(mdr, m2Name)
mt2 = GetMeshType(mdr, m2Name)
if nb1 == nb2 and mt1 == mt2 and m1Name == m2Name and forcePosCMFE != 1:
expr = "%s - conn_cmfe(<%s:%s>, %s)" % (qsName, dbr, sName, m1Name)
else:
expr = "%s - pos_cmfe(<%s:%s>, %s, 0.)" % (qsName, dbr, sName, m1Name)
cmfeModeNew = 1
diffVarsNew.append(exprName)
UpdateThisExpression(exprName, expr, currentExpressions, addedExpressions,
updatedExpressions, unchangedExpressions)
# Iterate over all the material variables in metadata.
for mat_i1 in range(mdl.GetNumMaterials()):
for mat_i2 in range(mdr.GetNumMaterials()):
matl = mdl.GetMaterials(mat_i1)
matr = mdr.GetMaterials(mat_i2)
valid = matl.validVariable and matr.validVariable
nameMatch = matl.name == matr.name
numMatsMatch = matl.numMaterials == matr.numMaterials
matNamesMatch = matl.materialNames == matr.materialNames
if valid and nameMatch and numMatsMatch and matNamesMatch:
# Determine some properties about the mesh so we can decide
# Whether we'll use conn_cmfe or pos_cmfe.
m1Name = matl.meshName
m2Name = matr.meshName
nb1 = GetNDomains(mdl, m1Name)
mt1 = GetMeshType(mdl, m1Name)
nb2 = GetNDomains(mdr, m2Name)
mt2 = GetMeshType(mdr, m2Name)
for m in range(matl.numMaterials):
# Create the matvf expression for this mat
matName = matl.materialNames[m]
altMatName = matName.replace(" ","_")
matNum = matName.split(' ')[0]
matvfExprName = "matvf_comps/" + altMatName
matvfexpr = "matvf(%s,[%s])"%(matl.name, matNum)
UpdateThisExpression(matvfExprName, matvfexpr, currentExpressions, addedExpressions,
updatedExpressions, unchangedExpressions)
# Create the expression for the difference in matvfs for this mat
exprName = "diff/matvf_comps/" + altMatName
if nb1 == nb2 and mt1 == mt2 and m1Name == m2Name and forcePosCMFE != 1:
expr = "<matvf_comps/%s> - conn_cmfe(<%s:matvf_comps/%s>, %s)" % (altMatName, dbr, altMatName, m1Name)
else:
expr = "<matvf_comps/%s> - pos_cmfe(<%s:matvf_comps/%s>, %s, 0.)" % (altMatName, dbr, altMatName, m1Name)
cmfeModeNew = 1
diffVarsNew.append(exprName)
UpdateThisExpression(exprName, expr, currentExpressions, addedExpressions,
updatedExpressions, unchangedExpressions)
# Iterate over all the vector variables in metadata.
for vec_i1 in range(mdl.GetNumVectors()):
for vec_i2 in range(mdr.GetNumVectors()):
vecl = mdl.GetVectors(vec_i1)
vecr = mdr.GetVectors(vec_i2)
valid = vecl.validVariable and vecr.validVariable
nameMatch = vecl.name == vecr.name
numDimsMatch = vecl.varDim == vecr.varDim
if valid and nameMatch and numDimsMatch:
# Determine some properties about the mesh so we can decide
# Whether we'll use conn_cmfe or pos_cmfe.
m1Name = vecl.meshName
m2Name = vecr.meshName
nb1 = GetNDomains(mdl, m1Name)
mt1 = GetMeshType(mdl, m1Name)
nb2 = GetNDomains(mdr, m2Name)
mt2 = GetMeshType(mdr, m2Name)
for m in range(vecl.varDim):
# Create the expression to extract a component for this vector
compName = vecl.name + "%02d"%m
vecExprName = "vector_comps/" + compName
vecexpr = "%s[%d]"%(vecl.name, m)
UpdateThisExpression(vecExprName, vecexpr, currentExpressions, addedExpressions,
updatedExpressions, unchangedExpressions)
# Create the expression for the difference in components
exprName = "diff/vector_comps/" + compName
if nb1 == nb2 and mt1 == mt2 and m1Name == m2Name and forcePosCMFE != 1:
expr = "<vector_comps/%s> - conn_cmfe(<%s:vector_comps/%s>, %s)" % (compName, dbr, compName, m1Name)
else:
expr = "<vector_comps/%s> - pos_cmfe(<%s:vector_comps/%s>, %s, 0.)" % (compName, dbr, compName, m1Name)
cmfeModeNew = 1
diffVarsNew.append(exprName)
UpdateThisExpression(exprName, expr, currentExpressions, addedExpressions,
updatedExpressions, unchangedExpressions)
# Finally, delete any expressions we aren't using anymore.
for expr_i in range(len(currentExpressions)):
foundExprName = 0
for expr_j in range(len(unchangedExpressions)):
if unchangedExpressions[expr_j] == currentExpressions[expr_i][0]:
foundExprName = 1
break
for expr_j in range(len(updatedExpressions)):
if updatedExpressions[expr_j] == currentExpressions[expr_i][0]:
foundExprName = 1
break
for expr_j in range(len(addedExpressions)):
if addedExpressions[expr_j] == currentExpressions[expr_i][0]:
foundExprName = 1
break
# if foundExprName == 0:
# DeleteExpression(currentExpressions[expr_i][0])
# deletedExpressions.append(currentExpressions[expr_i][0])
# Print out some information about what we did
if diffSummaryOnly == 0:
if len(addedExpressions) > 0:
print " Added %d expressions..."%len(addedExpressions)
for expr_i in range(len(addedExpressions)):
print " %s"%addedExpressions[expr_i]
if len(unchangedExpressions) > 0:
print " Unchanged %d expressioons..."%len(unchangedExpressions)
for expr_i in range(len(unchangedExpressions)):
print " %s"%unchangedExpressions[expr_i]
if len(updatedExpressions) > 0:
print " Updated %d expressions..."%len(updatedExpressions)
for expr_i in range(len(updatedExpressions)):
print " %s"%updatedExpressions[expr_i]
if len(deletedExpressions) > 0:
print " Deleted %d expressions"%len(deletedExpressions)
for expr_i in range(len(deletedExpressions)):
print " %s"%deletedExpressions[expr_i]
print "Finished defining expressions"
cmfeMode = cmfeModeNew
diffVarsNew.sort()
diffVars = diffVarsNew
###############################################################################
# Function: Initialize
#
# Purpose: Setup the initial windows and behavior
#
# Programmer: Mark C. Miller
# Date: Wed Jul 18 18:12:28 PDT 2007
#
# Modifications:
# Mark C. Miller, Tue Aug 21 11:17:20 PDT 2007
# Added support for difference summary mode
#
# Mark C. Miller, Tue Aug 28 16:25:05 PDT 2007
# Added logic to return early when in nowin mode; for testing
#
# Brad Whitlock, Wed Feb 3 17:124:23 PST 2010
# Don't use SetWindowLayout because it causes small test baseline images.
#
###############################################################################
def Initialize():
global winDbMap
global cmfeMode
global oldw
global noWinMode
#
# Open left and right database operands
#
if OpenDatabase(dbl) == 0:
print "VisIt could not open ", dbl
sys.exit(3)
if OpenDatabase(dbr) == 0:
print "VisIt could not open ", dbr
sys.exit(3)
#
# Make a 2x2 window layout as follows
# 1: L-CMFE(R) 2: R-CMFE(L) -- only when cmfeMode==1
# 3: L 4: R
SetCloneWindowOnFirstRef(1)
ToggleLockTime()
ToggleLockViewMode()
for i in (0,1,2):
SetActiveWindow(1)
CloneWindow()
SetActiveWindow(1)
SyncTimeStates(0)
# If we were able to create any expressions, let's set up some plots based on the
# first one. That way, we can also set up some annotations.
winDbMap = {1 : dbl, 2 : dbr, 3 : dbl, 4 : dbr}
if len(diffVars) > 0:
theVar = GetDiffVarNames(diffVars[0])
windowsToVars = {1 : theVar[1], 2 : theVar[1], 3 : theVar[0], 4 : theVar[0]}
for win in (1,2,3,4):
SetActiveWindow(win)
DeleteAllPlots()
ActivateDatabase(winDbMap[win])
if win == 2 and cmfeMode == 0:
continue
AddPlot("Pseudocolor", windowsToVars[win])
else:
print "No plots are being set up by default since the databases did not have any scalars in common."
sys.exit(5)
# Set up text annotations.
windowsToAnnots = {1 : "L-ConnCMFE(R)", 2 : "Unused", 3 : "Left-db", 4 : "Right-db"}
if cmfeMode == 1:
windowsToAnnots = {1 : "L-PosCMFE(R)", 2 : "R-PosCMFE(L)", 3 : "Left-db", 4 : "Right-db"}
for win in (1,2,3,4):
if win == 2 and cmfeMode == 0:
continue
SetActiveWindow(win)
annot = CreateAnnotationObject("Text2D")
annot.text = windowsToAnnots[win]
annot.height = 0.03
annot.position = (0.70,0.95)
annot.useForegroundForTextColor = 0
annot.textColor = (255,0,0,255)
annot.fontBold = 1
SetActiveWindow(1)
CreateDatabaseCorrelation("DIFF", (dbl, dbr), 0)
# Open the GUI
if noWinMode == 0:
OpenGUI()
else:
return
SetWindowArea(410,0,1100,1100)
# Register macro only seems to work from window 1
SetActiveWindow(1)
RegisterMacro("DiffSummary", DiffSummary)
RegisterMacro("ToggleMesh", ToggleMesh)
RegisterMacro("ToggleBoundary", ToggleBoundary)
RegisterMacro("SyncWinsL-R", SyncWinsL_R)
RegisterMacro("SyncWinsR-L", SyncWinsR_L)
RegisterMacro("SyncWinsLeft", SyncWinsLeft)
RegisterMacro("SyncWinsRight", SyncWinsRight)
RegisterMacro("SyncTimeL-R", SyncTimeL_R)
RegisterMacro("SyncTimeR-L", SyncTimeR_L)
RegisterMacro("SyncTimeLeft", SyncTimeLeft)
RegisterMacro("SyncTimeRight", SyncTimeRight)
RegisterMacro("ToggleHidePlot0", ToggleHidePlot0)
RegisterMacro("ToggleHidePlot1", ToggleHidePlot1)
RegisterMacro("ToggleHidePlot2", ToggleHidePlot2)
RegisterMacro("ToggleHidePlot3", ToggleHidePlot3)
RegisterMacro("ToggleHidePlot4", ToggleHidePlot4)
RegisterMacro("ToggleHidePlot5", ToggleHidePlot5)
for win in (1,2,3,4):
SetActiveWindow(win)
DrawPlots()
SetActiveWindow(1)
if diffSummaryOnly == 0:
print "Type 'help()' to get more information on using 'visit -diff'"
###############################################################################
# Function: ChangeVar
#
# Purpose: Change the currently plotted variable in all windows
#
# Programmer: Mark C. Miller
# Date: Wed Jul 18 18:12:28 PDT 2007
#
# Modifications:
#
# Mark C. Miller, Tue Aug 28 16:25:05 PDT 2007
# Added logic to detect use of var from 'diff/' menu and issue warning
#
###############################################################################
def ChangeVar(new_var):
leadingDiff = re.search("^diff/(.*)", new_var)
if leadingDiff != None:
print "Passed variable from 'diff/' menu to ChangeVar()."
print "Pass only the original name of the variable to ChangeVar()."
print "Removing leading 'diff/' and using name \"%s\""%leadingDiff.group(1)
new_var = leadingDiff.group(1)
varType = GetVarType(mdl, new_var)
if varType == "Unknown":
print "Unable to find variable type for variable \"%s\""%new_var
return
for win in (1,2,3,4):
if win == 2 and cmfeMode == 0:
continue
SetActiveWindow(win)
plotToChange = -1
pl = GetPlotList()
for p in range(pl.GetNumPlots()):
plotType = pl.GetPlots(p).plotType
plotTypeName = PlotPlugins()[plotType]
if varType == "Material" and \
(plotTypeName == "Boundary" or \
plotTypeName == "FilledBoundary"):
plotToChange = p
elif varType == "Scalar" and \
(plotTypeName == "Contour" or \
plotTypeName == "Histogram" or \
plotTypeName == "Pseudocolor" or \
plotTypeName == "Spreadsheet" or \
plotTypeName == "Surface" or \
plotTypeName == "Volume"):
plotToChange = p
elif varType == "Vector" and \
(plotTypeName == "Streamline" or \
plotTypeName == "Vector" or \
plotTypeName == "Truecolor"):
plotToChange = p
elif varType == plotTypeName:
plotToChange = p
if plotToChange != -1:
break
if plotToChange != -1:
SetActivePlots((p,))
if win == 1:
ChangeActivePlotsVar("diff/%s"%new_var);
else:
ChangeActivePlotsVar(new_var);
else:
print "Unable to find an existing plot compatible with the variable \"%s\""%new_var
SetActiveWindow(1)
###############################################################################
# Function: HideAllUnHiddenPlots
#
# Purpose: Hides all plots that are currently NOT hidden in the specified
# window
#
# Programmer: Mark C. Miller
# Date: Mon Aug 27 16:58:29 PDT 2007
#
###############################################################################
def HideAllUnHiddenPlots(winId):
SetActiveWindow(winId)
pl = GetPlotList()
plotsToHide = []
for p in range(pl.GetNumPlots()):
plot = pl.GetPlots(p)
if plot.hiddenFlag == 0:
plotsToHide.append(p)
SetActivePlots(tuple(plotsToHide))
HideActivePlots()
return tuple(plotsToHide)
###############################################################################
# Function: UnHideAllUnHiddenPlots
#
# Purpose: Undoes the effect of HideAllUnHiddenPlots.
#
# Programmer: Mark C. Miller
# Date: Mon Aug 27 16:58:29 PDT 2007
#
###############################################################################
def UnHideAllUnHiddenPlots(winId, plotsToUnHide):
SetActiveWindow(winId)
SetActivePlots(plotsToUnHide)
HideActivePlots()
###############################################################################
# Function: ToggleHidePlot
#
# Purpose: Toggle hiding a specified plot id
#
# Programmer: Mark C. Miller
# Date: Wed Jul 18 18:12:28 PDT 2007
#
###############################################################################
def ToggleHidePlot(plotId):
# determine target of the toggle (to hide or unhide)
hiddenTarget = 0
for win in (1,2,3,4):
if win == 2 and cmfeMode == 0:
continue
SetActiveWindow(win)
plotList = GetPlotList()
if plotId >= plotList.GetNumPlots():
print "Plot id %d is out of range 0...%d"%(plotId,plotList.GetNumPlots()-1)
return
if plotList.GetPlots(plotId).hiddenFlag == 1:
hiddenTarget = hiddenTarget - 1
else:
hiddenTarget = hiddenTarget + 1
# At this point, if hiddenTarget is largely negative, the target
# state is to UNhide the plots, else hide the plots
for win in (1,2,3,4):
if win == 2 and cmfeMode == 0:
continue
SetActiveWindow(win)
plotList = GetPlotList()
if plotList.GetPlots(plotId).hiddenFlag == 1:
if hiddenTarget <= 0:
SetActivePlots((plotId,))
HideActivePlots()
else:
if hiddenTarget > 0:
SetActivePlots((plotId,))
HideActivePlots()
SetActiveWindow(1)
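# Sketch of the majority vote above: with all four windows active, a plot hidden in
# three of them gives hiddenTarget = -2, so the second pass un-hides it in the windows
# where it was hidden; a plot visible in three windows gives hiddenTarget = +2, so the
# visible copies are hidden instead.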
def ToggleHidePlot0():
ToggleHidePlot(0)
def ToggleHidePlot1():
ToggleHidePlot(1)
def ToggleHidePlot2():
ToggleHidePlot(2)
def ToggleHidePlot3():
ToggleHidePlot(3)
def ToggleHidePlot4():
ToggleHidePlot(4)
def ToggleHidePlot5():
ToggleHidePlot(5)
###############################################################################
# Function: TogglePlot
#
# Purpose: Toggle a specified plot type on/off
#
# Determine all <plotTypeName> plots to be displayed or hidden based on
# the plot variables currently in window 1. First, find all the
# plots that are <plotTypeName> plots and record their hidden state in
# the plotInfo map. Next, find all the plots that are not <plotTypeName>,
# and see if the associated <plotTypeName> for those plots is already in
# the plotInfo map. If it is, then that variable's <plotTypeName> is already
# present and its status is recorded. Otherwise, that variable's
# <plotTypeName> gets added to the plotInfo map with a status of 0 (!exist)
#
# Programmer: Mark C. Miller
# Date: Wed Jul 18 18:12:28 PDT 2007
#
###############################################################################
def TogglePlot(plotTypeName):
for win in (1,2,3,4):
if win == 2 and cmfeMode == 0:
continue
plotInfo = {}
SetActiveWindow(win)
pl = GetPlotList()
for p in range(pl.GetNumPlots()):
plot = pl.GetPlots(p)
if PlotPlugins()[plot.plotType] == plotTypeName:
plotName = plot.plotVar
try:
plotName = re.search("diff/(.*)",plot.plotVar).group(1)
except:
try:
plotName = re.search("(.*)",plot.plotVar).group(1)
except:
plotName = plot.plotVar
if plot.hiddenFlag == 1:
plotInfo[plotName] = (1, p) # exists and is hidden
else:
plotInfo[plotName] = (2, p) # exists and is displayed
#
# Second pass for the non-<plotTypeName> plots. Will determine only
# <plotTypeName> plots that need to be added.
#
for p in range(pl.GetNumPlots()):
plot = pl.GetPlots(p)
if PlotPlugins()[plot.plotType] != plotTypeName:
varName = plot.plotVar
try:
varName = re.search("diff/(.*)",plot.plotVar).group(1)
except:
try:
varName = re.search("(.*)",plot.plotVar).group(1)
except:
varName = plot.plotVar
plotName ="Unknown"
if plotTypeName == "Mesh":
plotName = MeshForVar(mdl,varName)
elif plotTypeName == "Boundary":
plotName = MeshForVar(mdl,varName)
plotName = MatForMesh(mdl,plotName)
if plotName == "Unknown":
continue
if plotName not in plotInfo:
plotInfo[plotName] = (0, p)
#
# At this point, plotInfo is populated with the names of all the <plotTypeName>
    # plots and whether they are currently non-existent (0), hidden (1) or
# displayed (2) along with their index (p) in the plot list. So, now,
# we determine the target state of the TogglePlot command. Should the
# <plotTypeName> plot(s) be on (that is exist and displayed) or off (not-exist
# or hidden)? In general, the situation can be mixed at this point and
# so we determine based on majority status
#
if win == 1:
targetState = 0
for m in plotInfo.keys():
if plotInfo[m][0] == 0 or plotInfo[m][0] == 1:
targetState = targetState + 1
else:
targetState = targetState - 1
#
# First handle toggling of existing plots (hidden or displayed)
#
plotsToToggle = []
for m in plotInfo.keys():
if targetState > 0 and plotInfo[m][0] == 1:
plotsToToggle.append(plotInfo[m][1])
if targetState <= 0 and plotInfo[m][0] == 2:
plotsToToggle.append(plotInfo[m][1])
if len(plotsToToggle) > 0:
SetActivePlots(tuple(plotsToToggle))
HideActivePlots()
#
# Now handle adding new <plotTypeName> plots if needed
#
if targetState > 0:
for m in plotInfo.keys():
if plotInfo[m][0] == 0:
AddPlot(plotTypeName, m)
DrawPlots()
SetActiveWindow(1)
def ToggleMesh():
TogglePlot("Mesh")
def ToggleBoundary():
TogglePlot("Boundary")
###############################################################################
# Function: MinimizePickOutput
#
# Purpose: Reduce output generated by pick on stdout to bare minimum for
# PickLoop function.
#
# Programmer: Mark C. Miller
# Date: Wed Jul 18 18:12:28 PDT 2007
#
###############################################################################
def MinimizePickOutput():
global pa_orig
SuppressQueryOutputOn()
    pa_orig = GetPickAttributes()
    # Fetch a separate copy to modify; assigning pa = pa_orig would alias the
    # same object and clobber the saved attributes restored later.
    pa = GetPickAttributes()
pa.displayIncidentElements = 0
pa.showNodeId = 0
pa.showTimeStep = 0
pa.showMeshName = 0
pa.showZoneId = 0
pa.displayPickLetter = 1
SetPickAttributes(pa)
###############################################################################
# Function: UnMinimizePickOutput
#
# Purpose: Undoes the reduction in pick output made by MinimizePickOutput.
#
# Programmer: Mark C. Miller
# Date: Wed Jul 18 18:12:28 PDT 2007
#
###############################################################################
def UnMinimizePickOutput():
global pa_orig
SetPickAttributes(pa_orig)
SuppressQueryOutputOff()
###############################################################################
# Function: PickLoop
#
# Purpose: Perform a zone or node pick over a specified tuple of element ids.
# Also, handle case where user may have added variables to the
# PickAttributes to be returned during picking. Report the output in
# a useful tabular form.
#
# Programmer: Mark C. Miller
# Date: Wed Jul 18 18:12:28 PDT 2007
#
###############################################################################
def PickLoop(ids, pickType):
for win in (1,2,3,4):
if win == 2 and cmfeMode == 0:
continue
SetActiveWindow(win)
ClearPickPoints()
ResetPickLetter()
    s = ["","","",""]
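    # s[win-1] accumulates the parsed pick output per window:
    # s[0] = window 1 (DIFF), s[2] = window 3 (left db), s[3] = window 4 (right db);
    # s[1] (window 2) is only populated when cmfeMode != 0.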
MinimizePickOutput()
npicks = 1
for win in (1,2,3,4):
if win == 2 and cmfeMode == 0:
continue
SetActiveWindow(win)
for id in ids:
if pickType == "zonal":
PickByZone(id)
else:
PickByNode(id)
tmp = GetPickOutput()
picks = []
if win == 1:
picks = re.findall("diff/(.*): *<(zonal|nodal)> = ([0-9.e+\-]*)\s*",tmp)
npicks = len(picks)
for p in range(len(picks)):
s[win-1] = s[win-1] + "%s=%s"%(picks[p][0], picks[p][2]) + ";"
else:
picks = re.findall("(.*): *<(zonal|nodal)> = ([0-9.e+\-]*)\s*",tmp)
for p in range(len(picks)):
s[win-1] = s[win-1] + "%s"%picks[p][2] + ";"
dpicks = s[0].split(";")
lpicks = s[2].split(";")
rpicks = s[3].split(";")
result = " id | var | DIFF | dbLeft | dbRight \n"
result = result + "---------|------------------|------------------|------------------|------------------\n"
k = 0
for id in ids:
for p in range(npicks):
dsplit = dpicks[k].split("=")
result = result + "% 9d|% 18s|% 18s|% 18s|% 18s\n"%(id,dsplit[0],dsplit[1],lpicks[k],rpicks[k])
k = k + 1
# Disabled for now: winds up poorly formatting the message
# ClientMethod("MessageBoxOk", result)
print result
SetActiveWindow(1)
UnMinimizePickOutput()
def ZPick(zoneIds):
PickLoop(zoneIds, "zonal")
def NPick(nodeIds):
PickLoop(nodeIds, "nodal")
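# Example usage (hypothetical element ids):
#   ZPick((12, 57))   # zonal picks over zones 12 and 57 in every window
#   NPick((3,))       # nodal pick over node 3
# Each call prints a table of DIFF / dbLeft / dbRight values per picked variable.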
###############################################################################
# Function: SyncWindows
#
# Purpose: Bring all the windows up to date with contents of the specified
# source window. This is done by deleting all the other windows and
# re-cloning them from the source. Although this is costly, it is
# the only easy way to ensure that all plots, operators, lighting,
# etc., are consistent.
#
# Programmer: Mark C. Miller
# Date: Wed Jul 18 18:12:28 PDT 2007
#
# Modifications:
# Cyrus Harrison, Mon May 16 09:15:21 PDT 2011
# Update argument passed to GetAnnotationObject().
#
###############################################################################
def SyncWindows(srcWin):
global dbr
global dbl
#
# Get List of active plots
#
activePlotsList = []
hiddenPlotsList = []
SetActiveWindow(srcWin)
srcPlots = GetPlotList()
for p in range(srcPlots.GetNumPlots()):
if srcPlots.GetPlots(p).activeFlag == 1:
activePlotsList.append(p)
if srcPlots.GetPlots(p).hiddenFlag == 1:
hiddenPlotsList.append(p)
#
# Delete the old windows so we can re-clone them
#
for win in (1,2,3,4):
if win == 2 and cmfeMode == 0:
continue
if win == srcWin:
continue
SetActiveWindow(win)
DeleteWindow()
#
# Clone the src window and adjust variable names
#
for win in (1,2,3,4):
if win == 2 and cmfeMode == 0:
continue
if win == srcWin:
continue
SetActiveWindow(srcWin)
CloneWindow()
SetActiveWindow(win)
# re-set the annotations
ao = GetAnnotationObject(GetAnnotationObjectNames()[1])
if win == 1:
ReplaceDatabase(dbl)
if cmfeMode == 0:
ao.text = "L-ConnCMFE(R)"
else:
ao.text = "L-PosCMFE(R)"
elif win == 2:
ReplaceDatabase(dbr)
if cmfeMode == 0:
ao.text = "Unused"
else:
ao.text = "R-PosCMFE(L)"
elif win == 3:
ReplaceDatabase(dbl)
ao.text = "Left-db"
elif win == 4:
ReplaceDatabase(dbr)
ao.text = "Right-db"
ao.position = (0.7, 0.95)
# reset the plot variables
plots = GetPlotList()
for p in range(plots.GetNumPlots()):
pv = plots.GetPlots(p).plotVar
if IsNotScalarVarPlotType(plots.GetPlots(p).plotType):
continue
theVar = GetDiffVarNames(pv)
if win == 1 and pv == theVar[0]:
print "Warning: Looks like you are not displaying a diff variable in the DIFF window"
SetActivePlots((p,))
if win == 1:
ChangeActivePlotsVar(theVar[1])
else:
ChangeActivePlotsVar(theVar[0])
DrawPlots()
hiddenPlotsTmp = tuple(hiddenPlotsList)
if len(hiddenPlotsTmp) > 0:
SetActivePlots(tuple(hiddenPlotsList))
HideActivePlots()
SetActivePlots(tuple(activePlotsList))
SetActiveWindow(srcWin)
###############################################################################
# Function: SyncWins...
#
# Purpose: Stubs to register as macros
#
# Programmer: Mark C. Miller
# Date: Wed Jul 18 18:12:28 PDT 2007
#
###############################################################################
def SyncWinsL_R():
SyncWindows(1)
def SyncWinsR_L():
SyncWindows(2)
def SyncWinsLeft():
SyncWindows(3)
def SyncWinsRight():
SyncWindows(4)
###############################################################################
# Function: CompareMinMaxInfos
#
# Purpose: Sorter function for sorting output from DiffSummary
#
# Programmer: Mark C. Miller
# Date: Wed Jul 18 18:12:28 PDT 2007
#
###############################################################################
def CompareMinMaxInfos(a1, a2):
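    # a1/a2 are DiffSummary tuples:
    #   (name, min, min-elem-type, min-elem-id, min-dom, max, max-elem-type, max-elem-id, max-dom)
    # Compare on the larger of |min| and |max| so the final sort is in decreasing
    # difference magnitude (Python 2 cmp-style comparator: returns -1, 0 or 1).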
v1min = abs(a1[1])
v1max = abs(a1[5])
v2min = abs(a2[1])
v2max = abs(a2[5])
v1 = v1min
if v1max > v1min:
v1 = v1max
v2 = v2min
if v2max > v2min:
v2 = v2max
if v1 < v2:
return 1
elif v1 > v2:
return -1
else:
return 0
###############################################################################
# Function: DiffSummary
#
# Purpose: Iterate over all variables in diffVars and report differences.
#
# Programmer: Mark C. Miller
# Date: Wed Jul 18 18:12:28 PDT 2007
#
# Modifications:
# Mark C. Miller, Tue Aug 21 10:03:35 PDT 2007
# Added calls to disable re-draws and then re-enable to accelerate
#
# Mark C. Miller, Tue Aug 21 11:17:20 PDT 2007
# Added support for difference summary mode
#
# Mark C. Miller, Mon Aug 27 17:00:24 PDT 2007
# Added calls to Hide/UnHide all unhidden plots so we don't get a
# "plot dimensions don't match" error message from VisIt when displaying
# each variable in the list.
#
# Mark C. Miller, Tue Aug 28 16:25:05 PDT 2007
# Added return of result string to facilitate testing.
#
###############################################################################
def DiffSummary():
SetActiveWindow(1)
plotsToUnHide = HideAllUnHiddenPlots(1)
DisableRedraw()
SuppressQueryOutputOn()
diffSummary = []
resultStr=""
for v in diffVars:
vname = re.search("diff/(.*)",v)
if vname != None:
vname = vname.group(1)
else:
vname = v
if diffSummaryOnly == 1:
print "Processing variable \"%s\""%v
AddPlot("Pseudocolor", v)
DrawPlots()
Query("MinMax")
qo = GetQueryOutputString()
qv = GetQueryOutputValue()
mininfo = re.search("Min = ([0-9.e+\-]*) \((node|zone) ([0-9]*) (in domain ([0-9]*) at|at())",qo)
maxinfo = re.search("Max = ([0-9.e+\-]*) \((node|zone) ([0-9]*) (in domain ([0-9]*) at|at())",qo)
# val node|zone elem-id dom-id
# 0 1/5 2/6 3/7 4/8
if mininfo != None and maxinfo != None:
diffSummary.append( \
(vname[-12:], qv[0], mininfo.group(2), mininfo.group(3), mininfo.group(5), \
qv[1], maxinfo.group(2), maxinfo.group(3), maxinfo.group(5)))
else:
diffSummary.append((vname[-12:], 0.0, "Unknown", "Unknown", "Unknown", \
0.0, "Unknown", "Unknown", "Unknown"))
#time.sleep(0.5)
DeleteActivePlots()
SuppressQueryOutputOff()
print "Difference Summary sorted in decreasing difference magnitude...\n"
print "NOTE: Differences are computed in only single precision"
print " var |max -diff | max -elem ; -dom |max +diff | max +elem ; +dom |"
print "------------|------------|--------------------|------------|--------------------|"
diffSummary.sort(CompareMinMaxInfos)
for k in range(len(diffSummary)):
if diffSummary[k][1] == 0.0 and diffSummary[k][5] == 0.0:
print "% 12.12s| NO DIFFERENCES"%diffSummary[k][0]
resultStr = resultStr + "% 12.12s| NO DIFFERENCES\n"%diffSummary[k][0]
else:
print "% 12.12s|%+12.7f|%4s % 7s;% 7s|%+12.7f|%4s % 7s;% 7s|"%diffSummary[k]
resultStr = resultStr + "% 12.12s|%+12.7f|%4s % 7s;% 7s|%+12.7f|%4s % 7s;% 7s|\n"%diffSummary[k]
UnHideAllUnHiddenPlots(1, plotsToUnHide)
RedrawWindow()
return resultStr
###############################################################################
# Main program and global variables
#
# Programmer: Mark C. Miller
# Date: Wed Jul 18 18:12:28 PDT 2007
#
# Modifications:
# Mark C. Miller, Tue Aug 21 11:17:20 PDT 2007
# Added support for difference summary mode
###############################################################################
diffVars = []
dbl = "notset"
dbr = "notset"
mdl = 0
mdr = 0
forcePosCMFE = 0
diffSummaryOnly = 0
cmfeMode = 0
currentTimeState = -1
noWinMode = 0
ProcessCLArgs()
Initialize()
if diffSummaryOnly == 1:
DiffSummary()
sys.exit()
| 37.570147 | 129 | 0.564462 |
ce0c39c8ce02953e9ad51bf9f31dd13082d8c747 | 8,992 | py | Python | agent.py | workofart/brawlstars-ai | df5f7855bf0c0bfe68d2eaf4a9e3a916c7d7189c | ["MIT"] | 12 | 2020-01-10T16:08:26.000Z | 2022-02-03T12:10:59.000Z | agent.py | workofart/brawlstars-ai | df5f7855bf0c0bfe68d2eaf4a9e3a916c7d7189c | ["MIT"] | 1 | 2020-07-09T01:48:17.000Z | 2020-07-21T18:22:37.000Z | agent.py | workofart/brawlstars-ai | df5f7855bf0c0bfe68d2eaf4a9e3a916c7d7189c | ["MIT"] | 5 | 2021-02-21T08:44:17.000Z | 2022-02-03T12:11:02.000Z |
import tensorflow as tf
import random, time
import numpy as np
from experiencebuffer import Experience_Buffer
from net.dqnet import DQN_NNET
from utilities.utilities import take_action, get_latest_run_count
from utilities.window import WindowMgr
# Hyper Parameters for DQN
LEARNING_RATE = 1e-3
INITIAL_EPSILON = 0.7 # starting value of epsilon
FINAL_EPSILON = 0.05 # ending value of epsilon
DECAY = 0.993 # epsilon decay
GAMMA = 0.90 # discount factor for q value
UPDATE_TARGET_NETWORK = 2
SAVE_NETWORK = 3
w = WindowMgr()
class BrawlAgent:
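    """DQN agent with two heads: one Q-network chooses a movement and a second
    chooses an action, each with its own target network. Exploration is
    epsilon-greedy; transitions are stored in an experience replay buffer."""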
def __init__(self, env):
# init some parameters
self.epsilon = INITIAL_EPSILON
self.final_epsilon = FINAL_EPSILON
self.env = env
self.replay_buffer = Experience_Buffer()
self.state_dim = env.observation_space.shape[1] # TODO, need to define a structure
self.action_dim = len(env.action_space)
self.movement_dim = len(env.movement_space)
self.learning_rate = LEARNING_RATE
self.update_target_net_freq = UPDATE_TARGET_NETWORK # how many timesteps to update target network params
self.is_updated_target_net = False
self.isTest = False
# Action Q_networks
self.a_network = DQN_NNET(self.state_dim, self.action_dim, self.learning_rate, 'action_q_network')
self.a_target_network = DQN_NNET(self.state_dim, self.action_dim, self.learning_rate, 'action_target_q_network')
# Movement Q_networks
self.m_network = DQN_NNET(self.state_dim, self.movement_dim, self.learning_rate, 'movement_q_network')
self.m_target_network = DQN_NNET(self.state_dim, self.movement_dim, self.learning_rate, 'movement_target_q_network')
# Init session
# self.session = tf.InteractiveSession()
self.session = tf.get_default_session()
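        # Assumes the caller has already created and entered a TF session
        # (tf.get_default_session() returns None when no session is active).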
self.session.run(tf.initializers.global_variables())
# # Tensorboard
self.summary_writer = tf.summary.FileWriter('logs/' + str(get_latest_run_count()))
self.summary_writer.add_graph(self.session.graph)
# loading networks
self.saver = tf.train.Saver()
checkpoint = tf.train.get_checkpoint_state('logs/' + str(get_latest_run_count()-2) + "/saved_networks")
if (checkpoint and checkpoint.model_checkpoint_path):
self.saver.restore(self.session, checkpoint.model_checkpoint_path)
print("Successfully loaded:", checkpoint.model_checkpoint_path)
else:
print("Could not find old network weights")
def act(self, state):
# if self.isTrain is True and self.epsilon > FINAL_EPSILON:
# self.epsilon -= (INITIAL_EPSILON - FINAL_EPSILON) / self.env.data_length
if random.random() <= self.epsilon and self.isTest is not True:
action = random.randint(0, self.action_dim - 1)
movement = random.randint(0, self.movement_dim - 1)
else:
a_output = self.a_network.output.eval(feed_dict = {
self.a_network.state_input:state
})[0]
action = np.argmax(a_output)
m_output = self.m_network.output.eval(feed_dict = {
self.m_network.state_input:state
})[0]
movement = np.argmax(m_output)
# print('Selected Action: {0}'.format(action))
# print('Selected Movement: {0}'.format(movement))
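        # Bring the emulator window to the foreground before injecting input
        # ("雷电模拟器" is the window title of the LDPlayer Android emulator).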
w.find_window_wildcard("雷电模拟器")
w.set_foreground()
take_action(movement, action)
return [movement, action]
def perceive(self, state, action, reward, next_state, done):
# Assumes "replay_buffer" contains [state, movement, action, reward, next_state, done]
one_hot_movement = np.zeros(self.movement_dim)
one_hot_movement[action[0]] = 1
one_hot_action = np.zeros(self.action_dim)
one_hot_action[action[1]] = 1
self.replay_buffer.add([state, one_hot_movement, one_hot_action, reward, next_state, done])
def update_target_q_net_if_needed(self, step):
if step % self.update_target_net_freq == 0 and step > 0 and self.is_updated_target_net is False:
            # Get the parameters of our DQNNetworks and Target_networks.
            # Note: these scope names must match the names passed to DQN_NNET in
            # __init__ (assuming DQN_NNET scopes its variables under that name).
            m_from_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, "movement_q_network")
            m_to_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, "movement_target_q_network")
            a_from_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, "action_q_network")
            a_to_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, "action_target_q_network")
op_holder = []
# Update our target_q_network parameters with q_network parameters
for from_var,to_var in zip(m_from_vars,m_to_vars):
op_holder.append(to_var.assign(from_var))
for from_var,to_var in zip(a_from_vars,a_to_vars):
op_holder.append(to_var.assign(from_var))
self.session.run(op_holder)
self.is_updated_target_net = True
print('Timesteps:{} | Target Q-network has been updated.'.format(self.env.time_step))
def train_dqn_network(self, ep, batch_size=32):
self.update_target_q_net_if_needed(ep)
# Assumes "replay_samples" contains [state, movement, action, reward, next_state, done]
replay_samples = self.replay_buffer.sample(batch_size)
state_batch = np.reshape([data[0] for data in replay_samples], (batch_size, self.state_dim))
movement_batch = np.reshape([data[1] for data in replay_samples], (batch_size, self.movement_dim))
action_batch = np.reshape([data[2] for data in replay_samples], (batch_size, self.action_dim))
reward_batch = np.reshape([data[3] for data in replay_samples], (batch_size, 1))
next_state_batch = np.reshape([data[4] for data in replay_samples], (batch_size, self.state_dim))
# Get the Target Q-value for the next state using the target network,
# by making a second forward-prop
m_target_q_val_batch = self.session.run(self.m_target_network.output, feed_dict={self.m_target_network.state_input:next_state_batch})
a_target_q_val_batch = self.session.run(self.a_target_network.output, feed_dict={self.a_target_network.state_input:next_state_batch})
# Get Q values for next state using the q-network
m_q_val_batch = self.session.run(self.m_network.output, feed_dict={self.m_network.state_input:next_state_batch})
a_q_val_batch = self.session.run(self.a_network.output, feed_dict={self.a_network.state_input:next_state_batch})
# Target Q-value - "advantages/q-vals" derived from rewards
m_y_batch = []
a_y_batch = []
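        # Double-DQN-style target: the online Q-network picks the argmax
        # movement/action for the next state, while the target network supplies
        # that choice's Q-value.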
for i in range(0, batch_size):
# Use Q-network to select the best action for next state
movement = np.argmax(m_q_val_batch[i])
action = np.argmax(a_q_val_batch[i])
done = replay_samples[i][5]
if done:
m_y_batch.append(reward_batch[i])
a_y_batch.append(reward_batch[i])
else:
m_y_batch.append(reward_batch[i] + GAMMA * m_target_q_val_batch[i][movement])
a_y_batch.append(reward_batch[i] + GAMMA * a_target_q_val_batch[i][action])
# Train on one batch on the Q-network
start_time = time.time()
_, m_c, m_summary = self.session.run([self.m_network.optimizer, self.m_network.cost, self.m_network.merged_summary],
# _, m_c = self.session.run([self.m_network.optimizer, self.m_network.cost],
feed_dict={
self.m_network.Q_input: np.reshape(m_y_batch, (batch_size, 1)),
self.m_network.action_input: movement_batch,
self.m_network.state_input: state_batch
}
)
_, a_c, a_summary = self.session.run([self.a_network.optimizer, self.a_network.cost, self.a_network.merged_summary],
# _, a_c = self.session.run([self.a_network.optimizer, self.a_network.cost],
feed_dict={
self.a_network.Q_input: np.reshape(a_y_batch, (batch_size, 1)),
self.a_network.action_input: action_batch,
self.a_network.state_input: state_batch
}
)
# print('Training time: ' + str(time.time() - start_time))
self.summary_writer.add_summary(m_summary, ep)
self.summary_writer.add_summary(a_summary, ep)
        # periodically save the network (every SAVE_NETWORK training calls)
if ep % SAVE_NETWORK == 0:
self.saver.save(self.session, 'logs/' + str(get_latest_run_count()-1) +'/saved_networks/' + 'network' + '-dqn', global_step = ep)
return m_c, a_c
| 48.344086 | 141 | 0.658474 |
85b53f9bf5a613e008af6b1cf773c3ab6d0fa1c6 | 41,526 | py | Python | test/integration/test_integration_basics.py | zhaomoy/nighthawk | 8e8d925f193d1787dff31b4d401745505803641b | ["Apache-2.0"] | null | null | null | test/integration/test_integration_basics.py | zhaomoy/nighthawk | 8e8d925f193d1787dff31b4d401745505803641b | ["Apache-2.0"] | null | null | null | test/integration/test_integration_basics.py | zhaomoy/nighthawk | 8e8d925f193d1787dff31b4d401745505803641b | ["Apache-2.0"] | null | null | null |
"""Tests Nighthawk's basic functionality."""
import json
import logging
import math
import os
import pytest
import subprocess
import sys
import time
from threading import Thread
from test.integration.common import IpVersion
from test.integration.integration_test_fixtures import (
    http_test_server_fixture, https_test_server_fixture,
multi_http_test_server_fixture, multi_https_test_server_fixture, quic_test_server_fixture,
server_config, server_config_quic)
from test.integration import asserts
from test.integration import utility
# TODO(oschaaf): we mostly verify stats observed from the client-side. Add expectations
# for the server side as well.
@pytest.mark.skipif(utility.isSanitizerRun(), reason="Unstable and very slow in sanitizer runs")
def test_http_h1(http_test_server_fixture):
"""Test http1 over plain http.
Runs the CLI configured to use plain HTTP/1 against our test server, and sanity
checks statistics from both client and server.
"""
parsed_json, _ = http_test_server_fixture.runNighthawkClient([
http_test_server_fixture.getTestServerRootUri(), "--duration", "100",
"--termination-predicate", "benchmark.http_2xx:24"
])
counters = http_test_server_fixture.getNighthawkCounterMapFromJson(parsed_json)
asserts.assertCounterEqual(counters, "benchmark.http_2xx", 25)
asserts.assertCounterEqual(counters, "upstream_cx_http1_total", 1)
asserts.assertCounterEqual(counters, "upstream_cx_rx_bytes_total", 3400)
asserts.assertCounterEqual(counters, "upstream_cx_total", 1)
asserts.assertCounterGreaterEqual(counters, "upstream_cx_tx_bytes_total", 500)
asserts.assertCounterEqual(counters, "upstream_rq_pending_total", 1)
asserts.assertCounterEqual(counters, "upstream_rq_total", 25)
asserts.assertCounterEqual(counters, "default.total_match_count", 1)
global_histograms = http_test_server_fixture.getNighthawkGlobalHistogramsbyIdFromJson(parsed_json)
asserts.assertEqual(int(global_histograms["benchmark_http_client.response_body_size"]["count"]),
25)
asserts.assertEqual(int(global_histograms["benchmark_http_client.response_header_size"]["count"]),
25)
asserts.assertEqual(
int(global_histograms["benchmark_http_client.response_body_size"]["raw_mean"]), 10)
asserts.assertEqual(
int(global_histograms["benchmark_http_client.response_header_size"]["raw_mean"]), 97)
asserts.assertEqual(int(global_histograms["benchmark_http_client.response_body_size"]["raw_min"]),
10)
asserts.assertEqual(
int(global_histograms["benchmark_http_client.response_header_size"]["raw_min"]), 97)
asserts.assertEqual(int(global_histograms["benchmark_http_client.response_body_size"]["raw_max"]),
10)
asserts.assertEqual(
int(global_histograms["benchmark_http_client.response_header_size"]["raw_max"]), 97)
asserts.assertEqual(
int(global_histograms["benchmark_http_client.response_body_size"]["raw_pstdev"]), 0)
asserts.assertEqual(
int(global_histograms["benchmark_http_client.response_header_size"]["raw_pstdev"]), 0)
asserts.assertEqual(len(counters), 12)
def _mini_stress_test(fixture, args):
  # Run a test with more rps than we can handle, and a very small client-side queue.
  # We should observe both lots of successful requests as well as time spent in blocking mode.
parsed_json, _ = fixture.runNighthawkClient(args)
counters = fixture.getNighthawkCounterMapFromJson(parsed_json)
# We set a reasonably low expectation of 100 requests. We set it low, because we want this
# test to succeed on a reasonable share of setups (hopefully practically all).
MIN_EXPECTED_REQUESTS = 100
  asserts.assertCounterGreaterEqual(counters, "benchmark.http_2xx", MIN_EXPECTED_REQUESTS)
if "--h2" in args:
asserts.assertCounterEqual(counters, "upstream_cx_http2_total", 1)
else:
asserts.assertCounterEqual(counters, "upstream_cx_http1_total", 1)
global_histograms = fixture.getNighthawkGlobalHistogramsbyIdFromJson(parsed_json)
if "--open-loop" in args:
asserts.assertEqual(int(global_histograms["sequencer.blocking"]["count"]), 0)
else:
asserts.assertGreaterEqual(int(global_histograms["sequencer.blocking"]["count"]), 1)
asserts.assertGreaterEqual(
int(global_histograms["benchmark_http_client.request_to_response"]["count"]), 1)
asserts.assertGreaterEqual(int(global_histograms["benchmark_http_client.latency_2xx"]["count"]),
1)
return counters
# The mini stress tests below are executing in closed-loop mode. As we guard the pool against
# overflows, we can set fixed expectations with respect to overflows and anticipated pending
# totals.
def test_http_h1_mini_stress_test_with_client_side_queueing(http_test_server_fixture):
"""Run a max rps test with the h1 pool against our test server, using a small client-side queue."""
counters = _mini_stress_test(http_test_server_fixture, [
http_test_server_fixture.getTestServerRootUri(), "--rps", "999999", "--max-pending-requests",
"10", "--connections", "1", "--duration", "100", "--termination-predicate",
"benchmark.http_2xx:99", "--simple-warmup"
])
asserts.assertCounterGreaterEqual(counters, "upstream_rq_pending_total", 11)
asserts.assertCounterGreaterEqual(counters, "upstream_cx_overflow", 10)
def test_http_h1_mini_stress_test_without_client_side_queueing(http_test_server_fixture):
"""Run a max rps test with the h1 pool against our test server, with no client-side queueing."""
counters = _mini_stress_test(http_test_server_fixture, [
http_test_server_fixture.getTestServerRootUri(), "--rps", "999999", "--connections", "1",
"--duration", "100", "--termination-predicate", "benchmark.http_2xx:99"
])
asserts.assertCounterEqual(counters, "upstream_rq_pending_total", 1)
asserts.assertNotIn("upstream_cx_overflow", counters)
def test_http_h2_mini_stress_test_with_client_side_queueing(http_test_server_fixture):
"""Run a max rps test with the h2 pool against our test server, using a small client-side queue."""
counters = _mini_stress_test(http_test_server_fixture, [
http_test_server_fixture.getTestServerRootUri(), "--rps", "999999", "--max-pending-requests",
"10", "--h2", "--max-active-requests", "1", "--connections", "1", "--duration", "100",
"--termination-predicate", "benchmark.http_2xx:99", "--simple-warmup"
])
asserts.assertCounterEqual(counters, "upstream_rq_pending_total", 1)
asserts.assertCounterGreaterEqual(counters, "upstream_rq_pending_overflow", 10)
def test_http_h2_mini_stress_test_without_client_side_queueing(http_test_server_fixture):
"""Run a max rps test with the h2 pool against our test server, with no client-side queueing."""
counters = _mini_stress_test(http_test_server_fixture, [
http_test_server_fixture.getTestServerRootUri(), "--rps", "999999", "--h2",
"--max-active-requests", "1", "--connections", "1", "--duration", "100",
"--termination-predicate", "benchmark.http_2xx:99"
])
asserts.assertCounterEqual(counters, "upstream_rq_pending_total", 1)
asserts.assertNotIn("upstream_rq_pending_overflow", counters)
@pytest.mark.skipif(not utility.isRunningInCircleCi(),
reason="Has very high failure rate in local executions.")
@pytest.mark.skipif(utility.isSanitizerRun(), reason="Unstable and very slow in sanitizer runs")
def test_http_h1_mini_stress_test_open_loop(http_test_server_fixture):
"""Run an H1 open loop stress test. We expect higher pending and overflow counts."""
counters = _mini_stress_test(http_test_server_fixture, [
http_test_server_fixture.getTestServerRootUri(), "--rps", "10000", "--max-pending-requests",
"1", "--open-loop", "--max-active-requests", "1", "--connections", "1", "--duration", "100",
"--termination-predicate", "benchmark.http_2xx:99", "--simple-warmup"
])
# we expect pool overflows
asserts.assertCounterGreater(counters, "benchmark.pool_overflow", 10)
@pytest.mark.skipif(not utility.isRunningInCircleCi(),
reason="Has very high failure rate in local executions.")
@pytest.mark.skipif(utility.isSanitizerRun(), reason="Unstable and very slow in sanitizer runs")
def test_http_h2_mini_stress_test_open_loop(http_test_server_fixture):
"""Run an H2 open loop stress test. We expect higher overflow counts."""
counters = _mini_stress_test(http_test_server_fixture, [
http_test_server_fixture.getTestServerRootUri(), "--rps", "10000", "--max-pending-requests",
"1", "--h2", "--open-loop", "--max-active-requests", "1", "--duration", "100",
"--termination-predicate", "benchmark.http_2xx:99", "--simple-warmup"
])
# we expect pool overflows
asserts.assertCounterGreater(counters, "benchmark.pool_overflow", 10)
def test_http_h2(http_test_server_fixture):
"""Test h2 over plain http.
Runs the CLI configured to use h2c against our test server, and sanity
checks statistics from both client and server.
"""
parsed_json, _ = http_test_server_fixture.runNighthawkClient([
"--h2",
http_test_server_fixture.getTestServerRootUri(), "--max-active-requests", "1", "--duration",
"100", "--termination-predicate", "benchmark.http_2xx:24", "--rps", "100"
])
counters = http_test_server_fixture.getNighthawkCounterMapFromJson(parsed_json)
asserts.assertCounterEqual(counters, "benchmark.http_2xx", 25)
asserts.assertCounterEqual(counters, "upstream_cx_http2_total", 1)
asserts.assertCounterGreaterEqual(counters, "upstream_cx_rx_bytes_total", 1030)
asserts.assertCounterEqual(counters, "upstream_cx_total", 1)
asserts.assertCounterGreaterEqual(counters, "upstream_cx_tx_bytes_total", 403)
asserts.assertCounterEqual(counters, "upstream_rq_pending_total", 1)
asserts.assertCounterEqual(counters, "upstream_rq_total", 25)
asserts.assertCounterEqual(counters, "default.total_match_count", 1)
asserts.assertEqual(len(counters), 12)
def test_http_concurrency(http_test_server_fixture):
"""Test that concurrency acts like a multiplier."""
parsed_json, _ = http_test_server_fixture.runNighthawkClient([
"--concurrency 4 --rps 100 --connections 1", "--duration", "100", "--termination-predicate",
"benchmark.http_2xx:24",
http_test_server_fixture.getTestServerRootUri()
])
counters = http_test_server_fixture.getNighthawkCounterMapFromJson(parsed_json)
  # Each of the 4 workers runs until its termination predicate fires after 25
  # successful requests (benchmark.http_2xx:24), so we expect
  # 4 workers * 25 requests = 100 requests total.
asserts.assertCounterEqual(counters, "upstream_cx_http1_total", 4)
@pytest.mark.parametrize('server_config',
["nighthawk/test/integration/configurations/nighthawk_https_origin.yaml"])
def test_https_h1(https_test_server_fixture):
"""Test h1 over https.
Runs the CLI configured to use HTTP/1 over https against our test server, and sanity
checks statistics from both client and server.
"""
parsed_json, _ = https_test_server_fixture.runNighthawkClient([
https_test_server_fixture.getTestServerRootUri(), "--connections", "1", "--rps", "100",
"--duration", "100", "--termination-predicate", "benchmark.http_2xx:24"
])
counters = https_test_server_fixture.getNighthawkCounterMapFromJson(parsed_json)
asserts.assertCounterEqual(counters, "benchmark.http_2xx", 25)
asserts.assertCounterEqual(counters, "upstream_cx_http1_total", 1)
asserts.assertCounterEqual(counters, "upstream_cx_rx_bytes_total", 3400)
asserts.assertCounterEqual(counters, "upstream_cx_total", 1)
asserts.assertCounterGreaterEqual(counters, "upstream_cx_tx_bytes_total", 500)
asserts.assertCounterEqual(counters, "upstream_rq_pending_total", 1)
asserts.assertCounterEqual(counters, "upstream_rq_total", 25)
asserts.assertCounterEqual(counters, "ssl.ciphers.ECDHE-RSA-AES128-GCM-SHA256", 1)
asserts.assertCounterEqual(counters, "ssl.curves.X25519", 1)
asserts.assertCounterEqual(counters, "ssl.handshake", 1)
asserts.assertCounterEqual(counters, "ssl.sigalgs.rsa_pss_rsae_sha256", 1)
asserts.assertCounterEqual(counters, "ssl.versions.TLSv1.2", 1)
asserts.assertCounterEqual(counters, "default.total_match_count", 1)
asserts.assertEqual(len(counters), 17)
server_stats = https_test_server_fixture.getTestServerStatisticsJson()
asserts.assertEqual(
https_test_server_fixture.getServerStatFromJson(server_stats,
"http.ingress_http.downstream_rq_2xx"), 25)
@pytest.mark.parametrize('server_config',
["nighthawk/test/integration/configurations/nighthawk_https_origin.yaml"])
def test_https_h2(https_test_server_fixture):
"""Test http2 over https.
Runs the CLI configured to use HTTP/2 (using https) against our test server, and sanity
checks statistics from both client and server.
"""
parsed_json, _ = https_test_server_fixture.runNighthawkClient([
"--h2",
https_test_server_fixture.getTestServerRootUri(), "--rps", "100", "--duration", "100",
"--termination-predicate", "benchmark.http_2xx:24", "--max-active-requests", "1"
])
counters = https_test_server_fixture.getNighthawkCounterMapFromJson(parsed_json)
asserts.assertCounterEqual(counters, "benchmark.http_2xx", 25)
asserts.assertCounterEqual(counters, "upstream_cx_http2_total", 1)
  # Through empirical observation, 1030 has been determined to be the minimum of bytes
# we can expect to have received when execution has stopped.
asserts.assertCounterGreaterEqual(counters, "upstream_cx_rx_bytes_total", 1030)
asserts.assertCounterEqual(counters, "upstream_cx_total", 1)
asserts.assertCounterGreaterEqual(counters, "upstream_cx_tx_bytes_total", 403)
asserts.assertCounterEqual(counters, "upstream_rq_pending_total", 1)
asserts.assertCounterEqual(counters, "upstream_rq_total", 25)
asserts.assertCounterEqual(counters, "ssl.ciphers.ECDHE-RSA-AES128-GCM-SHA256", 1)
asserts.assertCounterEqual(counters, "ssl.curves.X25519", 1)
asserts.assertCounterEqual(counters, "ssl.handshake", 1)
asserts.assertCounterEqual(counters, "ssl.sigalgs.rsa_pss_rsae_sha256", 1)
asserts.assertCounterEqual(counters, "ssl.versions.TLSv1.2", 1)
asserts.assertCounterEqual(counters, "default.total_match_count", 1)
asserts.assertEqual(len(counters), 17)
@pytest.mark.parametrize('server_config',
["nighthawk/test/integration/configurations/nighthawk_https_origin.yaml"])
def test_https_h2_multiple_connections(https_test_server_fixture):
"""Test that the experimental h2 pool uses multiple connections.
The burst we send ensures we will need 10 connections right away, as we
limit max active streams per connection to 1 by setting the experimental
flag to use multiple h2 connections.
"""
parsed_json, _ = https_test_server_fixture.runNighthawkClient([
"--h2",
https_test_server_fixture.getTestServerRootUri(), "--rps", "100", "--duration", "100",
"--termination-predicate", "benchmark.http_2xx:99", "--max-active-requests", "10",
"--max-pending-requests", "10", "--max-concurrent-streams", "1", "--burst-size", "10"
])
counters = https_test_server_fixture.getNighthawkCounterMapFromJson(parsed_json)
asserts.assertCounterGreaterEqual(counters, "benchmark.http_2xx", 100)
  # Empirical observation shows we may end up creating more than 10 connections.
# This is stock Envoy h/2 pool behavior.
asserts.assertCounterGreaterEqual(counters, "upstream_cx_http2_total", 10)
def test_h3_quic(quic_test_server_fixture):
"""Test http3 quic.
Runs the CLI configured to use HTTP/3 Quic against our test server, and sanity
checks statistics from both client and server.
"""
parsed_json, _ = quic_test_server_fixture.runNighthawkClient([
"--protocol http3",
quic_test_server_fixture.getTestServerRootUri(),
"--rps",
"100",
"--duration",
"100",
"--termination-predicate",
"benchmark.http_2xx:24",
"--max-active-requests",
"1",
# Envoy doesn't support disabling certificate verification on Quic
# connections, so the host in our requests has to match the hostname in
# the leaf certificate.
"--request-header",
"Host:www.lyft.com"
])
counters = quic_test_server_fixture.getNighthawkCounterMapFromJson(parsed_json)
asserts.assertCounterEqual(counters, "benchmark.http_2xx", 25)
asserts.assertCounterEqual(counters, "upstream_cx_http3_total", 1)
asserts.assertCounterEqual(counters, "upstream_cx_total", 1)
asserts.assertCounterEqual(counters, "upstream_rq_pending_total", 1)
asserts.assertCounterEqual(counters, "upstream_rq_total", 25)
asserts.assertCounterEqual(counters, "default.total_match_count", 1)
def _do_tls_configuration_test(https_test_server_fixture, cli_parameter, use_h2):
"""Test with different ciphers.
For a given choice of (--tls-context, --transport-socket) x (H1, H2),
run a series of traffic tests with different ciphers.
Args:
https_test_server_fixture: pytest.fixture that controls a test server and client
cli_parameter: string, --tls-context or --transport-socket
use_h2: boolean, whether to pass --h2
"""
if cli_parameter == "--tls-context":
json_template = "{common_tls_context:{tls_params:{cipher_suites:[\"-ALL:%s\"]}}}"
else:
json_template = "%s%s%s" % (
"{name:\"envoy.transport_sockets.tls\",typed_config:{",
"\"@type\":\"type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext\",",
"common_tls_context:{tls_params:{cipher_suites:[\"-ALL:%s\"]}}}}")
for cipher in [
"ECDHE-RSA-AES128-SHA",
"ECDHE-RSA-CHACHA20-POLY1305",
]:
parsed_json, _ = https_test_server_fixture.runNighthawkClient(
(["--protocol", "http2"] if use_h2 else []) + [
"--duration", "10", "--termination-predicate", "benchmark.http_2xx:0", cli_parameter,
json_template % cipher,
https_test_server_fixture.getTestServerRootUri()
])
counters = https_test_server_fixture.getNighthawkCounterMapFromJson(parsed_json)
asserts.assertCounterGreaterEqual(counters, "ssl.ciphers.%s" % cipher, 1)
@pytest.mark.parametrize('server_config',
["nighthawk/test/integration/configurations/nighthawk_https_origin.yaml"])
def test_https_h1_tls_context_configuration(https_test_server_fixture):
"""Test that specifying tls cipher suites works with the h1 pool."""
_do_tls_configuration_test(https_test_server_fixture, "--tls-context", use_h2=False)
@pytest.mark.parametrize('server_config',
["nighthawk/test/integration/configurations/nighthawk_https_origin.yaml"])
def test_https_h1_transport_socket_configuration(https_test_server_fixture):
"""Test that specifying tls cipher suites via transport socket works with the h1 pool."""
_do_tls_configuration_test(https_test_server_fixture, "--transport-socket", use_h2=False)
@pytest.mark.parametrize('server_config',
["nighthawk/test/integration/configurations/nighthawk_https_origin.yaml"])
def test_https_h2_tls_context_configuration(https_test_server_fixture):
"""Test that specifying tls cipher suites works with the h2 pool."""
_do_tls_configuration_test(https_test_server_fixture, "--tls-context", use_h2=True)
@pytest.mark.parametrize('server_config',
["nighthawk/test/integration/configurations/nighthawk_https_origin.yaml"])
def test_https_h2_transport_socket_configuration(https_test_server_fixture):
"""Test that specifying tls cipher suites via transport socket works with the h2 pool."""
_do_tls_configuration_test(https_test_server_fixture, "--transport-socket", use_h2=True)
@pytest.mark.parametrize('server_config',
["nighthawk/test/integration/configurations/nighthawk_https_origin.yaml"])
def test_https_prefetching(https_test_server_fixture):
"""Test we prefetch connections.
We test for 1 second at 1 rps, which should
result in 1 connection max without prefetching. However, we specify 50 connections
and the prefetching flag, so we ought to see 50 http1 connections created.
"""
parsed_json, _ = https_test_server_fixture.runNighthawkClient([
"--duration 1", "--rps 1", "--prefetch-connections", "--connections 50",
https_test_server_fixture.getTestServerRootUri()
])
counters = https_test_server_fixture.getNighthawkCounterMapFromJson(parsed_json)
asserts.assertCounterEqual(counters, "upstream_cx_http1_total", 50)
@pytest.mark.parametrize('server_config',
["nighthawk/test/integration/configurations/nighthawk_https_origin.yaml"])
def test_https_log_verbosity(https_test_server_fixture):
"""Test that the specified log verbosity level is respected.
This tests for a sentinel we know is only right when the level
is set to 'trace'.
"""
# TODO(oschaaf): this is kind of fragile. Can we improve?
trace_level_sentinel = "nighthawk_service_zone"
_, logs = https_test_server_fixture.runNighthawkClient(
["--duration 1", "--rps 1", "-v debug",
https_test_server_fixture.getTestServerRootUri()])
asserts.assertNotIn(trace_level_sentinel, logs)
_, logs = https_test_server_fixture.runNighthawkClient(
["--duration 1", "--rps 1", "-v trace",
https_test_server_fixture.getTestServerRootUri()])
asserts.assertIn(trace_level_sentinel, logs)
def test_dotted_output_format(http_test_server_fixture):
"""Test that we get the dotted string output format when requested, and ensure we get latency percentiles."""
output, _ = http_test_server_fixture.runNighthawkClient([
"--duration 1", "--rps 10", "--output-format dotted",
http_test_server_fixture.getTestServerRootUri()
],
as_json=False)
asserts.assertIn("global.benchmark_http_client.request_to_response.permilles-500.microseconds",
output)
# TODO(oschaaf): add percentiles to the gold testing in the C++ output formatter
# once the fortio formatter has landed (https://github.com/envoyproxy/nighthawk/pull/168)
def test_cli_output_format(http_test_server_fixture):
"""Test that we observe latency percentiles with CLI output."""
output, _ = http_test_server_fixture.runNighthawkClient(
["--duration 1", "--rps 10",
http_test_server_fixture.getTestServerRootUri()], as_json=False)
asserts.assertIn("Initiation to completion", output)
asserts.assertIn("Percentile", output)
@pytest.mark.parametrize(
'filter_configs',
["{}", "{static_delay: \"0.01s\"}", "{emit_previous_request_delta_in_response_header: \"aa\"}"])
def test_request_body_gets_transmitted(http_test_server_fixture, filter_configs):
"""Test request body transmission handling code for our extensions.
Ensure that the number of bytes we request for the request body gets reflected in the upstream
connection transmitted bytes counter for h1 and h2.
"""
def check_upload_expectations(fixture, parsed_json, expected_transmitted_bytes,
expected_received_bytes):
counters = fixture.getNighthawkCounterMapFromJson(parsed_json)
asserts.assertCounterGreaterEqual(counters, "upstream_cx_tx_bytes_total",
expected_transmitted_bytes)
server_stats = fixture.getTestServerStatisticsJson()
# Server side expectations start failing with larger upload sizes
asserts.assertGreaterEqual(
fixture.getServerStatFromJson(server_stats,
"http.ingress_http.downstream_cx_rx_bytes_total"),
expected_received_bytes)
# TODO(#531): The dynamic-delay extension hangs unless we lower the request entity body size.
upload_bytes = 1024 * 1024 if "static_delay" in filter_configs else 1024 * 1024 * 3
requests = 10
args = [
http_test_server_fixture.getTestServerRootUri(), "--duration", "100", "--rps", "100",
"--request-body-size",
str(upload_bytes), "--termination-predicate",
"benchmark.http_2xx:%s" % str(requests), "--connections", "1", "--request-method", "POST",
"--max-active-requests", "1", "--request-header",
"x-nighthawk-test-server-config:%s" % filter_configs
]
# Test we transmit the expected amount of bytes with H1
parsed_json, _ = http_test_server_fixture.runNighthawkClient(args)
check_upload_expectations(http_test_server_fixture, parsed_json, upload_bytes * requests,
upload_bytes * requests)
# Test we transmit the expected amount of bytes with H2
args.append("--h2")
parsed_json, _ = http_test_server_fixture.runNighthawkClient(args)
# We didn't reset the server in between, so our expectation for received bytes on the server side is raised.
check_upload_expectations(http_test_server_fixture, parsed_json, upload_bytes * requests,
upload_bytes * requests * 2)
def test_http_h1_termination_predicate(http_test_server_fixture):
"""Test with a termination predicate.
  Should result in successful execution with exactly 10 successful requests
  (termination predicate benchmark.http_2xx:9), far fewer than the configured
  rps and duration would otherwise produce.
"""
parsed_json, _ = http_test_server_fixture.runNighthawkClient([
http_test_server_fixture.getTestServerRootUri(), "--duration", "5", "--rps", "500",
"--connections", "1", "--termination-predicate", "benchmark.http_2xx:9"
])
counters = http_test_server_fixture.getNighthawkCounterMapFromJson(parsed_json)
asserts.assertCounterEqual(counters, "benchmark.http_2xx", 10)
def test_http_h1_failure_predicate(http_test_server_fixture):
"""Test with a failure predicate.
  Should result in failing execution after a single successful request, because
  the failure predicate benchmark.http_2xx:0 triggers as soon as one 2xx is seen.
"""
parsed_json, _ = http_test_server_fixture.runNighthawkClient([
http_test_server_fixture.getTestServerRootUri(), "--duration", "5", "--rps", "500",
"--connections", "1", "--failure-predicate", "benchmark.http_2xx:0"
],
expect_failure=True)
counters = http_test_server_fixture.getNighthawkCounterMapFromJson(parsed_json)
asserts.assertCounterEqual(counters, "benchmark.http_2xx", 1)
def test_bad_arg_error_messages(http_test_server_fixture):
"""Test arguments that pass proto validation, but are found to be no good nonetheless, result in reasonable error messages."""
_, err = http_test_server_fixture.runNighthawkClient(
[http_test_server_fixture.getTestServerRootUri(), "--termination-predicate ", "a:a"],
expect_failure=True,
as_json=False)
assert "Bad argument: Termination predicate 'a:a' has an out of range threshold." in err
def test_multiple_backends_http_h1(multi_http_test_server_fixture):
"""Test that we can load-test multiple backends on http.
Runs the CLI configured to use plain HTTP/1 against multiple test servers, and sanity
checks statistics from both client and server.
"""
nighthawk_client_args = [
"--multi-target-path", "/", "--duration", "100", "--termination-predicate",
"benchmark.http_2xx:24"
]
for uri in multi_http_test_server_fixture.getAllTestServerRootUris():
nighthawk_client_args.append("--multi-target-endpoint")
nighthawk_client_args.append(uri.replace("http://", "").replace("/", ""))
parsed_json, stderr = multi_http_test_server_fixture.runNighthawkClient(nighthawk_client_args)
counters = multi_http_test_server_fixture.getNighthawkCounterMapFromJson(parsed_json)
asserts.assertCounterEqual(counters, "benchmark.http_2xx", 25)
asserts.assertCounterEqual(counters, "upstream_cx_http1_total", 3)
asserts.assertCounterGreater(counters, "upstream_cx_rx_bytes_total", 0)
asserts.assertCounterEqual(counters, "upstream_cx_total", 3)
asserts.assertCounterGreater(counters, "upstream_cx_tx_bytes_total", 0)
asserts.assertCounterEqual(counters, "upstream_rq_pending_total", 3)
asserts.assertCounterEqual(counters, "upstream_rq_total", 25)
asserts.assertCounterEqual(counters, "default.total_match_count", 3)
for parsed_server_json in multi_http_test_server_fixture.getAllTestServerStatisticsJsons():
single_2xx = multi_http_test_server_fixture.getServerStatFromJson(
parsed_server_json, "http.ingress_http.downstream_rq_2xx")
# Confirm that each backend receives some traffic
asserts.assertGreaterEqual(single_2xx, 1)
@pytest.mark.parametrize('server_config',
["nighthawk/test/integration/configurations/nighthawk_https_origin.yaml"])
def test_multiple_backends_https_h1(multi_https_test_server_fixture):
"""Test that we can load-test multiple backends on https.
Runs the CLI configured to use HTTP/1 with TLS against multiple test servers, and sanity
checks statistics from both client and server.
"""
nighthawk_client_args = [
"--multi-target-use-https", "--multi-target-path", "/", "--duration", "100",
"--termination-predicate", "benchmark.http_2xx:24"
]
for uri in multi_https_test_server_fixture.getAllTestServerRootUris():
nighthawk_client_args.append("--multi-target-endpoint")
nighthawk_client_args.append(uri.replace("https://", "").replace("/", ""))
parsed_json, stderr = multi_https_test_server_fixture.runNighthawkClient(nighthawk_client_args)
counters = multi_https_test_server_fixture.getNighthawkCounterMapFromJson(parsed_json)
asserts.assertCounterEqual(counters, "benchmark.http_2xx", 25)
asserts.assertCounterEqual(counters, "upstream_cx_http1_total", 3)
asserts.assertCounterGreater(counters, "upstream_cx_rx_bytes_total", 0)
asserts.assertCounterEqual(counters, "upstream_cx_total", 3)
asserts.assertCounterGreater(counters, "upstream_cx_tx_bytes_total", 0)
asserts.assertCounterEqual(counters, "upstream_rq_pending_total", 3)
asserts.assertCounterEqual(counters, "upstream_rq_total", 25)
asserts.assertCounterEqual(counters, "default.total_match_count", 3)
total_2xx = 0
for parsed_server_json in multi_https_test_server_fixture.getAllTestServerStatisticsJsons():
single_2xx = multi_https_test_server_fixture.getServerStatFromJson(
parsed_server_json, "http.ingress_http.downstream_rq_2xx")
asserts.assertBetweenInclusive(single_2xx, 8, 9)
total_2xx += single_2xx
asserts.assertBetweenInclusive(total_2xx, 24, 25)
@pytest.mark.parametrize('server_config',
["nighthawk/test/integration/configurations/sni_origin.yaml"])
def test_https_h1_sni(https_test_server_fixture):
"""Test that SNI indication works on https/h1."""
# Verify success when we set the right host
parsed_json, _ = https_test_server_fixture.runNighthawkClient([
https_test_server_fixture.getTestServerRootUri(), "--rps", "100", "--duration", "100",
"--termination-predicate", "benchmark.http_2xx:2", "--request-header", "host: sni.com"
])
counters = https_test_server_fixture.getNighthawkCounterMapFromJson(parsed_json)
asserts.assertCounterGreaterEqual(counters, "benchmark.http_2xx", 1)
asserts.assertCounterGreaterEqual(counters, "upstream_cx_http1_total", 1)
asserts.assertCounterGreaterEqual(counters, "ssl.handshake", 1)
# Verify failure when we set no host (will get plain http)
parsed_json, _ = https_test_server_fixture.runNighthawkClient(
[https_test_server_fixture.getTestServerRootUri(), "--rps", "20", "--duration", "100"],
expect_failure=True)
# Verify success when we use plain http and don't request the sni host
parsed_json, _ = https_test_server_fixture.runNighthawkClient([
https_test_server_fixture.getTestServerRootUri().replace("https://", "http://"), "--rps",
"100", "--duration", "20", "--termination-predicate", "benchmark.http_2xx:2"
],
expect_failure=False)
counters = https_test_server_fixture.getNighthawkCounterMapFromJson(parsed_json)
asserts.assertCounterGreaterEqual(counters, "benchmark.http_2xx", 1)
asserts.assertCounterGreaterEqual(counters, "upstream_cx_http1_total", 1)
asserts.assertNotIn("ssl.handshake", counters)
@pytest.mark.parametrize('server_config',
["nighthawk/test/integration/configurations/sni_origin.yaml"])
def test_https_h2_sni(https_test_server_fixture):
"""Tests that SNI indication works on https/h1."""
# Verify success when we set the right host
parsed_json, _ = https_test_server_fixture.runNighthawkClient([
https_test_server_fixture.getTestServerRootUri(), "--rps", "100", "--duration", "100",
"--termination-predicate", "benchmark.http_2xx:2", "--request-header", ":authority: sni.com",
"--h2"
])
counters = https_test_server_fixture.getNighthawkCounterMapFromJson(parsed_json)
asserts.assertCounterGreaterEqual(counters, "benchmark.http_2xx", 1)
asserts.assertCounterGreaterEqual(counters, "upstream_cx_http2_total", 1)
asserts.assertCounterEqual(counters, "ssl.handshake", 1)
# Verify success when we set the right host
parsed_json, _ = https_test_server_fixture.runNighthawkClient([
https_test_server_fixture.getTestServerRootUri(), "--rps", "100", "--duration", "100",
"--termination-predicate", "benchmark.http_2xx:2", "--request-header", "host: sni.com", "--h2"
])
counters = https_test_server_fixture.getNighthawkCounterMapFromJson(parsed_json)
asserts.assertCounterGreaterEqual(counters, "benchmark.http_2xx", 1)
asserts.assertCounterGreaterEqual(counters, "upstream_cx_http2_total", 1)
asserts.assertCounterEqual(counters, "ssl.handshake", 1)
# Verify failure when we set no host (will get plain http)
parsed_json, _ = https_test_server_fixture.runNighthawkClient([
https_test_server_fixture.getTestServerRootUri(), "--rps", "100", "--duration", "100", "--h2"
],
expect_failure=True)
# Verify failure when we provide both host and :authority: (will get plain http)
parsed_json, _ = https_test_server_fixture.runNighthawkClient([
https_test_server_fixture.getTestServerRootUri(), "--rps", "100", "--duration", "100", "--h2",
"--request-header", "host: sni.com", "--request-header", ":authority: sni.com"
],
expect_failure=True)
@pytest.fixture(scope="function", params=[1, 25])
def qps_parameterization_fixture(request):
"""Yield queries per second values to iterate test parameterization on."""
param = request.param
yield param
@pytest.fixture(scope="function", params=[5, 10])
def duration_parameterization_fixture(request):
"""Yield duration values to iterate test parameterization on."""
param = request.param
yield param
@pytest.mark.skipif(utility.isSanitizerRun(), reason="Unstable in sanitizer runs")
def test_http_request_release_timing(http_test_server_fixture, qps_parameterization_fixture,
duration_parameterization_fixture):
"""Test latency-sample-, query- and reply- counts in various configurations."""
for concurrency in [1, 2]:
parsed_json, _ = http_test_server_fixture.runNighthawkClient([
http_test_server_fixture.getTestServerRootUri(), "--duration",
str(duration_parameterization_fixture), "--rps",
str(qps_parameterization_fixture), "--concurrency",
str(concurrency)
])
global_histograms = http_test_server_fixture.getNighthawkGlobalHistogramsbyIdFromJson(
parsed_json)
counters = http_test_server_fixture.getNighthawkCounterMapFromJson(parsed_json)
global_result = http_test_server_fixture.getGlobalResults(parsed_json)
actual_duration = utility.get_execution_duration_from_global_result_json(global_result)
# Ensure Nighthawk managed to execute for at least some time.
assert actual_duration >= 1
    # The actual duration is a float; flooring it here allows us to use
    # the GreaterEqual matchers below.
total_requests = qps_parameterization_fixture * concurrency * math.floor(actual_duration)
asserts.assertGreaterEqual(
int(global_histograms["benchmark_http_client.request_to_response"]["count"]),
total_requests)
asserts.assertGreaterEqual(
int(global_histograms["benchmark_http_client.queue_to_connect"]["count"]), total_requests)
asserts.assertGreaterEqual(int(global_histograms["benchmark_http_client.latency_2xx"]["count"]),
total_requests)
asserts.assertCounterGreaterEqual(counters, "benchmark.http_2xx", (total_requests))
# Give system resources some time to recover after the last execution.
time.sleep(2)
def _send_sigterm(process):
  # Sleep for a while; under tsan the client needs a lot of time
  # to start up. 10 seconds has been determined to work through
  # empirical observation.
time.sleep(10)
process.terminate()
def test_cancellation_with_infinite_duration(http_test_server_fixture):
"""Test that we can use signals to cancel execution."""
args = [
http_test_server_fixture.nighthawk_client_path, "--concurrency", "2",
http_test_server_fixture.getTestServerRootUri(), "--no-duration", "--output-format", "json"
]
client_process = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
Thread(target=(lambda: _send_sigterm(client_process))).start()
stdout, stderr = client_process.communicate()
client_process.wait()
output = stdout.decode('utf-8')
asserts.assertEqual(client_process.returncode, 0)
parsed_json = json.loads(output)
counters = http_test_server_fixture.getNighthawkCounterMapFromJson(parsed_json)
asserts.assertCounterEqual(counters, "graceful_stop_requested", 2)
asserts.assertCounterGreaterEqual(counters, "benchmark.http_2xx", 1)
@pytest.mark.parametrize('server_config', [
"nighthawk/test/integration/configurations/nighthawk_http_origin.yaml",
"nighthawk/test/integration/configurations/nighthawk_track_timings.yaml"
])
def test_http_h1_response_header_latency_tracking(http_test_server_fixture, server_config):
"""Test emission and tracking of response header latencies.
Run the CLI configured to track latencies delivered by response header from the test-server.
Ensure that the origin_latency_statistic histogram receives the correct number of inputs.
"""
parsed_json, _ = http_test_server_fixture.runNighthawkClient([
http_test_server_fixture.getTestServerRootUri(), "--connections", "1", "--rps", "100",
"--duration", "100", "--termination-predicate", "benchmark.http_2xx:99",
"--latency-response-header-name", "x-origin-request-receipt-delta"
])
global_histograms = http_test_server_fixture.getNighthawkGlobalHistogramsbyIdFromJson(parsed_json)
asserts.assertEqual(int(global_histograms["benchmark_http_client.latency_2xx"]["count"]), 100)
# Verify behavior is correct both with and without the timing filter enabled.
expected_histogram_count = 99 if "nighthawk_track_timings.yaml" in server_config else 0
asserts.assertEqual(
int(global_histograms["benchmark_http_client.origin_latency_statistic"]["count"]),
expected_histogram_count)
def _run_client_with_args(args):
return utility.run_binary_with_args("nighthawk_client", args)
def test_client_help():
"""Test that passing --help behaves as expected."""
(exit_code, output) = _run_client_with_args("--help")
asserts.assertEqual(exit_code, 0)
asserts.assertIn("USAGE", output)
def test_client_bad_arg():
"""Test that passing bad arguments behaves as expected."""
(exit_code, output) = _run_client_with_args("127.0.0.1 --foo")
asserts.assertEqual(exit_code, 1)
asserts.assertIn("PARSE ERROR: Argument: --foo", output)
def test_client_cli_bad_uri(http_test_server_fixture):
"""Test that passing a bad URI to the client results in nice behavior."""
_, err = http_test_server_fixture.runNighthawkClient(["http://http://foo"],
expect_failure=True,
as_json=False)
assert "Invalid target URI" in err
@pytest.mark.parametrize('server_config',
["nighthawk/test/integration/configurations/nighthawk_https_origin.yaml"])
def test_drain(https_test_server_fixture):
"""Test that the pool drain timeout is effective, and we terminate in a timely fashion.
Sets up the test server to delay replies 100 seconds. Our execution will only last 20 seconds, so we
expect to observe no replies. Termination should be cut short by the drain timeout, which means
that we should have results in approximately execution duration + drain timeout = 25 seconds.
(the pool drain timeout is hard coded to 5 seconds as of writing this).
If drain timeout is reached, a message will be logged to the user.
"""
parsed_json, logs = https_test_server_fixture.runNighthawkClient([
https_test_server_fixture.getTestServerRootUri(), "--rps", "100", "--duration", "20",
"--request-header", "x-nighthawk-test-server-config: {static_delay: \"100s\"}"
])
counters = https_test_server_fixture.getNighthawkCounterMapFromJson(parsed_json)
asserts.assertCounterGreaterEqual(counters, "upstream_cx_http1_total", 1)
asserts.assertNotIn("benchmark.http_2xx", counters)
asserts.assertIn("Wait for the connection pool drain timed out, proceeding to hard shutdown",
logs)
| 50.641463
| 128
| 0.746737
|
c075f94675358fe5634d9652eddecd36ee31c4eb
| 10,894
|
py
|
Python
|
api/base/exceptions.py
|
birdbrained/osf.io
|
ca70cf9fdacc2f3771038c8e5bc1c19e7126fd50
|
[
"Apache-2.0"
] | 1
|
2019-12-23T04:30:20.000Z
|
2019-12-23T04:30:20.000Z
|
api/base/exceptions.py
|
birdbrained/osf.io
|
ca70cf9fdacc2f3771038c8e5bc1c19e7126fd50
|
[
"Apache-2.0"
] | 16
|
2020-03-24T16:30:32.000Z
|
2022-03-03T22:39:45.000Z
|
api/base/exceptions.py
|
birdbrained/osf.io
|
ca70cf9fdacc2f3771038c8e5bc1c19e7126fd50
|
[
"Apache-2.0"
] | null | null | null |
import httplib as http
from django.utils.translation import ugettext_lazy as _
from rest_framework import status
from rest_framework.exceptions import APIException, AuthenticationFailed
def get_resource_object_member(error_key, context):
from api.base.serializers import RelationshipField
field = context['view'].serializer_class._declared_fields.get(error_key, None)
if field:
return 'relationships' if isinstance(field, RelationshipField) else 'attributes'
# If the field cannot be found (where read/write operations have different serializers),
# assume the error was in 'attributes' by default
return 'attributes'
def dict_error_formatting(errors, context, index=None):
"""
Formats all dictionary error messages for both single and bulk requests
"""
formatted_error_list = []
# Error objects may have the following members. Title and id are removed to avoid a clash with "title" and "id" field errors.
top_level_error_keys = ['links', 'status', 'code', 'detail', 'source', 'meta']
# Resource objects must contain at least 'id' and 'type'
resource_object_identifiers = ['type', 'id']
if index is None:
index = ''
else:
index = str(index) + '/'
for error_key, error_description in errors.items():
if isinstance(error_description, basestring):
error_description = [error_description]
if error_key in top_level_error_keys:
formatted_error_list.extend({error_key: description} for description in error_description)
elif error_key in resource_object_identifiers:
formatted_error_list.extend([{'source': {'pointer': '/data/{}'.format(index) + error_key}, 'detail': reason} for reason in error_description])
elif error_key == 'non_field_errors':
formatted_error_list.extend([{'detail': description} for description in error_description])
else:
formatted_error_list.extend([{'source': {'pointer': '/data/{}{}/'.format(index, get_resource_object_member(error_key, context)) + error_key}, 'detail': reason} for reason in error_description])
return formatted_error_list
def json_api_exception_handler(exc, context):
"""
Custom exception handler that returns errors object as an array
"""
# We're deliberately not stripping html from exception detail.
# This creates potential vulnerabilities to script injection attacks
# when returning raw user input into error messages.
#
# Fortunately, Django's templating language strips markup by default,
# but if our frontend changes we may lose that protection.
# TODO: write tests to ensure our html frontend strips html
# Import inside method to avoid errors when the OSF is loaded without Django
from rest_framework.views import exception_handler
response = exception_handler(exc, context)
errors = []
if response:
message = response.data
if isinstance(exc, TwoFactorRequiredError):
response['X-OSF-OTP'] = 'required; app'
if isinstance(exc, JSONAPIException):
errors.extend([{'source': exc.source or {}, 'detail': exc.detail, 'meta': exc.meta or {}}])
elif isinstance(message, dict):
errors.extend(dict_error_formatting(message, context, index=None))
else:
if isinstance(message, basestring):
message = [message]
for index, error in enumerate(message):
if isinstance(error, dict):
errors.extend(dict_error_formatting(error, context, index=index))
else:
errors.append({'detail': error})
response.data = {'errors': errors}
return response
class EndpointNotImplementedError(APIException):
status_code = status.HTTP_501_NOT_IMPLEMENTED
default_detail = _('This endpoint is not yet implemented.')
class ServiceUnavailableError(APIException):
status_code = status.HTTP_503_SERVICE_UNAVAILABLE
default_detail = _('Service is unavailable at this time.')
class JSONAPIException(APIException):
"""Inherits from the base DRF API exception and adds extra metadata to support JSONAPI error objects
:param str detail: a human-readable explanation specific to this occurrence of the problem
:param dict source: A dictionary containing references to the source of the error.
See http://jsonapi.org/format/#error-objects.
Example: ``source={'pointer': '/data/attributes/title'}``
:param dict meta: A meta object containing non-standard meta info about the error.
"""
status_code = status.HTTP_400_BAD_REQUEST
def __init__(self, detail=None, source=None, meta=None):
super(JSONAPIException, self).__init__(detail=detail)
self.source = source
self.meta = meta
# Custom Exceptions the Django Rest Framework does not support
class Gone(JSONAPIException):
status_code = status.HTTP_410_GONE
default_detail = ('The requested resource is no longer available.')
def UserGone(user):
return Gone(
detail='The requested user is no longer available.',
meta={
'full_name': user.fullname, 'family_name': user.family_name, 'given_name': user.given_name,
'middle_names': user.middle_names, 'profile_image': user.profile_image_url(),
},
)
class Conflict(JSONAPIException):
status_code = status.HTTP_409_CONFLICT
default_detail = ('Resource identifier does not match server endpoint.')
class JSONAPIParameterException(JSONAPIException):
def __init__(self, detail=None, parameter=None):
source = {
'parameter': parameter,
}
super(JSONAPIParameterException, self).__init__(detail=detail, source=source)
class JSONAPIAttributeException(JSONAPIException):
def __init__(self, detail=None, attribute=None):
source = {
'pointer': '/data/attributes/{}'.format(attribute),
}
super(JSONAPIAttributeException, self).__init__(detail=detail, source=source)
class InvalidQueryStringError(JSONAPIParameterException):
"""Raised when client passes an invalid value to a query string parameter."""
default_detail = 'Query string contains an invalid value.'
status_code = http.BAD_REQUEST
class InvalidFilterOperator(JSONAPIParameterException):
"""Raised when client passes an invalid operator to a query param filter."""
status_code = http.BAD_REQUEST
def __init__(self, detail=None, value=None, valid_operators=('eq', 'lt', 'lte', 'gt', 'gte', 'contains', 'icontains')):
if value and not detail:
valid_operators = ', '.join(valid_operators)
detail = "Value '{0}' is not a supported filter operator; use one of {1}.".format(
value,
valid_operators,
)
super(InvalidFilterOperator, self).__init__(detail=detail, parameter='filter')
class InvalidFilterValue(JSONAPIParameterException):
"""Raised when client passes an invalid value to a query param filter."""
status_code = http.BAD_REQUEST
def __init__(self, detail=None, value=None, field_type=None):
if not detail:
detail = "Value '{0}' is not valid".format(value)
if field_type:
detail += ' for a filter on type {0}'.format(
field_type,
)
detail += '.'
super(InvalidFilterValue, self).__init__(detail=detail, parameter='filter')
class InvalidFilterError(JSONAPIParameterException):
"""Raised when client passes an malformed filter in the query string."""
default_detail = _('Query string contains a malformed filter.')
status_code = http.BAD_REQUEST
def __init__(self, detail=None):
super(InvalidFilterError, self).__init__(detail=detail, parameter='filter')
class InvalidFilterComparisonType(JSONAPIParameterException):
"""Raised when client tries to filter on a field that is not a date or number type"""
default_detail = _('Comparison operators are only supported for dates and numbers.')
status_code = http.BAD_REQUEST
class InvalidFilterMatchType(JSONAPIParameterException):
"""Raised when client tries to do a match filter on a field that is not a string or a list"""
default_detail = _('Match operators are only supported for strings and lists.')
status_code = http.BAD_REQUEST
class InvalidFilterFieldError(JSONAPIParameterException):
"""Raised when client tries to filter on a field that is not supported"""
default_detail = _('Query contained one or more filters for invalid fields.')
status_code = http.BAD_REQUEST
def __init__(self, detail=None, parameter=None, value=None):
if value and not detail:
detail = "Value '{}' is not a filterable field.".format(value)
super(InvalidFilterFieldError, self).__init__(detail=detail, parameter=parameter)
class UnconfirmedAccountError(APIException):
status_code = 400
default_detail = _('Please confirm your account before using the API.')
class UnclaimedAccountError(APIException):
status_code = 400
default_detail = _('Please claim your account before using the API.')
class DeactivatedAccountError(APIException):
status_code = 400
default_detail = _('Making API requests with credentials associated with a deactivated account is not allowed.')
class MergedAccountError(APIException):
status_code = 400
default_detail = _('Making API requests with credentials associated with a merged account is not allowed.')
class InvalidAccountError(APIException):
status_code = 400
default_detail = _('Making API requests with credentials associated with an invalid account is not allowed.')
class TwoFactorRequiredError(AuthenticationFailed):
default_detail = _('Must specify two-factor authentication OTP code.')
pass
class InvalidModelValueError(JSONAPIException):
status_code = 400
default_detail = _('Invalid value in POST/PUT/PATCH request.')
class TargetNotSupportedError(Exception):
"""Raised if a TargetField is used for a resource that isn't supported."""
pass
class RelationshipPostMakesNoChanges(Exception):
"""Raised when a post is on a relationship that already exists, so view can return a 204"""
pass
class NonDescendantNodeError(APIException):
"""Raised when a client attempts to associate a non-descendant node with a view only link"""
status_code = 400
default_detail = _('The node {0} cannot be affiliated with this View Only Link because the node you\'re trying to affiliate is not descended from the node that the View Only Link is attached to.')
def __init__(self, node_id, detail=None):
if not detail:
detail = self.default_detail.format(node_id)
super(NonDescendantNodeError, self).__init__(detail=detail)
| 39.32852
| 205
| 0.705985
|
1664cdfcc1db200e3bfacc4ac7f36145a606fc00
| 233
|
py
|
Python
|
Module 2/Chapter02/borg_singleton_2.py
|
real-slim-chadi/Python_Master-the-Art-of-Design-Patterns
|
95ec92272374e330b04d931208abbb184c7c7908
|
[
"MIT"
] | 73
|
2016-09-15T23:07:04.000Z
|
2022-03-05T15:09:48.000Z
|
Module 2/Chapter02/borg_singleton_2.py
|
real-slim-chadi/Python_Master-the-Art-of-Design-Patterns
|
95ec92272374e330b04d931208abbb184c7c7908
|
[
"MIT"
] | null | null | null |
Module 2/Chapter02/borg_singleton_2.py
|
real-slim-chadi/Python_Master-the-Art-of-Design-Patterns
|
95ec92272374e330b04d931208abbb184c7c7908
|
[
"MIT"
] | 51
|
2016-10-07T20:47:51.000Z
|
2021-12-22T21:00:24.000Z
|
__author__ = 'Chetan'
class Borg(object):
_shared_state = {}
def __new__(cls, *args, **kwargs):
obj = super(Borg, cls).__new__(cls, *args, **kwargs)
obj.__dict__ = cls._shared_state
return obj
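# Illustrative usage (editor's note, not part of the original example):
#   b1 = Borg()
#   b2 = Borg()
#   b1.x = 42
#   assert b2.x == 42      # attribute state is shared across instances...
#   assert b1 is not b2    # ...even though the instances themselves differ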
| 25.888889
| 60
| 0.592275
|
3db9eb2d293ccb980c59e54891bd8a24756dd6c7
| 47,707
|
py
|
Python
|
hiproc/resolve_jitter.py
|
AndrewAnnex/hiproc
|
c6c2bbdfa1d421e28d497bb6bfd70a5cde28b4f8
|
[
"Apache-2.0"
] | null | null | null |
hiproc/resolve_jitter.py
|
AndrewAnnex/hiproc
|
c6c2bbdfa1d421e28d497bb6bfd70a5cde28b4f8
|
[
"Apache-2.0"
] | null | null | null |
hiproc/resolve_jitter.py
|
AndrewAnnex/hiproc
|
c6c2bbdfa1d421e28d497bb6bfd70a5cde28b4f8
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
"""
Perform the jitter derivation for a HiRISE observation.
Outputs are the jitter in the x (sample) and y (line) directions in
pixels, and the time (ephemeris time at that translation), average error
between the derived jitter function and the original data, linerate
(line time read from flat files), and TDI. These outputs are
written to a text file. Another output is the pixel smear, also
written to a file.
The C++ version of this runs ~5x faster, FYI.
WARNING: This program and its modules may not be fully functional and
may return poor results.
"""
# Copyright (C) 2013-2020 Arizona Board of Regents on behalf of the Lunar and
# Planetary Laboratory at the University of Arizona.
# - Original MatLab program written by Aaron Boyd and Sarah Mattson for
# HiROC as part of the process to describe and correct geometric
# distortions caused by jitter in HiRISE images.
# Original version written approximately 6/2008.
#
# Copyright (c) 2012, United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration. All
# rights reserved.
# - C++ version written by Oleg Alexandrov based on the above MatLab
# program, resolveJitter4HiJACK.m version 1.4
#
# Copyright 2020-2021, Ross A. Beyer (rbeyer@seti.org)
# - Elements of this Python program are are based on the C++ version but
# the logic here is rewritten from scratch to emulate functionality.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# The only write-up that I could find for the pre-cursor was:
# S. Mattson, A. Boyd, R. L. Kirk, D. A. Cook, and E. Howington-Kraus,
# HiJACK: Correcting spacecraft jitter in HiRISE images of Mars,
# European Planetary Science Congress 2009, #604
# https://ui.adsabs.harvard.edu/abs/2009epsc.conf..604M}
# However, that's really only a high-level description.
import argparse
import csv
import itertools
import logging
import math
import os
import pkg_resources
import sys
from pathlib import Path
import matplotlib.pyplot as plt
import numpy as np
from scipy.signal import medfilt
from scipy.interpolate import PchipInterpolator
from scipy.optimize import minimize_scalar
import pvl
import hiproc.hirise as hirise
import hiproc.util as util
from hiproc.FlatFile import FlatFile
logger = logging.getLogger(__name__)
def arg_parser():
parser = argparse.ArgumentParser(
description=__doc__, parents=[util.parent_parser()]
)
parser.add_argument(
"-c",
"--conf",
type=argparse.FileType('r'),
default=pkg_resources.resource_stream(
__name__,
'data/ResolveJitter.conf'
),
help="Path to a ResolveJitter.conf file, only needed if "
"--lineinterval isn't given.",
)
parser.add_argument(
"--csv",
action="store_true",
help="This program writes out a fixed-width data file with extra "
"information for plotting. This also writes out a "
"comma-separated version.",
)
parser.add_argument(
"--lineinterval",
type=float,
help="The number of lines to use to set the number of Fourier "
"transform intervals, defaults to Control_Lines in the "
"ResolveJitter.conf file.",
)
parser.add_argument(
"--outdir",
type=Path,
help="Output directory. Defaults to the directory of the first "
"input file.",
)
parser.add_argument(
"--outprefix",
help="Prefix string for output files. If not given, will default "
"to the Observation ID of the images.",
)
parser.add_argument(
"-p", "--plot", action="store_true", help="Displays interactive plot.",
)
parser.add_argument(
"--saveplot",
nargs="?",
default=False,
const=True,
help="Saves plot to a default filename in the output directory. "
"If a filename is provided it will be used to save the plot.",
)
parser.add_argument(
"--whichmatch1",
action="store_false",
dest="which1",
help="If specified, the sense of the offsets for the first "
"file will be relative to the MATCH cube, rather than the FROM "
"cube.",
)
parser.add_argument(
"--whichmatch2",
action="store_false",
dest="which2",
help="If specified, the sense of the offsets for the second "
"file will be relative to the MATCH cube, rather than the FROM "
"cube.",
)
parser.add_argument(
"--whichmatch3",
action="store_false",
dest="which3",
help="If specified, the sense of the offsets for the third "
"file will be relative to the MATCH cube, rather than the FROM "
"cube.",
)
# parser.add_argument(
# "--optimize",
# action="store_true",
# help="Will run experimental optimizer."
# )
parser.add_argument(
"files",
nargs="*",
help="Three flat.txt files that are the output of ISIS hijitreg.",
)
return parser
def main():
parser = arg_parser()
args = parser.parse_args()
util.set_logger(args.verbose, args.logfile, args.log)
if len(args.files) == 3:
# With just three arguments, these are the expected flat files.
fp1, fp2, fp3 = map(Path, args.files)
which1 = args.which1
which2 = args.which2
which3 = args.which3
elif len(args.files) == 9:
# This is the old-style positional calling
oldparser = argparse.ArgumentParser()
oldparser.add_argument("outdir", type=Path)
oldparser.add_argument("outprefix", type=str)
oldparser.add_argument("lineinterval", type=float)
oldparser.add_argument("file_path1", type=Path)
oldparser.add_argument("which1", type=int, choices=[-1, 1])
oldparser.add_argument("file_path2", type=Path)
oldparser.add_argument("which2", type=int, choices=[-1, 1])
oldparser.add_argument("file_path3", type=Path)
oldparser.add_argument("which3", type=int, choices=[-1, 1])
args = oldparser.parse_args(args.files, namespace=args)
fp1 = args.file_path1
fp2 = args.file_path2
fp3 = args.file_path3
which1 = True if args.which1 != 1 else False
which2 = True if args.which2 != 1 else False
which3 = True if args.which3 != 1 else False
else:
parser.error("Only takes 3 or 9 positional arguments.")
if args.lineinterval is None:
args.lineinterval = pvl.load(args.conf)["AutoRegistration"][
"ControlNet"
]["Control_Lines"]
elif args.lineinterval <= 0:
raise ValueError("--lineinterval must be positive.")
if args.outdir is None:
outdir = fp1.parent
else:
outdir = args.outdir
fp1 = set_file_path(outdir, fp1)
fp2 = set_file_path(outdir, fp2)
fp3 = set_file_path(outdir, fp3)
with util.main_exceptions(args.verbose):
start(
fp1,
which1,
fp2,
which2,
fp3,
which3,
line_interval=args.lineinterval,
outdir=outdir,
outprefix=args.outprefix,
plotshow=args.plot,
plotsave=args.saveplot,
writecsv=args.csv,
opt=getattr(args, "optimize", False)  # "--optimize" is commented out in arg_parser()
)
return
def start(
file_path1: Path,
whichfrom1: bool,
file_path2: Path,
whichfrom2: bool,
file_path3: Path,
whichfrom3: bool,
line_interval: float,
outdir: Path,
outprefix=None,
plotshow=False,
plotsave=False,
writecsv=False,
opt=False
):
oid1 = hirise.get_ObsID_fromfile(file_path1)
oid2 = hirise.get_ObsID_fromfile(file_path2)
oid3 = hirise.get_ObsID_fromfile(file_path3)
if oid1 == oid2 == oid3:
oid = oid1
else:
raise ValueError(
f"The observation IDs from the three file"
f"paths ({file_path1}, {file_path2}, {file_path3}) do not match."
)
(
nfftime,
sample,
line,
linerate,
tdi,
t0,
t1,
t2,
t3,
offx_filtered,
offy_filtered,
xinterp,
yinterp,
min_avg_error,
min_k,
jittercheckx,
jitterchecky,
rh0,
) = resolve_jitter(
file_path1,
whichfrom1,
file_path2,
whichfrom2,
file_path3,
whichfrom3,
line_interval,
optimize=opt
)
# The outputs
logger.info(f"Average error is {min_avg_error} at min index {min_k}")
logger.info(f"linerate is {linerate}")
logger.info(f"TDI = {tdi}")
# To do: Remove the py suffix from output filenames.
if outprefix is None:
outprefix = str(oid)
else:
outprefix = str(outprefix)
# Characterize the smear:
(
max_smear_sample,
max_smear_line,
max_smear_mag,
dysdx,
dyldx,
xi,
) = pixel_smear(nfftime, sample, line, linerate, tdi)
write_smear_data(
(outdir / (outprefix + "_smear_py.txt")),
max_smear_sample,
max_smear_line,
max_smear_mag,
dysdx,
dyldx,
xi,
oid,
)
# Make a text file of the jitter data
jitter_p = outdir / (outprefix + "_jitter_py.txt")
jitter_text = [
f"""# Using image {oid} the jitter was found with an
# Average Error of {min_avg_error}
# Maximum Cross-track pixel smear {max_smear_sample}
# Maximum Down-track pixel smear {max_smear_line}
# Maximum Pixel Smear Magnitude {max_smear_mag}
#
# Sample Line ET"""
]
for s, l, e in zip(sample, line, nfftime):
jitter_text.append(f"""{s} {l} {e}""")
logger.info(f"Writing: {jitter_p}")
jitter_p.write_text("\n".join(jitter_text))
# Create data for plotting
data_p = outdir / (outprefix + "_jitter_plot_py.txt")
t_shift = [t1 - t0, t2 - t0, t3 - t0]
jittercheckx_shift = [
jittercheckx[0] + rh0["x1"],
jittercheckx[1] + rh0["x2"],
jittercheckx[2] + rh0["x3"],
]
jitterchecky_shift = [
jitterchecky[0] + rh0["y1"],
jitterchecky[1] + rh0["y2"],
jitterchecky[2] + rh0["y3"],
]
# Note the comment before the first label
# This ordering and naming is historic to the original file output.
string_labels = [
"# ET_shift",
"Sample",
"Line",
"t1_shift",
"offx1",
"xinterp1",
"jittercheckx1_shift",
"offy1",
"yinterp1",
"jitterchecky1_shift",
"t2_shift",
"offx2",
"xinterp2",
"jittercheckx2_shift",
"offy2",
"yinterp2",
"jitterchecky2_shift",
"t3_shift",
"offx3",
"xinterp3",
"jittercheckx3_shift",
"offy3",
"yinterp3",
"jitterchecky3_shift",
]
data_to_plot = (
string_labels,
nfftime - t0,
sample,
line,
t_shift[0],
offx_filtered[0],
xinterp[0],
jittercheckx_shift[0],
offy_filtered[0],
yinterp[0],
jitterchecky_shift[0],
t_shift[1],
offx_filtered[1],
xinterp[1],
jittercheckx_shift[1],
offy_filtered[1],
yinterp[1],
jitterchecky_shift[1],
t_shift[2],
offx_filtered[2],
xinterp[2],
jittercheckx_shift[2],
offy_filtered[2],
yinterp[2],
jitterchecky_shift[2],
)
write_data_for_plotting(data_p, *data_to_plot)
if writecsv:
write_csv(outdir / (outprefix + "_jitter_plot_py.csv"), *data_to_plot)
gnuplot_p = outdir / (outprefix + "_jitter_plot_py.plt")
img_file_name = outdir / (outprefix + "_jitter_plot_py.png")
write_gnuplot_file(
gnuplot_p, data_p, img_file_name, file_path1, file_path2, file_path3
)
if plotsave:
try:
plotsave = Path(plotsave)
except TypeError:
plotsave = outdir / (outprefix + "_jitter_plot_py.pdf")
if plotshow or plotsave:
plot(
t_shift,
offx_filtered,
offy_filtered,
xinterp,
yinterp,
jittercheckx_shift,
jitterchecky_shift,
[file_path1.stem, file_path2.stem, file_path3.stem],
nfftime - t0,
sample,
line,
show=plotshow,
save=plotsave,
)
return
def resolve_jitter(
file_path1: Path,
whichfrom1: bool,
file_path2: Path,
whichfrom2: bool,
file_path3: Path,
whichfrom3: bool,
line_interval: float,
window_size=11,
window_width=2,
optimize=False
):
"""
Returns a large tuple of information that is the result of solving for
the jitter based on the three input files.
The first file path sets some variables for all of the runs. The
whichfrom booleans determine the sense of the offsets.
A value of True makes the offsets relative to the From cube specified
in the flat.tab, and False makes the offsets relative to the Match cube.
:param file_path1: File Path for first flat.tab file.
:param whichfrom1: Offsets relative to FROM for first flat.tab file.
:param file_path2: File Path for second flat.tab file.
:param whichfrom2: Offsets relative to FROM for second flat.tab file.
:param file_path3: File Path for third flat.tab file.
:param whichfrom3: Offsets relative to FROM for third flat.tab file.
:param line_interval: The number of lines to use to set the number of
Fourier transform intervals.
:param window_size: The kernel size for median filtering the offsets,
should be an odd integer.
:param window_width: Sets the boundaries above and below the filtered
average beyond which to exclude outliers.
:return: A gnarly 18-tuple
The values of the 18-tuple are:
0
Time values starting at *t0* (numpy array).
1
Sample direction jitter for each time (numpy array).
2
Line direction jitter for each time (numpy array).
3
The LineRate from the FROM and MATCH files (float).
4
The TDI for the FROM and MATCH files (int).
5
Zero time at which the Fourier Transform steps start (float).
6
Unique time values from file one (numpy array).
7
Unique time values from file two (numpy array).
8
Unique time values from file three (numpy array).
9
List of three numpy arrays representing the Gaussian filtered
offsets in the sample direction from the three input files.
10
List of three numpy arrays representing the Gaussian filtered
offsets in the line direction from the three input files.
11
List of three numpy arrays representing the interpolation of
sample offsets for each input file at each time.
12
List of three numpy arrays representing the interpolation of
line offsets for each input file at each time.
13
The average error of the jitter solution with respect to the
measured offsets.
14
The filter-size index (min_k) that produced the minimum average error.
15
List of three numpy arrays of jitter-check values in the sample
direction, one per input file.
16
List of three numpy arrays of jitter-check values in the line
direction, one per input file.
17
The first value of the sample and line from each file (dict).
"""
t1, offx1, offy1, lines1, dt1, tdi1, linerate1 = parse_file(
file_path1, window_size, window_width, whichfrom1
)
t2, offx2, offy2, lines2, dt2, tdi2, linerate2 = parse_file(
file_path2, window_size, window_width, whichfrom2
)
t3, offx3, offy3, lines3, dt3, tdi3, linerate3 = parse_file(
file_path3, window_size, window_width, whichfrom3
)
# nfft is the number of "steps" that we will be using for our
# fourier transforms. These steps represent a certain number of
# image lines, but also represent the number of "time steps" that
# we will use.
if lines1 == lines2 == lines3:
nfft = upper_power_of_two(lines1 / line_interval)
else:
raise ValueError(
"The number of lines in the three images is not identical."
)
if tdi1 == tdi2 == tdi3:
tdi = tdi3
else:
raise ValueError("The values of tdi are not identical.")
if linerate1 == linerate2 == linerate3:
linerate = linerate3
else:
raise ValueError("The values of linerate are not identical.")
offx_filtered = list(
map(
filter_data,
itertools.repeat(nfft),
itertools.repeat(2 / nfft),
(offx1, offx2, offx3),
)
)
offy_filtered = list(
map(
filter_data,
itertools.repeat(nfft),
itertools.repeat(2 / nfft),
(offy1, offy2, offy3),
)
)
# The values in tt are the fractional points in [0:1]
# that correspond to the nfft number.
tt = np.linspace(0, 1, nfft, endpoint=False)
# The first file to be parsed sets nfftime, t0, and duration
nfftime = np.linspace(t1[0], t1[-1], nfft)
# et_shift = et - t1[0]
t0 = t1[0]
duration = t1[-1] - t0
xinterp = [None] * 3
yinterp = [None] * 3
xinterp[0], yinterp[0], x1, y1, ddt1, overxx1, overyy1 = create_matrices(
t1,
offx_filtered[0],
offy_filtered[0],
dt1,
duration,
t0,
nfft,
nfftime,
tt,
)
xinterp[1], yinterp[1], x2, y2, ddt2, overxx2, overyy2 = create_matrices(
t2,
offx_filtered[1],
offy_filtered[1],
dt2,
duration,
t0,
nfft,
nfftime,
tt,
)
xinterp[2], yinterp[2], x3, y3, ddt3, overxx3, overyy3 = create_matrices(
t3,
offx_filtered[2],
offy_filtered[2],
dt3,
duration,
t0,
nfft,
nfftime,
tt,
)
logger.info("Searching for correct phasetol")
# For the test data, the following while loop will *always* run the full
# number of repetitions. If it were guaranteed that there was only
# one minima in the changing value of *error*, then we could exit
# this loop early, once the error starts going back up, or adopt
# strategies to divide and conquer the phasetol parameter space.
# A better understanding of the error behavior could allow us to speed
# these loops up.
#
# Tests with scipy.optimize.minimize_scalar() indicate that this
# data is not convex, and local minima confuse the minimizers.
# int k = 0;
# double minAvgError = numeric_limits<double>::max();
# int minK = 0;
# double error = errorTol;
# ArrayXd minJitterX, minJitterY;
# MatrixXd overxxx1, overyyy1, overxxx2, overyyy2, overxxx3, overyyy3;
# Tolerance for the error
error_tol = 0.0000000001
repetitions = 50
# Tolerance coefficient for the phase difference
tolcoef = 0.01
k = 0
error = error_tol
min_avg_error = sys.float_info.max
# min_k = 0
# No need to calculate these during every loop:
rh0 = dict(
x1=np.real(x1[0]) / 2.0,
x2=np.real(x2[0]) / 2.0,
x3=np.real(x3[0]) / 2.0,
y1=np.real(y1[0]) / 2.0,
y2=np.real(y2[0]) / 2.0,
y3=np.real(y3[0]) / 2.0,
)
if optimize:
# the scipy.optimize.minimize_scalar() methods got distracted by
# a local minimum
opt_res = minimize_scalar(
jitter_error,
args=(
tolcoef,
tt,
duration,
rh0,
(dt1, dt2, dt3),
xinterp,
yinterp,
(ddt1, ddt2, ddt3),
(overxx1, overxx2, overxx3),
(overyy1, overyy2, overyy3)
),
method="Brent",
bracket=(0, 50),
# method="bounded",
# bounds=(0, 50),
tol=error_tol,
options=dict(maxiter=repetitions, disp=True)
)
logger.info(opt_res)
min_k = opt_res.x
else:
while error >= error_tol and k < repetitions:
k += 1
error = jitter_error(
k,
tolcoef,
tt,
duration,
rh0,
(dt1, dt2, dt3),
xinterp,
yinterp,
(ddt1, ddt2, ddt3),
(overxx1, overxx2, overxx3),
(overyy1, overyy2, overyy3)
)
if error < min_avg_error:
min_avg_error = error
min_k = k
logger.info(f"Minimum Error after phase filtering: {min_avg_error}")
# end while
min_jitterx, min_jittery = jitterxy(
min_k * tolcoef,
(ddt1, ddt2, ddt3),
(overxx1, overxx2, overxx3),
(overyy1, overyy2, overyy3),
)
logger.info("Searching for correct filter size.")
k = 0
min_k = 0
error = error_tol
min_avg_error = sys.float_info.max
# The jitter in the x (sample) and y (line) directions in pixels.
# This is the jitter with minimum error, after scanning through all
# frequencies omega.
# ArrayXd Sample, Line;
# ArrayXd minJitterCheckX1, minJitterCheckX2, minJitterCheckX3;
# ArrayXd minJitterCheckY1, minJitterCheckY2, minJitterCheckY3;
# starting a loop to find the correct filter size
while error >= error_tol and k < repetitions:
k += 1
omega = k - 1
c = omega / (2.0 * nfft)
jitterxx = filter_data(nfft, c, min_jitterx)
jitteryy = filter_data(nfft, c, min_jittery)
jitterxx = jitterxx - jitterxx[0]
jitteryy = jitteryy - jitteryy[0]
# checking
jittercheckx1 = (
np.interp(tt + dt1 / duration, tt, jitterxx, left=0, right=0)
- jitterxx
)
jitterchecky1 = (
np.interp(tt + dt1 / duration, tt, jitteryy, left=0, right=0)
- jitteryy
)
jittercheckx2 = (
np.interp(tt + dt2 / duration, tt, jitterxx, left=0, right=0)
- jitterxx
)
jitterchecky2 = (
np.interp(tt + dt2 / duration, tt, jitteryy, left=0, right=0)
- jitteryy
)
jittercheckx3 = (
np.interp(tt + dt3 / duration, tt, jitterxx, left=0, right=0)
- jitterxx
)
jitterchecky3 = (
np.interp(tt + dt3 / duration, tt, jitteryy, left=0, right=0)
- jitteryy
)
error_vec = (
1.0
/ 6.0
* (
np.abs(xinterp[0] - (jittercheckx1 + rh0["x1"]))
+ np.abs(xinterp[1] - (jittercheckx2 + rh0["x2"]))
+ np.abs(xinterp[2] - (jittercheckx3 + rh0["x3"]))
+ np.abs(yinterp[0] - (jitterchecky1 + rh0["y1"]))
+ np.abs(yinterp[1] - (jitterchecky2 + rh0["y2"]))
+ np.abs(yinterp[2] - (jitterchecky3 + rh0["y3"]))
)
)
error = error_vec.mean()
logger.info(f"Erorr for omega {omega}: {error}")
if error < min_avg_error:
min_k = k
min_avg_error = error
sample = jitterxx
line = jitteryy
min_jitter_check_x1 = jittercheckx1
min_jitter_check_x2 = jittercheckx2
min_jitter_check_x3 = jittercheckx3
min_jitter_check_y1 = jitterchecky1
min_jitter_check_y2 = jitterchecky2
min_jitter_check_y3 = jitterchecky3
# end while
return (
nfftime,
sample,
line,
linerate,
tdi,
t0,
t1,
t2,
t3,
offx_filtered,
offy_filtered,
xinterp,
yinterp,
min_avg_error,
min_k,
[min_jitter_check_x1, min_jitter_check_x2, min_jitter_check_x3],
[min_jitter_check_y1, min_jitter_check_y2, min_jitter_check_y3],
rh0,
)
def jitterxy(phasetol, ddt, overxx, overyy):
# it = iter((ddt, overxx, overyy))
# the_len = len(next(it))
# if not all(len(l) == the_len for l in it):
# raise ValueError('Not all lists have same length!')
# null the frequencies that cause a problem (really zero them out)
masked_overxx = list()
masked_overyy = list()
for (d, x, y) in zip(ddt, overxx, overyy):
xxx, yyy = mask_frequencies(phasetol, d, x, y)
masked_overxx.append(xxx)
masked_overyy.append(yyy)
# Adding all frequencies together
stackedx = np.ma.stack(masked_overxx)
stackedy = np.ma.stack(masked_overyy)
overxxx = np.ma.mean(stackedx, axis=0)
overyyy = np.ma.mean(stackedy, axis=0)
# take the sum of each row
overx = np.ma.sum(overxxx, axis=0)
overy = np.ma.sum(overyyy, axis=0)
jitterx = overx - overx[0]
jittery = overy - overy[0]
return jitterx, jittery
def jitter_error(
k, tolcoef, tt, duration, rh0, dt, xinterp, yinterp, ddt, overxx, overyy
):
# setting the phase tolerance
phasetol = k * tolcoef
jitterx, jittery = jitterxy(
phasetol,
ddt,
overxx,
overyy,
)
# checking
jittercheckx1 = (
np.interp(tt + dt[0] / duration, tt, jitterx, left=0, right=0)
- jitterx
)
jitterchecky1 = (
np.interp(tt + dt[0] / duration, tt, jittery, left=0, right=0)
- jittery
)
jittercheckx2 = (
np.interp(tt + dt[1] / duration, tt, jitterx, left=0, right=0)
- jitterx
)
jitterchecky2 = (
np.interp(tt + dt[1] / duration, tt, jittery, left=0, right=0)
- jittery
)
jittercheckx3 = (
np.interp(tt + dt[2] / duration, tt, jitterx, left=0, right=0)
- jitterx
)
jitterchecky3 = (
np.interp(tt + dt[2] / duration, tt, jittery, left=0, right=0)
- jittery
)
error_vec = (
np.abs(xinterp[0] - (jittercheckx1 + rh0["x1"]))
+ np.abs(xinterp[1] - (jittercheckx2 + rh0["x2"]))
+ np.abs(xinterp[2] - (jittercheckx3 + rh0["x3"]))
+ np.abs(yinterp[0] - (jitterchecky1 + rh0["y1"]))
+ np.abs(yinterp[1] - (jitterchecky2 + rh0["y2"]))
+ np.abs(yinterp[2] - (jitterchecky3 + rh0["y3"]))
) / 6.0
error = error_vec.mean()
logger.info(f"Error for phasetol {phasetol}: {error}")
return error
def create_matrices(
time: np.array,
offx: np.array,
offy: np.array,
dt: float,
duration: float,
t0: float,
nfft: int,
nfftime: np.array,
tt: np.array,
):
"""Returns a tuple of numpy arrays.
:param time: Time values in seconds (numpy array).
:param offx: Sample offsets (numpy array).
:param offy: Line offsets (numpy array).
:param dt: Time difference between the FROM and MATCH times (float).
:param duration: The total time duration (float).
:param t0: Zero time to start the Fourier Transform steps (float).
:param nfft: Number of divisions to use for the Fourier Transform (int).
:param nfftime: *nfft* time values starting at *t0* (numpy array).
:param tt: Fractional values in [0:1] that correspond to the nfft number
(numpy array).
:return: There are seven elements in the tuple:
0: Interpolation of *offx* at the values of *nfftime*
1: Interpolation of *offy* at the values of *nfftime*
2: Fourier transform of xinterp * (2 / *nfft*)
3: Fourier transform of yinterp * (2 / *nfft*)
4: Per-frequency phase difference (ddt) between the FROM and MATCH times
5: overxx: matrix (frequency x time) of per-frequency sine/cosine terms for the sample offsets
6: overyy: matrix (frequency x time) of per-frequency sine/cosine terms for the line offsets
"""
t_shift = time - t0
fx = PchipInterpolator(t_shift, offx, extrapolate=False)
fy = PchipInterpolator(t_shift, offy, extrapolate=False)
xinterp = fx(nfftime - t0)
yinterp = fy(nfftime - t0)
np.nan_to_num(xinterp, copy=False, nan=np.mean(offx))
np.nan_to_num(yinterp, copy=False, nan=np.mean(offy))
# getting the frequencies of the Fourier transform
freq = np.linspace(0, nfft / 2, int(nfft / 2), endpoint=False)
# taking the fourier transform of the offsets
x = 2 * np.fft.fft(xinterp) / nfft
y = 2 * np.fft.fft(yinterp) / nfft
# separating sines and cosines
xa = x[: int(nfft / 2)].real
xb = -1 * x[: int(nfft / 2)].imag
ya = y[: int(nfft / 2)].real
yb = -1 * y[: int(nfft / 2)].imag
# calculates the phase difference
twopi = math.pi * 2
ddt_temp = (dt / duration * twopi) * freq
ddt = ddt_temp - twopi * np.floor(ddt_temp / twopi)
# the coefficients for the frequencies
with np.errstate(divide="ignore"):
aaax = (
-0.5
* (-1 * xa * np.cos(ddt) + np.sin(ddt) * xb - xa)
/ np.sin(ddt)
)
aaay = (
-0.5
* (-1 * ya * np.cos(ddt) + np.sin(ddt) * yb - ya)
/ np.sin(ddt)
)
with np.errstate(invalid="ignore"):
bbbx = -0.5 * (xb * np.cos(ddt) + np.sin(ddt) * xa + xb) / np.sin(ddt)
bbby = -0.5 * (yb * np.cos(ddt) + np.sin(ddt) * ya + yb) / np.sin(ddt)
# create series of sines and cosines
ft = freq.reshape(-1, 1) * tt.reshape(1, -1) * twopi
sn = np.sin(ft)
cn = np.cos(ft)
aaax_rep = np.repeat(aaax.reshape(-1, 1), tt.size, axis=1)
bbbx_rep = np.repeat(bbbx.reshape(-1, 1), tt.size, axis=1)
aaay_rep = np.repeat(aaay.reshape(-1, 1), tt.size, axis=1)
bbby_rep = np.repeat(bbby.reshape(-1, 1), tt.size, axis=1)
with np.errstate(invalid="ignore"):
overxx = aaax_rep * sn + bbbx_rep * cn
overyy = aaay_rep * sn + bbby_rep * cn
# Outputs
# ArrayXd & tt, ArrayXd & ET, ArrayXd & ET_shift,
# ArrayXd & ddt,
# ArrayXd & xinterp, ArrayXd & yinterp,
# ArrayXcd & X, ArrayXcd & Y, MatrixXd & overxx, MatrixXd & overyy
#
return xinterp, yinterp, x, y, ddt, overxx, overyy
def upper_power_of_two(value) -> int:
"""Returns the value of 2 raised to some power which is the smallest
such value that is just >= *value*."""
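# Illustrative examples (editor's note, not part of the original module):
#   upper_power_of_two(5)    -> 8
#   upper_power_of_two(8)    -> 8
#   upper_power_of_two(1000) -> 1024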
result = 1
while result < value:
result <<= 1
return result
def filter_data(nfft: int, c: float, data: np.array) -> np.array:
"""Apply a Gaussian filter to the data in the frequency domain.
:param nfft: The number of steps in the Fourier Transform (int).
:param c: Width coefficient of the Gaussian in the frequency domain; larger
values of *c* suppress more of the high-frequency content (float).
:param data: An array of data to be filtered (numpy array).
"""
if len(data.shape) > 1:
raise IndexError("The data array can only be 1D.")
# Use padding so the data is not distorted.
front_padding = math.floor(nfft - len(data) / 2)
back_padding = math.ceil(nfft - len(data) / 2)
# Apply the padding
padded = np.concatenate(
(
np.array([data[0]] * front_padding),
data,
np.array([data[-1]] * back_padding),
)
)
freq = np.fft.fft(padded)
# The exponential
exp_vec = np.hstack(
(np.linspace(0, nfft - 1, nfft), np.linspace(-1 * nfft, -1, nfft))
)
exponential = np.exp(-1 * c ** 2 * exp_vec ** 2)
# The ifft of the product
filtered = np.fft.ifft(freq * exponential)
# Remove the padding and take the real part
return filtered[front_padding : front_padding + len(data)].real
def mask_frequencies(phasetol: float, ddt: np.array, x: np.array, y: np.array):
"""Returns *x* and *y* as numpy masked arrays with 'problematic frequencies'
masked.
:param phasetol: Phase values from zero to *phasetol* and 2*pi - phasetol
will be masked.
:param ddt: phase difference
:param x: overxx from create_matrices()
:param y: overyy from create_matrices()
It is assumed that *ddt* has the same size as axis 0 of *x* and *y*.
"""
if x.shape != y.shape:
raise ValueError(
f"The shape of x {x.shape} and y {y.shape} must be the same."
)
if x.shape[0] != ddt.size:
raise ValueError(
f"The size of ddt ({ddt.size}) must be the same as the first axis"
f"of the x and y arrays {x.shape}"
)
# mask the frequencies that cause a problem
a = np.less(np.abs(ddt), phasetol)
b = np.greater(np.abs(ddt), (2 * math.pi) - phasetol)
null_positions = np.logical_or(a, b)
# We must reshape to a 2D column, and then tile that across, so that each
# row of the 2D matrix has the same value for all positions.
null_2d = np.tile(null_positions.reshape(-1, 1), (1, x.shape[1]))
x_masked = np.ma.array(x, mask=null_2d, fill_value=0)
y_masked = np.ma.array(y, mask=null_2d, fill_value=0)
return x_masked, y_masked
def parse_file(
file_path: os.PathLike, window_size: int, window_width: int, whichfrom=True
):
"""Returns a tuple of information from the Flat file at *file_path*.
There are seven elements in the tuple:
0: unique time values (numpy array)
1: sample offsets for each time (numpy array)
2: line offsets for each time (numpy array)
3: number of lines listed for the FROM file (int)
4: seconds between the times in the FROM and MATCH files (float)
5: The TDI for the FROM and MATCH files (int)
6: The LineRate from the FROM and MATCH files (float)
*window_size* is the kernel size for median filtering the offsets, should
be an odd integer.
*window_width* determines the boundaries above and below the filtered
average beyond which to exclude outliers.
The optional *whichfrom* parameter determines the sense of the offsets.
The default value of True makes the offsets relative to the From cube,
and False makes the offsets relative to the Match cube.
"""
logger.info(f"Reading: {file_path}")
flat = FlatFile(file_path)
if flat["FROM"]["TdiMode"] == flat["MATCH"]["TdiMode"]:
tdi = int(flat["FROM"]["TdiMode"])
else:
raise ValueError(
f"The TdiMode is different for FROM ({flat['FROM']['TdiMode']}) "
f"and MATCH ({flat['MATCH']['TdiMode']}) in {file_path}"
)
if flat["FROM"]["LineRate"] == flat["MATCH"]["LineRate"]:
line_rate = float(flat["FROM"]["LineRate"].split()[0])
else:
raise ValueError(
f"The LineRate is different for FROM ({flat['FROM']['LineRate']}) "
f"and MATCH ({flat['MATCH']['LineRate']}) in {file_path}"
)
if whichfrom == 1:
column = "FromTime"
which = -1
else:
column = "MatchTime"
which = 1
# dt = which * (data[0][0] - data[0][3]);
dt = which * (float(flat[0]["FromTime"]) - float(flat[0]["MatchTime"]))
time = list()
offset_x = list()
offset_y = list()
for row in flat:
time.append(float(row[column]))
offset_x.append(
which * (float(row["RegSamp"]) - float(row["FromSamp"]))
)
offset_y.append(
which * (float(row["RegLine"]) - float(row["FromLine"]))
)
time_arr = np.array(time)
offx_arr = np.array(offset_x)
offy_arr = np.array(offset_y)
magnitude = np.sqrt(offx_arr ** 2 + offy_arr ** 2)
avemag = medfilt(magnitude, window_size)
# Throw out the out-of-range values:
high_window = avemag + window_width
low_window = avemag - window_width
good_idxs = np.nonzero(
np.logical_and(low_window < magnitude, magnitude < high_window)
)
# Some flat files have more than one measurement for the same timestamp.
# For those that do, the multiple x and y offsets for the same
# timestamp are averaged together.
#
# Also, the original MatLab code had no functions, but repeated code,
# and there was a transcription error on the third pass of this averaging
# such that the very first row was never included in the averaging.
# The C++ code allowed for that broken behavior, but we won't here.
t_arr, unique_idxs = np.unique(time_arr[good_idxs], return_index=True)
if unique_idxs.size == time_arr[good_idxs].size:
offx = offx_arr[good_idxs]
offy = offy_arr[good_idxs]
else:
x_means = list()
for a in np.split(offx_arr[good_idxs], unique_idxs[1:]):
x_means.append(np.mean(a))
offx = np.array(x_means)
y_means = list()
for a in np.split(offy_arr[good_idxs], unique_idxs[1:]):
y_means.append(np.mean(a))
offy = np.array(y_means)
return t_arr, offx, offy, int(flat["FROM"]["Lines"]), dt, tdi, line_rate
def pixel_smear(
t: np.array, sample: np.array, line: np.array, linerate: float, tdi: int,
):
"""Returns the smear values from the derived jitter function.
Pixel smear due to jitter is calculated by interpolating the jitter
function at intervals equivalent to the linerate. Then the
difference is taken over that interval and multiplied by the TDI.
This provides an estimated minimum for pixel smear. If the motion
that caused the smear is not captured in the jitter derivation,
then it cannot be plotted here.
:param t: Time values (numpy array).
:param sample: Pixel offsets in the sample direction (numpy array).
:param line: Pixel offsets in the line direction (numpy array).
:param linerate: The image line rate (float).
:param tdi: The image TDI value (int).
:return: six-tuple which contains:
0. sample maximum smear value (float)
1. line maximum smear value (float)
2. Maximum smear magnitude (float)
3. Sample smear values (numpy array)
4. Line smear values (numpy array)
5. Ephemeris times at linerate intervals (numpy array)
"""
# The output here differs from the C++ output in a very, very minor
# way. The absolute difference between the line and sample smear
# is 0.002 pixels. I could not track this down. It may be due to
# precision differences in the double values that C++ uses, but I'm
# just not sure.
# The array T has large values. Use a shifted version of it when
# interpolating to reduce the effect of those values on numerical
# accuracy.
shifted_t = t - t[0]
# xi = T(1):linerate:T(end);
n = math.floor((shifted_t[-1] - shifted_t[0]) / linerate) + 1
# This is from the original code, but by definition shifted_t[0] is zero.
# xi = np.linspace(0, n - 1, n) * linerate + shifted_t[0]
xi = np.linspace(0, n - 1, n) * linerate
# Interpolate the jitter function at intervals equivalent to the linerate
f_samp = PchipInterpolator(shifted_t, sample, extrapolate=False)
f_line = PchipInterpolator(shifted_t, line, extrapolate=False)
yis = f_samp(xi)
yil = f_line(xi)
np.nan_to_num(yis, copy=False, nan=0)
np.nan_to_num(yil, copy=False, nan=0)
# Undo the earlier shift
xi += t[0]
# Calculate the rate of change with respect to the linerate
# in the sample direction
dysdx = np.diff(yis) * tdi
# in the line direction
dyldx = np.diff(yil) * tdi
# Calculate the magnitude of the smear
mag_smear = np.sqrt(dysdx ** 2 + dyldx ** 2)
# Find maxSmearS, the largest element by magnitude in dysdx
msi = np.argmax(np.abs(dysdx))
max_smear_s = dysdx[msi]
# Find maxSmearL, the largest element by magnitude in dyldx
msi = np.argmax(np.abs(dyldx))
max_smear_l = dyldx[msi]
# Find maxSmearMag, the largest element by magnitude in magSmear
max_smear_mag = np.max(mag_smear)
# Outputs
return max_smear_s, max_smear_l, max_smear_mag, dysdx, dyldx, xi
def set_file_path(location: Path, file_path: Path):
if file_path.parent == location:
return file_path
else:
return location / file_path.name
def plot(
t,
x,
y,
xinterp,
yinterp,
jittercheckx,
jitterchecky,
title,
et,
sample,
line,
show=True,
save=False,
):
plt.ioff()
fig = plt.figure(constrained_layout=True)
gs = fig.add_gridspec(3, 6)
fig.suptitle("Resolve Jitter Results")
ax00 = fig.add_subplot(gs[0, 0:2])
ax00.set_title(title[0])
ax00.set_ylabel("Sample Offset")
ax01 = fig.add_subplot(gs[0, 2:4])
ax01.set_title(title[1])
ax02 = fig.add_subplot(gs[0, 4:])
ax02.set_title(title[2])
ax10 = fig.add_subplot(gs[1, 0:2])
ax10.set_ylabel("Line Offset")
ax10.set_xlabel("Seconds")
ax11 = fig.add_subplot(gs[1, 2:4])
ax11.set_xlabel("Seconds")
ax12 = fig.add_subplot(gs[1, 4:])
ax12.set_xlabel("Seconds")
ax20 = fig.add_subplot(gs[2, 0:3])
ax20.set_title("Cross-Track Jitter")
ax20.set_ylabel("Sample Offset")
ax20.set_xlabel("Seconds")
ax21 = fig.add_subplot(gs[2, 3:])
ax21.set_title("Down-Track Jitter")
ax21.set_ylabel("Line Offset")
ax21.set_xlabel("Seconds")
ax00.plot(t[0], x[0], "o", c="red")
ax00.plot(et, xinterp[0], c="green")
ax00.plot(et, jittercheckx[0], c="yellow")
ax01.plot(t[1], x[1], "o", c="red")
ax01.plot(et, xinterp[1], c="green")
ax01.plot(et, jittercheckx[1], c="yellow")
ax02.plot(t[2], x[2], "o", c="red")
ax02.plot(et, xinterp[2], c="green")
ax02.plot(et, jittercheckx[2], c="yellow")
ax10.plot(t[0], y[0], "o", c="red")
ax10.plot(et, yinterp[0], c="green")
ax10.plot(et, jitterchecky[0], c="yellow")
ax11.plot(t[1], y[1], "o", c="red")
ax11.plot(et, yinterp[1], c="green")
ax11.plot(et, jitterchecky[1], c="yellow")
ax12.plot(t[2], y[2], "o", c="red")
ax12.plot(et, yinterp[2], c="green")
ax12.plot(et, jitterchecky[2], c="yellow")
ax20.plot(et, sample, c="blue")
ax21.plot(et, line, c="blue")
if save:
logger.info(f"Writing: {save}")
plt.savefig(save)
if show:
plt.show()
def write_csv(path: os.PathLike, labels: list, *cols, fillvalue="nan"):
"""Identical to write_data_for_plotting(), but writes a CSV file
instead of a fixed-width text file.
"""
logger.info(f"Writing: {path}")
with open(path, "w", newline="") as csvfile:
writer = csv.writer(csvfile)
writer.writerow(labels)
for zipped in itertools.zip_longest(*cols, fillvalue=fillvalue):
writer.writerow(zipped)
def write_data_for_plotting(
path: os.PathLike, labels: list, *cols, fillvalue="nan"
):
"""Given a collection of arrays of data (*cols*), write those arrays as
fixed-width columns (25 characters wide) in a text file at *path*, prefixed
by the *labels* in the first row. The columns need not have the same number
of elements as each other, and any short columns will be filled with
*fillvalue*.
This is mostly historic to provide a text file that gnuplot can use.
"""
if len(labels) != len(cols):
raise IndexError(
"There is a different number of column labels than columns."
)
logger.info(f"Writing: {path}")
with open(path, "w") as f:
f.write("".join(map(lambda s: s.ljust(25), labels)) + "\n")
# Print the data columns
for zipped in itertools.zip_longest(*cols, fillvalue=fillvalue):
f.write(
"".join(map(lambda z: "{:>25.16}".format(z), zipped)) + "\n"
)
f.write("\n")
def write_smear_data(
path: Path,
max_smear_s,
max_smear_l,
max_smear_mag,
dysdx,
dyldx,
et,
image_id=None,
):
# Make a text file of the smear data
if image_id is None:
id_str = ""
else:
id_str = f" for {image_id}"
smear_text = [
f"""\
# Smear values are calculated from the derived jitter function{id_str}.
# Maximum Cross-track pixel smear {max_smear_s}
# Maximum Down-track pixel smear {max_smear_l}
# Maximum Pixel Smear Magnitude {max_smear_mag}
# Sample Line EphemerisTime"""
]
for ess, ell, exi in zip(dysdx, dyldx, et):
smear_text.append(f"{ess} {ell} {exi}")
logger.info(f"Writing: {path}")
path.write_text("\n".join(smear_text))
return
def write_gnuplot_file(
gnuplot_path: Path,
data_path: Path,
img_path: Path,
file_path1: Path,
file_path2: Path,
file_path3: Path,
):
"""Writes a gnuplot file that will plot the contents of a file
written by write_data_for_plotting().
The file will be written to *gnuplot_path*. *data_path* should be the
file written by write_data_for_plotting(). The *img_path* is the png
file that will be created when the file at *gnuplot_path* is run by
gnuplot. *file_path1*, *file_path2*, and *file_path3* are just used
to provide titles to the plots.
"""
logger.info(f"Writing: {gnuplot_path}")
gnuplot_path.write_text(
f"""\
dataFile = '{data_path}'
imgFile = '{img_path}'
filePath1 = '{file_path1}'
filePath2 = '{file_path2}'
filePath3 = '{file_path3}'
set terminal png size 1200, 900; set output imgFile
#set terminal pdfcairo; set output 'fig.pdf'
set multiplot # get into multiplot mode
set nokey # no legend
set grid
set datafile missing 'nan'
w3 = 1.0/3.0; # will do 3 columns of plots
set size w3, w3
set title filePath1
set origin 0, 2*w3
plot dataFile using 4:5 with points pointtype 7 pointsize 0.6 lc rgb 'red', \
dataFile using 1:6 with lines lc rgb 'green', \
dataFile using 1:7 with lines lc rgb 'yellow'
set title filePath2
set origin w3, 2*w3
plot dataFile using 11:12 with points pointtype 7 pointsize 0.6 lc rgb 'red', \
dataFile using 1:13 with lines lc rgb 'green', \
dataFile using 1:14 with lines lc rgb 'yellow'
set title filePath3
set origin 2*w3, 2*w3
plot dataFile using 18:19 with points pointtype 7 pointsize 0.6 lc rgb 'red', \
dataFile using 1:20 with lines lc rgb 'green', \
dataFile using 1:21 with lines lc rgb 'yellow'
set title ''
set origin 0, w3
plot dataFile using 4:8 with points pointtype 7 pointsize 0.6 lc rgb 'red', \
dataFile using 1:9 with lines lc rgb 'green', \
dataFile using 1:10 with lines lc rgb 'yellow'
set title ''
set origin w3, w3
plot dataFile using 11:15 with points pointtype 7 pointsize 0.6 lc rgb 'red', \
dataFile using 1:16 with lines lc rgb 'green', \
dataFile using 1:17 with lines lc rgb 'yellow'
set title ''
set origin 2*w3, w3
plot dataFile using 18:22 with points pointtype 7 pointsize 0.6 lc rgb 'red', \
dataFile using 1:23 with lines lc rgb 'green', \
dataFile using 1:24 with lines lc rgb 'yellow'
w2 = 0.5 # 1/2 of the plotting window
set size w2, w3
set title 'Cross-track Jitter'
set origin 0, 0
plot dataFile using 1:2 with lines lc rgb 'blue'
set title 'Down-track Jitter'
set origin w2, 0
plot dataFile using 1:3 with lines lc rgb 'blue'
unset multiplot # exit multiplot mode
"""
)
return
| 30.898316
| 80
| 0.609093
|
4704b37e9c30f87eefa31ab9211601949e6a2ec9
| 24,811
|
py
|
Python
|
safe.py
|
bjboyd02/repy_v2
|
18538558117287ddd801429ace5e47f52926343c
|
[
"MIT"
] | null | null | null |
safe.py
|
bjboyd02/repy_v2
|
18538558117287ddd801429ace5e47f52926343c
|
[
"MIT"
] | null | null | null |
safe.py
|
bjboyd02/repy_v2
|
18538558117287ddd801429ace5e47f52926343c
|
[
"MIT"
] | null | null | null |
"""
Authors: Phil Hassey, Armon Dadgar, Moshe Kaplan
Start Date: March 2007
Description:
There are 3 main components to this code:
Code safety analysis
This is done by creating an AST for the code, walking
through it node by node, and checking that only safe nodes
are used and that no unsafe strings are present.
Executing safe code
This is done by creating a dictionary with a key for each built-in
function, and then running the code using that dictionary as our
'context'.
SafeDict Class
This is a dict that prevents 'unsafe' values from being added.
SafeDict is used by virtual_namespace (for the safe eval) as the
dictionary of variables that will be accessible to the running code. The
reason it is important to prevent unsafe keys is because it is possible
to use them to break out of the sandbox. For example, it is possible to
change an objects private variables by manually bypassing python's name
mangling.
The original version of this file was written by Phil Hassey. It has since
been heavily rewritten for use in the Seattle project.
Comments:
Licensing:
This file is public domain.
Authors Comments:
Known limitations:
- Safe doesn't have any testing for timeouts/DoS. One-liners
like these will lock up the system: "while 1: pass", "234234**234234"
This is handled by a separate portion of Repy which manages the CPU
usage.
- Lots of (likely) safe builtins and safe AST Nodes are not allowed.
I suppose you can add them to the whitelist if you want them. I
trimmed it down as much as I thought I could get away with and still
have useful python code.
- Might not work with future versions of python - this is made with
python 2.4 in mind. _STR_NOT_BEGIN might have to be extended
in the future with more magic variable prefixes. Or you can
switch to conservative mode, but then even variables like "my_var"
won't work, which is sort of a nuisance.
- If you get data back from a safe_exec, don't call any functions
or methods - they might not be safe with __builtin__ restored
to its normal state. Work with them again via an additional safe_exec.
- The "context" sent to the functions is not tested at all. If you
pass in a dangerous function {'myfile':file} the code will be able
to call it.
"""
# Reference materials:
# Built-in Objects
# http://docs.python.org/lib/builtin.html
# AST Nodes - compiler
# http://docs.python.org/lib/module-compiler.ast.html
# Types and members - inspection
# http://docs.python.org/lib/inspect-types.html
# The standard type hierarchy
# http://docs.python.org/ref/types.html
# Based loosely on - Restricted "safe" eval - by Babar K. Zafar
# (it isn't very safe, but it got me started)
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/496746
# Securing Python: Controlling the abilities of the interpreter
# (or - why even trying this is likely to end in tears)
# http://us.pycon.org/common/talkdata/PyCon2007/062/PyCon_2007.pdf
import os # This is for some path manipulation
import sys # This is to get sys.executable to launch the external process
import time # This is to sleep
# Currently required to filter out Android-specific debug messages, cf #1080
# and safe_check() below
try:
import android
IS_ANDROID = True
except ImportError:
IS_ANDROID = False
# Hide the DeprecationWarning for compiler
import warnings
warnings.simplefilter('ignore')
import compiler # Required for the code safety check
warnings.resetwarnings()
import UserDict # This is to get DictMixin
import platform # This is for detecting Nokia tablets
import threading # This is to get a lock
import harshexit # This is to kill the external process on timeout
import subprocess # This is to start the external process
import __builtin__
import nonportable # This is to get the current runtime
import repy_constants # This is to get our start-up directory
import exception_hierarchy # For exception classes
import encoding_header # Subtract len(ENCODING_HEADER) from error line numbers.
# Fix to make repy compatible with Python 2.7.2 on Ubuntu 11.10 (ticket #1049)
subprocess.getattr = getattr
# Armon: This is how long we will wait for the external process
# to validate the safety of the user code before we timeout,
# and exit with an exception
# AR: Increasing timeout to 15 seconds, see r3410 / #744
EVALUTATION_TIMEOUT = 15
if platform.machine().startswith('armv'):
# The Nokia needs more time to evaluate code safety, especially
# when under heavy loads
EVALUTATION_TIMEOUT = 200
"""
Repyv2 Changes
NODE_ATTR_OK:
Allow '__' in strings.
Added: 'value'
_NODE_CLASS_OK:
Allow exceptions
Added: 'TryExcept', 'TryFinally', 'Raise', 'ExcepthandlerType', 'Invert',
_BUILTIN_OK:
Disallow exiting directly, use exitall instead.
Removed: 'exit', 'quit'
Needed for tracebackrepy
Added: 'isinstance', 'BaseException', 'WindowsError', 'type', 'issubclass'
Allow primitive marshalling to be built
Added: 'ord', 'chr'
Repy V2 doesn't allow print()
Removed: 'Print', 'Printnl'
_STR_OK:
Added:
'__repr__', '__str__'
"""
# This portion of the code is for the Code safety analysis
# This is done by creating an AST for the code, walking
# through it node by node, and checking that only safe nodes
# are used and that no unsafe strings are present.
_STR_OK = ['__init__','__del__','__iter__', '__repr__', '__str__']
# __ is not allowed because it can be used to access a 'private' object in a class
# by bypassing Python's name mangling.
_STR_NOT_CONTAIN = ['__']
_STR_NOT_BEGIN = ['im_','func_','tb_','f_','co_',]
# Disallow these exact strings.
# encode and decode are not allowed because of the potential for encoding bugs (#982)
_STR_NOT_ALLOWED = ['encode','decode']
def _is_string_safe(token):
"""
<Purpose>
Checks if a string is safe based on the rules defined in _STR_OK,
_STR_NOT_ALLOWED, _STR_NOT_CONTAIN, and _STR_NOT_BEGIN
<Arguments>
token: A value to check.
<Returns>
True if token is safe, false otherwise
"""
# If it's not a string, return True
if type(token) is not str and type(token) is not unicode:
return True
# If the string is explicitly allowed, return True
if token in _STR_OK:
return True
# Check if the string is specifically prohibited:
if token in _STR_NOT_ALLOWED:
return False
# Check all the prohibited sub-strings
for forbidden_substring in _STR_NOT_CONTAIN:
if forbidden_substring in token:
return False
# Check all the prohibited prefixes
# Return True if it is safe.
return not token.startswith(tuple(_STR_NOT_BEGIN))
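# A minimal sketch of how _is_string_safe() behaves on a few sample tokens,
# assuming the whitelist/blacklist constants above are left unchanged. The
# tokens below are hypothetical and this helper is never called by the module.
def _example_string_safety_checks():
  assert _is_string_safe("my_var")        # ordinary identifiers pass
  assert _is_string_safe("__init__")      # explicitly whitelisted in _STR_OK
  assert not _is_string_safe("__dict__")  # contains '__', so it is rejected
  assert not _is_string_safe("encode")    # exact match in _STR_NOT_ALLOWED
  assert not _is_string_safe("func_code") # prohibited 'func_' prefix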
_NODE_CLASS_OK = [
'Add', 'And', 'AssAttr', 'AssList', 'AssName', 'AssTuple',
'Assert', 'Assign','AugAssign', 'Bitand', 'Bitor', 'Bitxor', 'Break',
'CallFunc', 'Class', 'Compare', 'Const', 'Continue',
'Dict', 'Discard', 'Div', 'Ellipsis', 'Expression', 'FloorDiv',
'For', 'Function', 'Getattr', 'If', 'Keyword',
'LeftShift', 'List', 'ListComp', 'ListCompFor', 'ListCompIf', 'Mod',
'Module', 'Mul', 'Name', 'Node', 'Not', 'Or', 'Pass', 'Power',
'Return', 'RightShift', 'Slice', 'Sliceobj',
'Stmt', 'Sub', 'Subscript', 'Tuple', 'UnaryAdd', 'UnarySub', 'While',
# New additions
'TryExcept', 'TryFinally', 'Raise', 'ExcepthandlerType', 'Invert',
]
_NODE_ATTR_OK = ['value']
def _check_node(node):
"""
<Purpose>
Examines a node, its attributes, and all of its children (recursively) for
safety. A node is safe if it is in _NODE_CLASS_OK and an attribute is safe
if it is not a unicode string and is either in _NODE_ATTR_OK or safe, as
defined by _is_string_safe()
<Arguments>
node: A node in an AST
<Exceptions>
CheckNodeException if an unsafe node is used
CheckStrException if an attribute has an unsafe string
<Return>
None
"""
# Subtract length of encoding header from traceback line numbers.
# (See Issue [SeattleTestbed/repy_v2#95])
HEADERSIZE = len(encoding_header.ENCODING_DECLARATION.splitlines())
# Proceed with the node check.
if node.__class__.__name__ not in _NODE_CLASS_OK:
raise exception_hierarchy.CheckNodeException("Unsafe call '" +
str(node.__class__.__name__) + "' in line " + str(node.lineno - HEADERSIZE))
for attribute, value in node.__dict__.iteritems():
# Don't allow the construction of unicode literals
if type(value) == unicode:
raise exception_hierarchy.CheckStrException("Unsafe string '" +
str(value) + "' in line " + str(node.lineno - HEADERSIZE) +
", node attribute '" + str(attribute) + "'")
if attribute in _NODE_ATTR_OK:
continue
# JAC: don't check doc strings for __ and the like... (#889)
if attribute == 'doc' and (node.__class__.__name__ in
['Module', 'Function', 'Class']):
continue
# Check the safety of any strings
if not _is_string_safe(value):
raise exception_hierarchy.CheckStrException("Unsafe string '" +
str(value) + "' in line " + str(node.lineno - HEADERSIZE) +
", node attribute '" + str(attribute) + "'")
for child in node.getChildNodes():
_check_node(child)
def safe_check(code):
"""
<Purpose>
Takes the code as input, and parses it into an AST.
It then calls _check_node, which does a recursive safety check for every
node.
<Arguments>
code: A string representation of python code
<Exceptions>
CheckNodeException if an unsafe node is used
CheckStrException if an attribute has an unsafe string
<Return>
None
"""
parsed_ast = compiler.parse(code)
_check_node(parsed_ast)
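# A hypothetical sketch of calling safe_check() directly, assuming the
# whitelists above: code built only from whitelisted nodes passes silently,
# while a disallowed node or an unsafe attribute string raises. This helper
# is illustrative only and is never invoked by the module.
def _example_safe_check_usage():
  safe_check("x = 1 + 2")   # only whitelisted nodes; returns None
  try:
    safe_check("import os")  # 'Import' is not in _NODE_CLASS_OK
  except exception_hierarchy.CheckNodeException:
    pass
  try:
    safe_check("y = x.__class__")  # '__class__' fails the string check
  except exception_hierarchy.CheckStrException:
    pass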
# End of the code safety checking implementation
# Start code safety checking wrappers
def safe_check_subprocess(code):
"""
<Purpose>
Runs safe_check() in a subprocess. This is done because the AST-based
safe_check() uses a large amount of RAM. By running safe_check() in a
subprocess we can guarantee that the memory will be reclaimed when the
process ends.
<Arguments>
code: See safe_check.
<Exceptions>
As with safe_check.
<Return>
See safe_check.
"""
if IS_ANDROID:
(readhandle, writehandle) = os.pipe()
procpid = os.fork()
if procpid == 0:
os.close(readhandle)
# Check the code
try:
output = str(safe_check(code))
except Exception, e:
output = str(type(e)) + " " + str(e)
nonportable.write_message_to_pipe(writehandle, "safe_check", output)
os._exit(0)
else:
os.close(writehandle)
else:
# Get the path to safe_check.py by using the original start directory of python
path_to_safe_check = os.path.join(repy_constants.REPY_START_DIR, "safe_check.py")
# Start a safety check process, reading from the user code and outputting to a pipe we can read
proc = subprocess.Popen([sys.executable, path_to_safe_check],
stdin=subprocess.PIPE, stdout=subprocess.PIPE)
# Write out the user code, close so the other end gets an EOF
proc.stdin.write(code)
proc.stdin.close()
# Wait for the process to terminate
starttime = nonportable.getruntime()
# Only wait up to EVALUTATION_TIMEOUT seconds before terminating
while nonportable.getruntime() - starttime < EVALUTATION_TIMEOUT:
# Did the process finish running?
if (IS_ANDROID and os.waitpid(procpid, os.WNOHANG) != (0, 0)) or \
(not IS_ANDROID and proc.poll() != None):
break
time.sleep(0.02)
else:
# Kill the timed-out process
try:
harshexit.portablekill(procpid)
except:
pass
raise Exception, "Evaluation of code safety exceeded timeout threshold \
("+str(nonportable.getruntime() - starttime)+" seconds)"
if IS_ANDROID:
# Should return ("safe_check", "None")
msg = nonportable.read_message_from_pipe(readhandle)
if type(msg) == tuple and len(msg) == 2 and msg[0] == "safe_check":
rawoutput = msg[1]
else:
rawoutput = ""
else:
# Read the output and close the pipe
rawoutput = proc.stdout.read()
proc.stdout.close()
output = rawoutput
# Check the output, None is success, else it is a failure
if output == "None":
return True
# If there is no output, this is a fatal error condition
elif output == "":
raise Exception, "Fatal error while evaluating code safety!"
else:
# Raise the error from the output
raise exception_hierarchy.SafeException, output
# Get a lock for serial_safe_check
SAFE_CHECK_LOCK = threading.Lock()
# Wraps safe_check to serialize calls
def serial_safe_check(code):
"""
<Purpose>
Serializes calls to safe_check_subprocess(). This is because safe_check_subprocess()
creates a new process which may take many seconds to return. This prevents us from
creating many new python processes.
<Arguments>
code: See safe_check.
<Exceptions>
As with safe_check.
<Return>
See safe_check.
"""
SAFE_CHECK_LOCK.acquire()
try:
return safe_check_subprocess(code)
finally:
SAFE_CHECK_LOCK.release()
#End of static analysis portion
# This portion of the code is for the safe exec.
# The first step is to create a dictionary with a key for each built-in function
# We then replace all built-in functions with the values in that dictionary.
# We then run our code using that dictionary as our 'context'
# When we're done, we restore the original __builtin__ from a backup
# safe replacement for the built-in function `type()`
_type = type
_compile_type = _type(compile('','','exec'))
def safe_type(*args, **kwargs):
if len(args) != 1 or kwargs:
raise exception_hierarchy.RunBuiltinException(
'type() may only take exactly one non-keyword argument.')
# Fix for #1189
# if _type(args[0]) is _type or _type(args[0]) is _compile_type:
# raise exception_hierarchy.RunBuiltinException(
# 'unsafe type() call.')
# JAC: The above would be reasonable, but it is harsh. The wrapper code for
# the encasement library needs to have a way to check the type of things and
# these might inadvertently be types. It is hard to know if something
# is a type.
if args[0] == safe_type or args[0] == _type or _type(args[0]) is _type:
return safe_type
if _type(args[0]) is _type or _type(args[0]) is _compile_type:
raise exception_hierarchy.RunBuiltinException(
'unsafe type() call.')
return _type(args[0])
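# Illustrative sketch of safe_type()'s behaviour, assuming the replacement
# above: querying an ordinary instance works as usual, while asking about
# type objects themselves returns safe_type instead of exposing `type`.
# This helper is hypothetical and is never called.
def _example_safe_type_usage():
  assert safe_type(5) is int             # normal instances resolve normally
  assert safe_type(int) is safe_type     # built-in type objects fold back
  assert safe_type(safe_type) is safe_type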
# This dict maps built-in functions to their replacement functions
_BUILTIN_REPLACE = {
'type': safe_type
}
# The list of built-in exceptions can be generated by running the following:
# r = [v for v in dir(__builtin__) if v[0] != '_' and v[0] == v[0].upper()] ; r.sort() ; print r
_BUILTIN_OK = [
'__debug__',
'ArithmeticError', 'AssertionError', 'AttributeError', 'DeprecationWarning',
'EOFError', 'Ellipsis', 'EnvironmentError', 'Exception', 'False',
'FloatingPointError', 'FutureWarning', 'IOError', 'ImportError',
'IndentationError', 'IndexError', 'KeyError', 'KeyboardInterrupt',
'LookupError', 'MemoryError', 'NameError', 'None', 'NotImplemented',
'NotImplementedError', 'OSError', 'OverflowError', 'OverflowWarning',
'PendingDeprecationWarning', 'ReferenceError', 'RuntimeError', 'RuntimeWarning',
'StandardError', 'StopIteration', 'SyntaxError', 'SyntaxWarning', 'SystemError',
'SystemExit', 'TabError', 'True', 'TypeError', 'UnboundLocalError',
'UnicodeDecodeError', 'UnicodeEncodeError', 'UnicodeError',
'UnicodeTranslateError', 'UserWarning', 'ValueError', 'Warning', 'ZeroDivisionError',
'abs', 'bool', 'cmp', 'complex', 'dict', 'divmod', 'filter', 'float',
'frozenset', 'hex', 'id', 'int', 'len', 'list', 'long', 'map', 'max', 'min',
'object', 'oct', 'pow', 'range', 'reduce', 'repr', 'round', 'set', 'slice',
'str', 'sum', 'tuple', 'xrange', 'zip','id',
#Added for repyv2
'isinstance', 'BaseException', 'WindowsError', 'type', 'issubclass',
'ord', 'chr'
]
_BUILTIN_STR = ['copyright','credits','license','__name__','__doc__',]
def _replace_unsafe_builtin(unsafe_call):
# This function will replace any unsafe built-in function
def exceptionraiser(*vargs,**kargs):
raise exception_hierarchy.RunBuiltinException("Unsafe call '" +
str(unsafe_call) + "' with args '" + str(vargs) + "', kwargs '" +
str(kargs) + "'")
return exceptionraiser
# Stores the current list of allowed built-in functions.
_builtin_globals = None
# Stores a backup copy of all the built-in functions
_builtin_globals_backup = None
# Populates `_builtin_globals` with keys for every built-in function
# The values will either be the actual function (if safe), a replacement
# function, or a stub function that raises an exception.
def _builtin_init():
global _builtin_globals, _builtin_globals_backup
# If _builtin_init() was already called there's nothing to do
if _builtin_globals != None:
return
# Create a backup of the built-in functions
#TODO: Perhaps pull this out of the function - Is there a reason to do this more than once?
_builtin_globals_backup = __builtin__.__dict__.copy()
_builtin_globals = {}
for builtin in __builtin__.__dict__.iterkeys():
# It's important to check _BUILTIN_REPLACE before _BUILTIN_OK because
# even if the name is defined in both, there must be a security reason
# why it was supposed to be replaced, and not just allowed.
if builtin in _BUILTIN_REPLACE:
replacewith = _BUILTIN_REPLACE[builtin]
elif builtin in _BUILTIN_OK:
replacewith = __builtin__.__dict__[builtin]
elif builtin in _BUILTIN_STR:
replacewith = ''
else:
# Replace the function with our exception-raising variant
replacewith = _replace_unsafe_builtin(builtin)
_builtin_globals[builtin] = replacewith
# Armon: Make SafeDict available
_builtin_globals["SafeDict"] = get_SafeDict
# Make the repy exception hierarchy available
# This is done by making every exception in _EXPORTED_EXCEPTIONS
# available as a built-in
for exception_name in exception_hierarchy._EXPORTED_EXCEPTIONS:
_builtin_globals[exception_name] = exception_hierarchy.__dict__[exception_name]
# Replace every function in __builtin__ with the one from _builtin_globals.
def _builtin_destroy():
_builtin_init()
for builtin_name, builtin in _builtin_globals.iteritems():
__builtin__.__dict__[builtin_name] = builtin
# Restore every function in __builtin__ with the backup from _builtin_globals_backup.
def _builtin_restore():
for builtin_name, builtin in _builtin_globals_backup.iteritems():
__builtin__.__dict__[builtin_name] = builtin
# Have the builtins already been destroyed?
BUILTINS_DESTROYED = False
def safe_run(code,context=None):
"""
<Purpose>
Executes code with only safe builtins.
If context is passed in, those keys will be available to the code.
<Arguments>
code: A string representation of python code
context: A dictionary of variables to execute 'in'
<Exceptions>
exception_hierarchy.RunBuiltinException if an unsafe call is made
Whatever else the source code may raise
<Return>
None
"""
global BUILTINS_DESTROYED
if context == None:
context = {}
# Destroy the builtins if needed
if not BUILTINS_DESTROYED:
BUILTINS_DESTROYED = True
_builtin_destroy()
try:
context['__builtins__'] = _builtin_globals
exec code in context
finally:
#_builtin_restore()
pass
# Convenience functions
def safe_exec(code, context = None):
"""
<Purpose>
Checks the code for safety. It then executes code with only safe builtins.
This is a wrapper for calling serial_safe_check() and safe_run()
<Arguments>
code: A string representation of python code
context: A dictionary of variables to execute 'in'
<Exceptions>
CheckNodeException if an unsafe node is used
CheckStrException if an attribute has an unsafe string
exception_hierarchy.RunBuiltinException if an unsafe call is made
Whatever else the code may raise
<Return>
None
"""
serial_safe_check(code)
safe_run(code, context)
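# A minimal usage sketch for safe_exec(), assuming the caller trusts every
# value it places into the context dictionary (the module deliberately does
# not vet it). The helper is hypothetical and is never called on import.
def _example_safe_exec_usage():
  context = {}
  safe_exec("result = sum([1, 2, 3])", context)
  # Globals assigned by the executed code are visible to the caller afterwards.
  assert context["result"] == 6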
# This portion of the code defines a SafeDict
# A SafeDict prevents keys which are 'unsafe' strings from being added.
# Functional constructor for SafeDict to allow us to safely map it into the repy context.
def get_SafeDict(*args,**kwargs):
return SafeDict(*args,**kwargs)
class SafeDict(UserDict.DictMixin):
"""
<Purpose>
A dictionary implementation which prohibits "unsafe" keys from being set or
get. This is done by checking the key with _is_string_safe().
SafeDict is used by virtual_namespace (for the safe eval) as the dictionary
of variables that will be accessible to the running code. The reason it is
important to prevent unsafe keys is because it is possible to use them to
break out of the sandbox. For example, it is possible to change an object's
private variables by manually bypassing python's name mangling.
"""
def __init__(self,from_dict=None):
# Create the underlying dictionary
self.__under__ = {}
# Break if we are done...
if from_dict is None:
return
if type(from_dict) is not dict and not isinstance(from_dict,SafeDict):
return
# If we are given a dict, try to copy its keys
for key,value in from_dict.iteritems():
# Skip __builtins__ and __doc__ since safe_run/python inserts that
if key in ["__builtins__","__doc__"]:
continue
# Check the key type
if type(key) is not str and type(key) is not unicode:
raise TypeError, "'SafeDict' keys must be of string type!"
# Check if the key is safe
if _is_string_safe(key):
self.__under__[key] = value
# Throw an exception if the key is unsafe
else:
raise ValueError, "Unsafe key: '"+key+"'"
# Allow getting items
def __getitem__(self,key):
if type(key) is not str and type(key) is not unicode:
raise TypeError, "'SafeDict' keys must be of string type!"
if not _is_string_safe(key):
raise ValueError, "Unsafe key: '"+key+"'"
return self.__under__.__getitem__(key)
# Allow setting items
def __setitem__(self,key,value):
if type(key) is not str and type(key) is not unicode:
raise TypeError, "'SafeDict' keys must be of string type!"
if not _is_string_safe(key):
raise ValueError, "Unsafe key: '"+key+"'"
return self.__under__.__setitem__(key,value)
# Allow deleting items
def __delitem__(self,key):
if type(key) is not str and type(key) is not unicode:
raise TypeError, "'SafeDict' keys must be of string type!"
if not _is_string_safe(key):
raise ValueError, "Unsafe key: '"+key+"'"
return self.__under__.__delitem__(key)
# Allow checking if a key is set
def __contains__(self,key):
if type(key) is not str and type(key) is not unicode:
raise TypeError, "'SafeDict' keys must be of string type!"
if not _is_string_safe(key):
raise ValueError, "Unsafe key: '"+key+"'"
return key in self.__under__
# Return the key set
def keys(self):
# Filter out the unsafe keys from the underlying dict
safe_keys = []
for key in self.__under__.iterkeys():
if _is_string_safe(key):
safe_keys.append(key)
# Return the safe keys
return safe_keys
# allow us to be printed
# this gets around the __repr__ infinite loop issue ( #918 ) for simple cases
# It seems unlikely this is adequate for more complex cases (like safedicts
# that refer to each other)
def __repr__(self):
newdict = {}
for safekey in self.keys():
if self.__under__[safekey] == self:
newdict[safekey] = newdict
else:
newdict[safekey] = self.__under__[safekey]
return newdict.__repr__()
# Allow a copy of us
def copy(self):
# Create a new instance
copy_inst = SafeDict(self.__under__)
# Return the new instance
return copy_inst
# Make our fields read-only
# This means __getattr__ can do its normal thing, but any
# setters need to be overridden to prohibit adding/deleting/updating
def __setattr__(self,name,value):
# Allow setting __under__ on initialization
if name == "__under__" and name not in self.__dict__:
self.__dict__[name] = value
return
raise TypeError,"'SafeDict' attributes are read-only!"
def __delattr__(self,name):
raise TypeError,"'SafeDict' attributes are read-only!"
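# Illustrative sketch of SafeDict's key filtering, assuming the class above
# is unchanged. Hypothetical usage only; nothing here runs on import.
def _example_safedict_usage():
  sd = SafeDict({"greeting": "hello"})   # safe keys are copied in
  sd["count"] = 3                        # ordinary string keys are accepted
  try:
    sd["__class__"] = "nope"             # keys containing '__' are rejected
  except ValueError:
    pass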
| 32.263979
| 98
| 0.69711
|
13fdb6f7e51bbf8ba730b49c4f30553457f69db7
| 37
|
py
|
Python
|
week5/keys.py
|
shiqin-liu/ppua6202-python-policy
|
353b5e0044bfa9d6cef62bb436297ff0197e651d
|
[
"MIT"
] | 1
|
2021-12-30T20:06:57.000Z
|
2021-12-30T20:06:57.000Z
|
week5/keys.py
|
shiqin-liu/ppua6202-python-policy
|
353b5e0044bfa9d6cef62bb436297ff0197e651d
|
[
"MIT"
] | null | null | null |
week5/keys.py
|
shiqin-liu/ppua6202-python-policy
|
353b5e0044bfa9d6cef62bb436297ff0197e651d
|
[
"MIT"
] | null | null | null |
census_api_key = 'your-key-code-here'
| 37
| 37
| 0.783784
|
6635178e29909170d7e2e2463db467eebf4a7925
| 1,852
|
py
|
Python
|
plab/smu/sweep_voltage.py
|
joamatab/photonic-coupling-drivers
|
c12581d8e2158a292e1c585e45c0207c8129c0f1
|
[
"MIT"
] | null | null | null |
plab/smu/sweep_voltage.py
|
joamatab/photonic-coupling-drivers
|
c12581d8e2158a292e1c585e45c0207c8129c0f1
|
[
"MIT"
] | null | null | null |
plab/smu/sweep_voltage.py
|
joamatab/photonic-coupling-drivers
|
c12581d8e2158a292e1c585e45c0207c8129c0f1
|
[
"MIT"
] | null | null | null |
from typing import Iterable, Union, Callable
from time import strftime, localtime
import pandas as pd
import numpy as np
from tqdm import tqdm
import qontrol
from plab.config import logger, CONFIG
from plab.measurement import measurement, Measurement
from plab.smu.smu_qontrol import smu_qontrol
@measurement
def sweep_voltage(
vmin: float = 0.0,
vmax: float = 2.0,
vsteps: int = 3,
channels: Union[Iterable[int], int] = 64,
get_instrument: Callable = smu_qontrol,
**kwargs,
) -> Measurement:
"""Sweep voltage and measure current.
Args:
vmin: min voltage
vmax: max voltage
vsteps: number of steps
channels: number of channels to sweep or specific channels (iterable)
**kwargs: captures labstate metadata in @measurement
"""
q = get_instrument()
voltages = np.linspace(vmin, vmax, vsteps)
df = pd.DataFrame(dict(v=voltages))
if isinstance(channels, int):
channels = range(channels)
for channel in tqdm(channels):
currents = np.zeros_like(voltages)
for j, voltage in enumerate(voltages):
q.v[channel] = float(voltage)
currents[j] = q.i[channel]
q.v[channel] = 0
df[f"i_{channel}"] = currents
df.set_index(df["v"], inplace=True)
df.pop("v")
return df
def get_current(channel: int, voltage: float) -> float:
"""Sets voltage for a channel and returns measured current.
Args:
channel:
voltage:
"""
q = smu_qontrol()
q.v[channel] = float(voltage)
return q.i[channel]
def zero_voltage() -> None:
"""Sets all voltage channels to zero."""
q = smu_qontrol()
q.v[:] = 0
return
if __name__ == "__main__":
zero_voltage()
# print(get_current(62, 0.1))
# m = sweep_voltage(vmax=3, channels=(1,))
# m.write()
| 24.051948
| 77
| 0.636609
|
3a7edc74eb6c3105f275b385479351086c5bd056
| 895
|
py
|
Python
|
texts/migrations/0003_auto_20150908_2049.py
|
ben174/showertexts
|
b05e43dc2f0336f006a219ee7e30b94d4afd1311
|
[
"MIT"
] | 13
|
2015-08-13T19:38:52.000Z
|
2021-04-07T18:52:26.000Z
|
texts/migrations/0003_auto_20150908_2049.py
|
ben174/showertexts
|
b05e43dc2f0336f006a219ee7e30b94d4afd1311
|
[
"MIT"
] | 2
|
2015-09-14T17:50:17.000Z
|
2021-06-10T17:58:37.000Z
|
texts/migrations/0003_auto_20150908_2049.py
|
ben174/showertexts
|
b05e43dc2f0336f006a219ee7e30b94d4afd1311
|
[
"MIT"
] | 3
|
2018-09-10T10:04:57.000Z
|
2021-04-07T18:52:34.000Z
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('texts', '0002_auto_20150811_0432'),
]
operations = [
migrations.AddField(
model_name='subscriber',
name='date_renewed',
field=models.DateTimeField(default=datetime.datetime(2015, 9, 9, 3, 49, 37, 469623, tzinfo=utc), auto_now_add=True),
preserve_default=False,
),
migrations.AddField(
model_name='subscriber',
name='expired',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='subscriber',
name='lifetime',
field=models.BooleanField(default=False),
),
]
| 27.121212
| 128
| 0.605587
|
f1d8b82e587d465011be35d70a452fab21c1d7fa
| 5,688
|
py
|
Python
|
rllib/examples/remote_envs_with_inference_done_on_main_node.py
|
jianoaix/ray
|
1701b923bc83905f8961c06a6a173e3eba46a936
|
[
"Apache-2.0"
] | null | null | null |
rllib/examples/remote_envs_with_inference_done_on_main_node.py
|
jianoaix/ray
|
1701b923bc83905f8961c06a6a173e3eba46a936
|
[
"Apache-2.0"
] | null | null | null |
rllib/examples/remote_envs_with_inference_done_on_main_node.py
|
jianoaix/ray
|
1701b923bc83905f8961c06a6a173e3eba46a936
|
[
"Apache-2.0"
] | null | null | null |
"""
This script demonstrates how one can specify n (vectorized) envs
as ray remote (actors), such that stepping through these occurs in parallel.
Also, actions for each env step will be calculated on the "main" node.
This can be useful if the "main" node is a GPU machine and we would like to
speed up batched action calculations, similar to DeepMind's SEED
architecture, described here:
https://ai.googleblog.com/2020/03/massively-scaling-reinforcement.html
"""
import argparse
import os
import ray
from ray.rllib.algorithms.ppo import PPO
from ray.rllib.algorithms.algorithm import Algorithm
from ray.rllib.utils.annotations import override
from ray.rllib.utils.test_utils import check_learning_achieved
from ray import tune
from ray.tune import PlacementGroupFactory
from ray.tune.logger import pretty_print
def get_cli_args():
"""Create CLI parser and return parsed arguments"""
parser = argparse.ArgumentParser()
# example-specific args
# This should be >1, otherwise, remote envs make no sense.
parser.add_argument("--num-envs-per-worker", type=int, default=4)
# general args
parser.add_argument(
"--framework",
choices=["tf", "tf2", "tfe", "torch"],
default="tf",
help="The DL framework specifier.",
)
parser.add_argument(
"--as-test",
action="store_true",
help="Whether this script should be run as a test: --stop-reward must "
"be achieved within --stop-timesteps AND --stop-iters.",
)
parser.add_argument(
"--stop-iters", type=int, default=50, help="Number of iterations to train."
)
parser.add_argument(
"--stop-timesteps",
type=int,
default=100000,
help="Number of timesteps to train.",
)
parser.add_argument(
"--stop-reward",
type=float,
default=150.0,
help="Reward at which we stop training.",
)
parser.add_argument(
"--no-tune",
action="store_true",
help="Run without Tune using a manual train loop instead. Here,"
"there is no TensorBoard support.",
)
parser.add_argument(
"--local-mode",
action="store_true",
help="Init Ray in local mode for easier debugging.",
)
args = parser.parse_args()
print(f"Running with following CLI args: {args}")
return args
# The modified Trainer class we will use. This is exactly the same
# as PPO, but with the additional default_resource_request
# override, telling tune that it's ok (not mandatory) to place our
# n remote envs on a different node (each env using 1 CPU).
class PPORemoteInference(PPO):
@classmethod
@override(Algorithm)
def default_resource_request(cls, config):
cf = dict(cls.get_default_config(), **config)
# Return PlacementGroupFactory containing all needed resources
# (already properly defined as device bundles).
return PlacementGroupFactory(
bundles=[
{
# Single CPU for the local worker. This CPU will host the
# main model in this example (num_workers=0).
"CPU": 1,
# Possibly add n GPUs to this.
"GPU": cf["num_gpus"],
},
{
# Different bundle (meaning: possibly different node)
# for your n "remote" envs (set remote_worker_envs=True).
"CPU": cf["num_envs_per_worker"],
},
],
strategy=config.get("placement_strategy", "PACK"),
)
if __name__ == "__main__":
args = get_cli_args()
ray.init(num_cpus=6, local_mode=args.local_mode)
config = {
"env": "CartPole-v0",
# Force sub-envs to be ray.actor.ActorHandles, so we can step
# through them in parallel.
"remote_worker_envs": True,
# Set the number of CPUs used by the (local) worker, aka "driver"
# to match the number of ray remote envs.
"num_cpus_for_driver": args.num_envs_per_worker + 1,
# Use a single worker (however, with n parallelized remote envs, maybe
# even running on another node).
# Action computations will occur on the "main" (GPU?) node, while
# the envs run on one or more CPU node(s).
"num_workers": 0,
"num_envs_per_worker": args.num_envs_per_worker,
# Use GPUs iff `RLLIB_NUM_GPUS` env var set to > 0.
"num_gpus": int(os.environ.get("RLLIB_NUM_GPUS", "0")),
"framework": args.framework,
}
# Run as manual training loop.
if args.no_tune:
# manual training loop using PPO and manually keeping track of state
algo = PPORemoteInference(config=config)
# run manual training loop and print results after each iteration
for _ in range(args.stop_iters):
result = algo.train()
print(pretty_print(result))
# Stop training if the target train steps or reward are reached.
if (
result["timesteps_total"] >= args.stop_timesteps
or result["episode_reward_mean"] >= args.stop_reward
):
break
# Run with Tune for auto env and trainer creation and TensorBoard.
else:
stop = {
"training_iteration": args.stop_iters,
"timesteps_total": args.stop_timesteps,
"episode_reward_mean": args.stop_reward,
}
results = tune.run(PPORemoteInference, config=config, stop=stop, verbose=1)
if args.as_test:
check_learning_achieved(results, args.stop_reward)
ray.shutdown()
| 35.111111
| 83
| 0.627461
|
facfc3179406c042f3c52506b5741ddfbbdf0e4b
| 1,643
|
py
|
Python
|
allure-pytest/setup.py
|
reinaldorossetti/allure-python
|
76c1bf80714cb95f69f9fed401e564ef055f37de
|
[
"Apache-2.0"
] | 1
|
2021-04-25T16:44:40.000Z
|
2021-04-25T16:44:40.000Z
|
allure-pytest/setup.py
|
reinaldorossetti/allure-python
|
76c1bf80714cb95f69f9fed401e564ef055f37de
|
[
"Apache-2.0"
] | null | null | null |
allure-pytest/setup.py
|
reinaldorossetti/allure-python
|
76c1bf80714cb95f69f9fed401e564ef055f37de
|
[
"Apache-2.0"
] | null | null | null |
import os,sys
from setuptools import setup
from pkg_resources import require, DistributionNotFound, VersionConflict
try:
require('pytest-allure-adaptor')
print("""
You have pytest-allure-adaptor installed.
You need to remove pytest-allure-adaptor from your site-packages
before installing allure-pytest, or conflicts may result.
""")
sys.exit()
except (DistributionNotFound, VersionConflict):
pass
PACKAGE = "allure-pytest"
VERSION = "2.3.2b1"
classifiers = [
'Development Status :: 4 - Beta',
'Framework :: Pytest',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Topic :: Software Development :: Quality Assurance',
'Topic :: Software Development :: Testing',
]
install_requires = [
"pytest>=3.3.0",
"six>=1.9.0",
"allure-python-commons==2.3.2b1"
]
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
def main():
setup(
name=PACKAGE,
version=VERSION,
description="Allure pytest integration",
url="https://github.com/allure-framework/allure-python",
author="QAMetaSoftware, Stanislav Seliverstov",
author_email="sseliverstov@qameta.io",
license="Apache-2.0",
classifiers=classifiers,
keywords="allure reporting pytest",
long_description=read('README.rst'),
packages=["allure_pytest"],
package_dir={"allure_pytest": "src"},
entry_points={"pytest11": ["allure_pytest = allure_pytest.plugin"]},
install_requires=install_requires
)
if __name__ == '__main__':
main()
| 27.383333
| 76
| 0.664638
|
8ad266f6f86e7b4bba9f3eb9a32812c60d45de7f
| 2,901
|
py
|
Python
|
nostocalean/est/fixest.py
|
jiafengkevinchen/nostocalean
|
e666dd980009bc42abb46d474bcc74c01b94617a
|
[
"MIT"
] | null | null | null |
nostocalean/est/fixest.py
|
jiafengkevinchen/nostocalean
|
e666dd980009bc42abb46d474bcc74c01b94617a
|
[
"MIT"
] | null | null | null |
nostocalean/est/fixest.py
|
jiafengkevinchen/nostocalean
|
e666dd980009bc42abb46d474bcc74c01b94617a
|
[
"MIT"
] | 1
|
2021-11-19T14:55:20.000Z
|
2021-11-19T14:55:20.000Z
|
"""Methods for calling fixest using rpy2."""
import re
from typing import Optional
from rpy2 import robjects
from rpy2.robjects import packages
import pandas as pd
from nostocalean.functions import clean_name, suppress
RegressionResult = robjects.vectors.ListVector
base = packages.importr("base")
fixest = packages.importr("fixest")
class FixestResult:
"""Accessors for a fixest result."""
def __init__(self, result: robjects.vectors.ListVector, se: str):
self.result = result
self.rx = self.result.rx
self.se = se
def summary(self, **kwargs) -> str:
"""Return a string summary of a feols result."""
if "se" not in kwargs:
kwargs["se"] = self.se
with suppress():
return str(base.summary(self.result, **kwargs)) # pylint: disable=no-member
def get_table(self) -> pd.DataFrame:
"""Return the coefficient table from a feols regression result."""
return (
self.result.rx["coeftable"][0]
.rename(columns=clean_name)
.rename(columns={"pr_t": "p_value"})
)
def feols(
fml: str,
data: pd.DataFrame,
se: Optional[str] = None,
**kwargs,
) -> FixestResult:
"""Wrapper for calling fixest::feols in R."""
if se is None:
se = "cluster" if "cluster" in kwargs else "hetero"
columns = set(re.findall(r"[\w']+", fml))
columns = [column for column in columns if column != "1"]
if "cluster" in kwargs:
columns = columns + list(set(re.findall(r"[\w']+", kwargs["cluster"])))
result = fixest.feols( # pylint: disable=no-member
robjects.Formula(fml),
data=data[columns].dropna(subset=columns),
se=se,
**kwargs,
)
return FixestResult(result, se=se)
def feglm(
fml: str,
data: pd.DataFrame,
se: Optional[str] = None,
**kwargs,
) -> FixestResult:
"""Wrapper for calling fixest::feglm in R."""
if se is None:
se = "cluster" if "cluster" in kwargs else "hetero"
columns = set(re.findall(r"[\w']+", fml))
columns = [column for column in columns if column != "1"]
if "cluster" in kwargs:
columns = columns + list(set(re.findall(r"[\w']+", kwargs["cluster"])))
result = fixest.feglm( # pylint: disable=no-member
robjects.Formula(fml),
data=data[columns].dropna(subset=columns),
se=se,
**kwargs,
)
return FixestResult(result, se=se)
def reg(*args, **kwargs) -> str:
"""Run a feols regression and return the summary."""
return feols(*args, **kwargs).summary()
def preg(*args, **kwargs) -> None:
"""Run a feols regression and print the summary."""
print(feols(*args, **kwargs).summary())
def treg(*args, **kwargs) -> pd.DataFrame:
"""Run a feols regression and return the coefficient table."""
return feols(*args, **kwargs).get_table()
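# A hypothetical usage sketch for the wrappers above. It assumes a pandas
# DataFrame with columns y, x and firm_id; the formula follows fixest's R
# syntax (outcome ~ regressors | fixed effects). Not executed on import.
def _example_reg_usage(df: pd.DataFrame) -> pd.DataFrame:
    """Cluster-robust feols of y on x with firm_id fixed effects (sketch)."""
    preg("y ~ x | firm_id", data=df, cluster="firm_id")
    return treg("y ~ x | firm_id", data=df, cluster="firm_id")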
| 26.614679
| 88
| 0.613237
|
3656cab44b971cc68a2561efdd667d02fb35d8b4
| 442
|
py
|
Python
|
areaofrectangle.py
|
Ahmad-Aiman/Calculate-Area-of-Rectangle
|
ff33a2eab14bffc1a8c29a9134cabea48b69538b
|
[
"MIT"
] | null | null | null |
areaofrectangle.py
|
Ahmad-Aiman/Calculate-Area-of-Rectangle
|
ff33a2eab14bffc1a8c29a9134cabea48b69538b
|
[
"MIT"
] | null | null | null |
areaofrectangle.py
|
Ahmad-Aiman/Calculate-Area-of-Rectangle
|
ff33a2eab14bffc1a8c29a9134cabea48b69538b
|
[
"MIT"
] | null | null | null |
#Area of a rectangle = width x length
#Perimeter of a rectangle = 2 x (length + width)
width_input = float (input("\nPlease enter width: "))
length_input = float (input("Please enter length: "))
areaofRectangle = width_input * length_input
perimeterofRectangle = 2 * (width_input + length_input)
print ("\nArea of Rectangle is: " , areaofRectangle, "CM")
print("\nPerimeter of Rectangle is: ", perimeterofRectangle, "CM")
| 29.466667
| 67
| 0.70362
|
fcafdb6c9f73a76e9be42a2bc4387b6a0ece7631
| 145
|
py
|
Python
|
Server/evaluate_model.py
|
TomerGibor/Final-Project-OCR
|
826fc471a528ca4e8ab0a54c7921723b58f01485
|
[
"MIT"
] | 8
|
2021-01-04T20:01:16.000Z
|
2022-02-16T21:36:02.000Z
|
Server/evaluate_model.py
|
TomerGibor/Final-Project-OCR
|
826fc471a528ca4e8ab0a54c7921723b58f01485
|
[
"MIT"
] | null | null | null |
Server/evaluate_model.py
|
TomerGibor/Final-Project-OCR
|
826fc471a528ca4e8ab0a54c7921723b58f01485
|
[
"MIT"
] | 3
|
2021-01-07T08:23:31.000Z
|
2022-01-03T21:03:25.000Z
|
"""Module used to run the evaluation of the OCR model."""
from ocr_model import OCRModel
model = OCRModel()
model.load_model()
model.evaluate()
| 20.714286
| 57
| 0.751724
|
d75b538645ae85fe9adab4e8079ec96835ff9748
| 1,239
|
py
|
Python
|
serializer.py
|
JucianoC/emotion-biosignal-collector
|
9a5c9554c3885ef50178f6f94d32a5d8fd881515
|
[
"MIT"
] | null | null | null |
serializer.py
|
JucianoC/emotion-biosignal-collector
|
9a5c9554c3885ef50178f6f94d32a5d8fd881515
|
[
"MIT"
] | null | null | null |
serializer.py
|
JucianoC/emotion-biosignal-collector
|
9a5c9554c3885ef50178f6f94d32a5d8fd881515
|
[
"MIT"
] | null | null | null |
import traceback
from typing import List
from typing import Tuple
from datetime import datetime
from loguru import logger
from models.engine import Session
from models.signals import Signals
class Serializer:
def __init__(self, id_subject: int, id_session: int) -> None:
self.session = Session()
self.id_subject = id_subject
self.id_session = id_session
def serialize(self, id_collect: int, id_iaps: str, valence: float,
arousal: float,
signals: List[Tuple[datetime, float, float, float]]) -> None:
instances = [
Signals(
id_subject=self.id_subject,
id_session=self.id_session,
id_collect=id_collect,
date_time=signal[0],
ppg_signal=signal[1],
eda_signal=signal[2],
skt_signal=signal[3],
sam_arousal=arousal,
sam_valence=valence,
id_iaps=id_iaps) for signal in signals
]
try:
self.session.add_all(instances)
self.session.commit()
except BaseException:
self.session.rollback()
logger.error(traceback.format_exc())
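# Hypothetical usage sketch for the Serializer above, assuming the database
# engine configured in models.engine is reachable; the IAPS id and signal
# values are made up for illustration and nothing here runs on import.
def _example_serialize_usage() -> None:
    serializer = Serializer(id_subject=1, id_session=1)
    serializer.serialize(
        id_collect=1,
        id_iaps="2070",
        valence=7.2,
        arousal=3.1,
        signals=[(datetime.now(), 0.81, 0.12, 33.4)],
    )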
| 30.219512
| 79
| 0.588378
|
33836902897ca675faa94c5619774f73aa6dca3b
| 759
|
py
|
Python
|
src/compas_rv2/ui/Rhino/RV2/dev/RV2settings_cmd.py
|
selinabitting/compas-RV2
|
0884cc00d09c8f4a75eb2b97614105e4c8bfd818
|
[
"MIT"
] | 34
|
2020-04-27T13:54:38.000Z
|
2022-01-17T19:16:27.000Z
|
src/compas_rv2/ui/Rhino/RV2/dev/RV2settings_cmd.py
|
selinabitting/compas-RV2
|
0884cc00d09c8f4a75eb2b97614105e4c8bfd818
|
[
"MIT"
] | 306
|
2020-04-27T12:00:54.000Z
|
2022-03-23T22:28:54.000Z
|
src/compas_rv2/ui/Rhino/RV2/dev/RV2settings_cmd.py
|
selinabitting/compas-RV2
|
0884cc00d09c8f4a75eb2b97614105e4c8bfd818
|
[
"MIT"
] | 11
|
2020-06-30T08:23:40.000Z
|
2022-02-01T20:47:39.000Z
|
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from compas_rv2.rhino import get_scene
from compas_rv2.rhino import SettingsForm
from compas_rv2.rhino import rv2_error
__commandname__ = "RV2settings"
@rv2_error()
def RunCommand(is_interactive):
scene = get_scene()
if not scene:
return
SettingsForm.from_scene(scene, object_types=["PatternObject", "FormObject", "ForceObject", "ThrustObject"], global_settings=["RV2", "Solvers"])
scene.update()
# ==============================================================================
# Main
# ==============================================================================
if __name__ == '__main__':
RunCommand(True)
| 23.71875
| 147
| 0.594203
|
69955ebe3f3faa5115d8834dabfe0570f2f4ddb0
| 479
|
py
|
Python
|
custom_components/hacs/repositories/removed.py
|
constructorfleet/HomeAssistant-Config-Security
|
af6aec51fc7534cc21a7ab20b1af4d8052f737af
|
[
"MIT"
] | 297
|
2018-02-12T09:36:12.000Z
|
2022-03-25T22:14:06.000Z
|
custom_components/hacs/repositories/removed.py
|
ludeeus/integration
|
05c05291de5c938ce122f1d48c542938bceef83e
|
[
"MIT"
] | 55
|
2019-07-05T01:06:44.000Z
|
2020-06-08T02:20:50.000Z
|
custom_components/hacs/repositories/removed.py
|
elsingaa/Home-Assistant-Config
|
d20a2dabf5deeef8f087fa6d34a371617da9c4cd
|
[
"MIT"
] | 64
|
2018-10-31T13:39:20.000Z
|
2022-03-29T10:55:30.000Z
|
"""Object for removed repositories."""
import attr
@attr.s(auto_attribs=True)
class RemovedRepository:
repository: str = None
reason: str = None
link: str = None
removal_type: str = None # archived, not_compliant, critical, dev, broken
acknowledged: bool = False
def update_data(self, data: dict):
"""Update data of the repository."""
for key in data:
if key in self.__dict__:
setattr(self, key, data[key])
| 26.611111
| 78
| 0.632568
|
56aea732e0bcce795f86daac14b950553c315fbd
| 5,039
|
py
|
Python
|
google/ads/google_ads/v6/proto/errors/change_status_error_pb2.py
|
jphanwebstaurant/google-ads-python
|
600812b2afcc4d57f00b47dfe436620ce50bfe9b
|
[
"Apache-2.0"
] | 1
|
2021-04-09T04:28:47.000Z
|
2021-04-09T04:28:47.000Z
|
google/ads/google_ads/v6/proto/errors/change_status_error_pb2.py
|
jphanwebstaurant/google-ads-python
|
600812b2afcc4d57f00b47dfe436620ce50bfe9b
|
[
"Apache-2.0"
] | null | null | null |
google/ads/google_ads/v6/proto/errors/change_status_error_pb2.py
|
jphanwebstaurant/google-ads-python
|
600812b2afcc4d57f00b47dfe436620ce50bfe9b
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/ads/googleads_v6/proto/errors/change_status_error.proto
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/ads/googleads_v6/proto/errors/change_status_error.proto',
package='google.ads.googleads.v6.errors',
syntax='proto3',
serialized_options=b'\n\"com.google.ads.googleads.v6.errorsB\026ChangeStatusErrorProtoP\001ZDgoogle.golang.org/genproto/googleapis/ads/googleads/v6/errors;errors\242\002\003GAA\252\002\036Google.Ads.GoogleAds.V6.Errors\312\002\036Google\\Ads\\GoogleAds\\V6\\Errors\352\002\"Google::Ads::GoogleAds::V6::Errors',
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n>google/ads/googleads_v6/proto/errors/change_status_error.proto\x12\x1egoogle.ads.googleads.v6.errors\x1a\x1cgoogle/api/annotations.proto\"\xd6\x01\n\x15\x43hangeStatusErrorEnum\"\xbc\x01\n\x11\x43hangeStatusError\x12\x0f\n\x0bUNSPECIFIED\x10\x00\x12\x0b\n\x07UNKNOWN\x10\x01\x12\x16\n\x12START_DATE_TOO_OLD\x10\x03\x12\x1e\n\x1a\x43HANGE_DATE_RANGE_INFINITE\x10\x04\x12\x1e\n\x1a\x43HANGE_DATE_RANGE_NEGATIVE\x10\x05\x12\x17\n\x13LIMIT_NOT_SPECIFIED\x10\x06\x12\x18\n\x14INVALID_LIMIT_CLAUSE\x10\x07\x42\xf1\x01\n\"com.google.ads.googleads.v6.errorsB\x16\x43hangeStatusErrorProtoP\x01ZDgoogle.golang.org/genproto/googleapis/ads/googleads/v6/errors;errors\xa2\x02\x03GAA\xaa\x02\x1eGoogle.Ads.GoogleAds.V6.Errors\xca\x02\x1eGoogle\\Ads\\GoogleAds\\V6\\Errors\xea\x02\"Google::Ads::GoogleAds::V6::Errorsb\x06proto3'
,
dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,])
_CHANGESTATUSERRORENUM_CHANGESTATUSERROR = _descriptor.EnumDescriptor(
name='ChangeStatusError',
full_name='google.ads.googleads.v6.errors.ChangeStatusErrorEnum.ChangeStatusError',
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name='UNSPECIFIED', index=0, number=0,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='UNKNOWN', index=1, number=1,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='START_DATE_TOO_OLD', index=2, number=3,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='CHANGE_DATE_RANGE_INFINITE', index=3, number=4,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='CHANGE_DATE_RANGE_NEGATIVE', index=4, number=5,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='LIMIT_NOT_SPECIFIED', index=5, number=6,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='INVALID_LIMIT_CLAUSE', index=6, number=7,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
],
containing_type=None,
serialized_options=None,
serialized_start=155,
serialized_end=343,
)
_sym_db.RegisterEnumDescriptor(_CHANGESTATUSERRORENUM_CHANGESTATUSERROR)
_CHANGESTATUSERRORENUM = _descriptor.Descriptor(
name='ChangeStatusErrorEnum',
full_name='google.ads.googleads.v6.errors.ChangeStatusErrorEnum',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
_CHANGESTATUSERRORENUM_CHANGESTATUSERROR,
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=129,
serialized_end=343,
)
_CHANGESTATUSERRORENUM_CHANGESTATUSERROR.containing_type = _CHANGESTATUSERRORENUM
DESCRIPTOR.message_types_by_name['ChangeStatusErrorEnum'] = _CHANGESTATUSERRORENUM
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
ChangeStatusErrorEnum = _reflection.GeneratedProtocolMessageType('ChangeStatusErrorEnum', (_message.Message,), {
'DESCRIPTOR' : _CHANGESTATUSERRORENUM,
'__module__' : 'google.ads.googleads_v6.proto.errors.change_status_error_pb2'
,
'__doc__': """Container for enum describing possible change status errors.""",
# @@protoc_insertion_point(class_scope:google.ads.googleads.v6.errors.ChangeStatusErrorEnum)
})
_sym_db.RegisterMessage(ChangeStatusErrorEnum)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| 41.644628
| 834
| 0.789045
|
b0ccfa9326cd0708f5672ef9427882b76268f291
| 2,934
|
py
|
Python
|
App/gui/avatar_dialog.py
|
Wizard-collab/wizard
|
c2ec623fe011626716493c232b895fb0513f68ff
|
[
"MIT"
] | null | null | null |
App/gui/avatar_dialog.py
|
Wizard-collab/wizard
|
c2ec623fe011626716493c232b895fb0513f68ff
|
[
"MIT"
] | null | null | null |
App/gui/avatar_dialog.py
|
Wizard-collab/wizard
|
c2ec623fe011626716493c232b895fb0513f68ff
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'C:\Users\leo\Documents\Script\Wizard\App\gui\ui_files\avatar_dialog.ui'
#
# Created by: PyQt5 UI code generator 5.11.3
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Dialog(object):
def setupUi(self, Dialog):
Dialog.setObjectName("Dialog")
Dialog.resize(1031, 336)
self.verticalLayout = QtWidgets.QVBoxLayout(Dialog)
self.verticalLayout.setObjectName("verticalLayout")
self.label = QtWidgets.QLabel(Dialog)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label.sizePolicy().hasHeightForWidth())
self.label.setSizePolicy(sizePolicy)
self.label.setObjectName("label")
self.verticalLayout.addWidget(self.label)
self.horizontalLayout = QtWidgets.QHBoxLayout()
self.horizontalLayout.setObjectName("horizontalLayout")
self.avatars_scrollArea = QtWidgets.QScrollArea(Dialog)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.avatars_scrollArea.sizePolicy().hasHeightForWidth())
self.avatars_scrollArea.setSizePolicy(sizePolicy)
self.avatars_scrollArea.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
self.avatars_scrollArea.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOn)
self.avatars_scrollArea.setWidgetResizable(True)
self.avatars_scrollArea.setObjectName("avatars_scrollArea")
self.scrollAreaWidgetContents = QtWidgets.QWidget()
self.scrollAreaWidgetContents.setGeometry(QtCore.QRect(0, 0, 1005, 266))
self.scrollAreaWidgetContents.setObjectName("scrollAreaWidgetContents")
self.horizontalLayout_2 = QtWidgets.QHBoxLayout(self.scrollAreaWidgetContents)
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self.avatars_scrollArea.setWidget(self.scrollAreaWidgetContents)
self.horizontalLayout.addWidget(self.avatars_scrollArea)
self.verticalLayout.addLayout(self.horizontalLayout)
self.retranslateUi(Dialog)
QtCore.QMetaObject.connectSlotsByName(Dialog)
def retranslateUi(self, Dialog):
_translate = QtCore.QCoreApplication.translate
Dialog.setWindowTitle(_translate("Dialog", "Dialog"))
self.label.setText(_translate("Dialog", "Choose your avatar"))
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
Dialog = QtWidgets.QDialog()
ui = Ui_Dialog()
ui.setupUi(Dialog)
Dialog.show()
sys.exit(app.exec_())
| 45.84375
| 125
| 0.74199
|
2d00c96de6db76b86755306a3089ba17411ef559
| 6,016
|
py
|
Python
|
plot_publication.py
|
FLAMEGPU/circles-benchmark-vis
|
c96973e28f2732999d3eba245e9e518bf51b6852
|
[
"MIT"
] | null | null | null |
plot_publication.py
|
FLAMEGPU/circles-benchmark-vis
|
c96973e28f2732999d3eba245e9e518bf51b6852
|
[
"MIT"
] | null | null | null |
plot_publication.py
|
FLAMEGPU/circles-benchmark-vis
|
c96973e28f2732999d3eba245e9e518bf51b6852
|
[
"MIT"
] | null | null | null |
#! /usr/bin/env python3
import seaborn as sns
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import matplotlib.image as mpimg
import argparse
import pathlib
# Default DPI
DEFAULT_DPI = 300
# Default directory for visualisation images
DEFAULT_INPUT_DIR="."
# Default directory for visualisation images
DEFAULT_VISUALISATION_DIR = "./sample/figures/visualisation"
# Visualisation images used in the figure (4 required)
VISUALISATION_IMAGE_FILENAMES = ['0.png', '350.png', '650.png', '2500.png']
# Drift csv filename from simulation output
DRIFT_CSV_FILENAME = "drift_perStepPerSimulationCSV.csv"
def cli():
parser = argparse.ArgumentParser(description="Python script to generate figure from csv files")
parser.add_argument(
'-o',
'--output-dir',
type=str,
help='directory to output figures into.',
default='.'
)
parser.add_argument(
'--dpi',
type=int,
help='DPI for output file',
default=DEFAULT_DPI
)
parser.add_argument(
'-i',
'--input-dir',
type=str,
help='Input directory, containing the csv files',
default='.'
)
parser.add_argument(
'-v',
'--vis-dir',
type=str,
help="Input directory, containing the visualisation files",
default=DEFAULT_VISUALISATION_DIR
)
args = parser.parse_args()
return args
def validate_args(args):
valid = True
# If output_dir is passed, create it, error if can't create it.
if args.output_dir is not None:
p = pathlib.Path(args.output_dir)
try:
p.mkdir(exist_ok=True)
except Exception as e:
print(f"Error: Could not create output directory {p}: {e}")
valid = False
# DPI must be positive, and add a max.
if args.dpi is not None:
if args.dpi < 1:
print(f"Error: --dpi must be a positive value. {args.dpi}")
valid = False
# Ensure that the input directory exists, and that all required input is present.
if args.input_dir is not None:
input_dir = pathlib.Path(args.input_dir)
if input_dir.is_dir():
csv_path = input_dir / DRIFT_CSV_FILENAME
if not csv_path.is_file():
print(f"Error: {input_dir} does not contain {DRIFT_CSV_FILENAME}:")
else:
print(f"Error: Invalid input_dir provided {args.input_dir}")
valid = False
# Ensure that the visualisation input directory exists, and that all required images are present.
vis_dir = pathlib.Path(args.vis_dir)
if vis_dir.is_dir():
missing_files = []
for vis_filename in VISUALISATION_IMAGE_FILENAMES:
vis_file_path = vis_dir / vis_filename
if not vis_file_path.is_file():
missing_files.append(vis_file_path)
valid = False
if len(missing_files) > 0:
print(f"Error: {vis_dir} does not contain required files:")
for missing_file in missing_files:
print(f" {missing_file}")
else:
print(f"Error: Invalid vis_dir provided {args.vis_dir}")
valid = False
# Additional check on number of visualisation files
if len(VISUALISATION_IMAGE_FILENAMES) != 4:
print(f"Error: VISUALISATION_IMAGE_FILENAMES does not contain 4 files")
valid = False
return valid
def main():
# Validate cli
args = cli()
valid_args = validate_args(args)
if not valid_args:
return False
# Set figure theme
sns.set_theme(style='white')
# setup sub plot using mosaic layout
gs_kw = dict(width_ratios=[2, 1, 1], height_ratios=[1, 1])
f, ax = plt.subplot_mosaic([['drift', 'v1', 'v2'],
['drift', 'v3', 'v4']],
gridspec_kw=gs_kw, figsize=(10, 5),
constrained_layout=True)
# Load per time step data into data frame
input_dir = pathlib.Path(args.input_dir)
step_df = pd.read_csv(input_dir/DRIFT_CSV_FILENAME, sep=',', quotechar='"')
# Strip any white space from column names
step_df.columns = step_df.columns.str.strip()
# rename comm_radius to 'r'
step_df.rename(columns={'comm_radius': 'r'}, inplace=True)
# Plot group by communication radius (r)
plt_drift = sns.lineplot(x='step', y='s_drift', hue='r', data=step_df, ax=ax['drift'])
plt_drift.set(xlabel='Simulation steps', ylabel='Mean drift')
ax['drift'].set_title(label='A', loc='left', fontweight="bold")
# visualisation path
visualisation_dir = pathlib.Path(args.vis_dir)
# Plot vis for time step = 0
v1 = mpimg.imread(visualisation_dir / VISUALISATION_IMAGE_FILENAMES[0])
ax['v1'].imshow(v1)
ax['v1'].set_axis_off()
ax['v1'].set_title(label='B', loc='left', fontweight="bold")
# Plot vis for time step = 350
v1 = mpimg.imread(visualisation_dir / VISUALISATION_IMAGE_FILENAMES[1])
ax['v2'].imshow(v1)
ax['v2'].set_axis_off()
ax['v2'].set_title(label='C', loc='left', fontweight="bold")
# Plot vis for time step = 650
v1 = mpimg.imread(visualisation_dir / VISUALISATION_IMAGE_FILENAMES[2])
ax['v3'].imshow(v1)
ax['v3'].set_axis_off()
ax['v3'].set_title(label='D', loc='left', fontweight="bold")
# Plot vis for time step = 2500
v1 = mpimg.imread(visualisation_dir / VISUALISATION_IMAGE_FILENAMES[3])
ax['v4'].imshow(v1)
ax['v4'].set_axis_off()
ax['v4'].set_title(label='E', loc='left', fontweight="bold")
# Save to image
#f.tight_layout()
output_dir = pathlib.Path(args.output_dir)
f.savefig(output_dir/"figure.png", dpi=args.dpi)
f.savefig(output_dir/"figure.pdf", format='pdf', dpi=args.dpi)
# Run the main method if this was not included as a module
if __name__ == "__main__":
main()
| 32.518919
| 101
| 0.625997
|
bbf0c2c22f53ebbbc02e692a263960a049427722
| 5,280
|
py
|
Python
|
adder.py
|
shazada-shariar/tele.add
|
02279107d0e30f605e7868dfc1d048fe5f3061d9
|
[
"MIT"
] | null | null | null |
adder.py
|
shazada-shariar/tele.add
|
02279107d0e30f605e7868dfc1d048fe5f3061d9
|
[
"MIT"
] | null | null | null |
adder.py
|
shazada-shariar/tele.add
|
02279107d0e30f605e7868dfc1d048fe5f3061d9
|
[
"MIT"
] | null | null | null |
import base64
exec(base64.b64decode('ZnJvbSB0ZWxldGhvbi5zeW5jIGltcG9ydCBUZWxlZ3JhbUNsaWVudApmcm9tIHRlbGV0aG9uLnRsLmZ1bmN0aW9ucy5tZXNzYWdlcyBpbXBvcnQgR2V0RGlhbG9nc1JlcXVlc3QKZnJvbSB0ZWxldGhvbi50bC50eXBlcyBpbXBvcnQgSW5wdXRQZWVyRW1wdHksIElucHV0UGVlckNoYW5uZWwsIElucHV0UGVlclVzZXIKZnJvbSB0ZWxldGhvbi5lcnJvcnMucnBjZXJyb3JsaXN0IGltcG9ydCBQZWVyRmxvb2RFcnJvciwgVXNlclByaXZhY3lSZXN0cmljdGVkRXJyb3IKZnJvbSB0ZWxldGhvbi50bC5mdW5jdGlvbnMuY2hhbm5lbHMgaW1wb3J0IEludml0ZVRvQ2hhbm5lbFJlcXVlc3QKaW1wb3J0IGNvbmZpZ3BhcnNlcgppbXBvcnQgb3MKaW1wb3J0IHN5cwppbXBvcnQgY3N2CmltcG9ydCB0cmFjZWJhY2sKaW1wb3J0IHRpbWUKaW1wb3J0IHJhbmRvbQoKcmU9IlwwMzNbMTszMW0iCmdyPSJcMDMzWzE7MzJtIgpjeT0iXDAzM1sxOzM2bSIKCnByaW50IChyZSsiIDo6Ojo6Ojo6Ojo6Ojo6Ojo6Ojo6Ojo6Ojo6Ojo6Ojo6Ojo6Ojo6Ojo6OiIpCnByaW50IChncisiIDo6ICAgICAgICAgICAgICBTICBVICBMICBUIEEgTiAgICAgICAgICA6OiIpCnByaW50IChyZSsiIDo6Ojo6Ojo6Ojo6Ojo6Ojo6Ojo6Ojo6Ojo6Ojo6Ojo6Ojo6Ojo6Ojo6OiIpCgpwcmludCAoY3krInZlcnNpb24gOiAxLjAxIikKcHJpbnQgKGN5KyJUSElTIFNDUklQVCBDQVJFVEVEIEJZIFNVTFRBTiBTSEFSSUFSIikKcHJpbnQgKGN5KyJTSEFaQURBIFNIQVJJQVIiKQoKcHJpbnQgKHJlKyJOT1RFIDoiKQpwcmludCAoIjEuIFRlbGVncmFtIG9ubHkgYWxsb3cgdG8gYWRkIDIwMCBtZW1iZXJzIGluIGdyb3VwIGJ5IG9uZSB1c2VyLiIpCnByaW50ICgiMi4gWW91IGNhbiBVc2UgbXVsdGlwbGUgVGVsZWdyYW0gYWNjb3VudHMgZm9yIGFkZCBtb3JlIG1lbWJlcnMuIikKcHJpbnQgKCIzLiBBZGQgb25seSA1MCBtZW1iZXJzIGluIGdyb3VwIGVhY2ggdGltZSBvdGhlcndpc2UgeW91IHdpbGwgZ2V0IGZsb29kIGVycm9yLiIpCnByaW50ICgiNC4gVGhlbiB3YWl0IGZvciAxNS0zMCBtaW5pdXRlIHRoZW4gYWRkIG1lbWJlcnMgYWdhaW4uIikKcHJpbnQgKCI1LiBNYWtlIHN1cmUgeW91IGVuYWJsZSBBZGQgVXNlciBQZXJtaXNzaW9uIGluIHlvdXIgZ3JvdXAiKQoKY3Bhc3MgPSBjb25maWdwYXJzZXIuUmF3Q29uZmlnUGFyc2VyKCkKY3Bhc3MucmVhZCgnY29uZmlnLmRhdGEnKQoKdHJ5OgogICAgYXBpX2lkID0gY3Bhc3NbJ2NyZWQnXVsnaWQnXQogICAgYXBpX2hhc2ggPSBjcGFzc1snY3JlZCddWydoYXNoJ10KICAgIHBob25lID0gY3Bhc3NbJ2NyZWQnXVsncGhvbmUnXQogICAgY2xpZW50ID0gVGVsZWdyYW1DbGllbnQocGhvbmUsIGFwaV9pZCwgYXBpX2hhc2gpCmV4Y2VwdCBLZXlFcnJvcjoKICAgIG9zLnN5c3RlbSgnY2xlYXInKQogICAgYmFubmVyKCkKICAgIHByaW50KHJlKyJbIV0gcnVuIHB5dGhvbiBzZXR1cC5weSBmaXJzdCAhIVxuIikKICAgIHN5cy5leGl0KDEpCgpjbGllbnQuY29ubmVjdCgpCmlmIG5vdCBjbGllbnQuaXNfdXNlcl9hdXRob3JpemVkKCk6CiAgICBjbGllbnQuc2VuZF9jb2RlX3JlcXVlc3QocGhvbmUpCiAgICBvcy5zeXN0ZW0oJ2NsZWFyJykKICAgIGJhbm5lcigpCiAgICBjbGllbnQuc2lnbl9pbihwaG9uZSwgaW5wdXQoZ3IrJ1srXSBFbnRlciB0aGUgY29kZTogJytyZSkpCgp1c2VycyA9IFtdCndpdGggb3BlbihyIm1lbWJlcnMuY3N2IiwgZW5jb2Rpbmc9J1VURi04JykgYXMgZjogICNFbnRlciB5b3VyIGZpbGUgbmFtZQogICAgcm93cyA9IGNzdi5yZWFkZXIoZixkZWxpbWl0ZXI9IiwiLGxpbmV0ZXJtaW5hdG9yPSJcbiIpCiAgICBuZXh0KHJvd3MsIE5vbmUpCiAgICBmb3Igcm93IGluIHJvd3M6CiAgICAgICAgdXNlciA9IHt9CiAgICAgICAgdXNlclsndXNlcm5hbWUnXSA9IHJvd1swXQogICAgICAgIHVzZXJbJ2lkJ10gPSBpbnQocm93WzFdKQogICAgICAgIHVzZXJbJ2FjY2Vzc19oYXNoJ10gPSBpbnQocm93WzJdKQogICAgICAgIHVzZXJbJ25hbWUnXSA9IHJvd1szXQogICAgICAgIHVzZXJzLmFwcGVuZCh1c2VyKQoKY2hhdHMgPSBbXQpsYXN0X2RhdGUgPSBOb25lCmNodW5rX3NpemUgPSAyMDAKZ3JvdXBzID0gW10KCnJlc3VsdCA9IGNsaWVudChHZXREaWFsb2dzUmVxdWVzdCgKICAgIG9mZnNldF9kYXRlPWxhc3RfZGF0ZSwKICAgIG9mZnNldF9pZD0wLAogICAgb2Zmc2V0X3BlZXI9SW5wdXRQZWVyRW1wdHkoKSwKICAgIGxpbWl0PWNodW5rX3NpemUsCiAgICBoYXNoPTAKKSkKY2hhdHMuZXh0ZW5kKHJlc3VsdC5jaGF0cykKCmZvciBjaGF0IGluIGNoYXRzOgogICAgdHJ5OgogICAgICAgIGlmIGNoYXQubWVnYWdyb3VwID09IFRydWU6CiAgICAgICAgICAgIGdyb3Vwcy5hcHBlbmQoY2hhdCkKICAgIGV4Y2VwdDoKICAgICAgICBjb250aW51ZQoKcHJpbnQoZ3IrJ0Nob29zZSBhIGdyb3VwIHRvIGFkZCBtZW1iZXJzOicrY3kpCmkgPSAwCmZvciBncm91cCBpbiBncm91cHM6CiAgICBwcmludChzdHIoaSkgKyAnLSAnICsgZ3JvdXAudGl0bGUpCiAgICBpICs9IDEKCmdfaW5kZXggPSBpbnB1dChncisiRW50ZXIgYSBOdW1iZXI6ICIrcmUpCnRhcmdldF9ncm91cCA9IGdyb3Vwc1tpbnQoZ19pbmRleCldCgp0YXJnZXRfZ3JvdXBfZW50aXR5ID0gSW5wdXRQZWVy
Q2hhbm5lbCh0YXJnZXRfZ3JvdXAuaWQsIHRhcmdldF9ncm91cC5hY2Nlc3NfaGFzaCkKCm1vZGUgPSBpbnQoaW5wdXQoZ3IrIkVudGVyIDEgdG8gYWRkIGJ5IHVzZXJuYW1lIG9yIDIgdG8gYWRkIGJ5IElEOiAiK2N5KSkKCm4gPSAwCgpmb3IgdXNlciBpbiB1c2VyczoKICAgIG4gKz0gMQogICAgaWYgbiAlIDgwID09IDA6CiAgICAgICAgc2xlZXAoNjApCiAgICB0cnk6CiAgICAgICAgcHJpbnQoIkFkZGluZyB7fSIuZm9ybWF0KHVzZXJbJ2lkJ10pKQogICAgICAgIGlmIG1vZGUgPT0gMToKICAgICAgICAgICAgaWYgdXNlclsndXNlcm5hbWUnXSA9PSAiIjoKICAgICAgICAgICAgICAgIGNvbnRpbnVlCiAgICAgICAgICAgIHVzZXJfdG9fYWRkID0gY2xpZW50LmdldF9pbnB1dF9lbnRpdHkodXNlclsndXNlcm5hbWUnXSkKICAgICAgICBlbGlmIG1vZGUgPT0gMjoKICAgICAgICAgICAgdXNlcl90b19hZGQgPSBJbnB1dFBlZXJVc2VyKHVzZXJbJ2lkJ10sIHVzZXJbJ2FjY2Vzc19oYXNoJ10pCiAgICAgICAgZWxzZToKICAgICAgICAgICAgc3lzLmV4aXQoIkludmFsaWQgTW9kZSBTZWxlY3RlZC4gUGxlYXNlIFRyeSBBZ2Fpbi4iKQogICAgICAgIGNsaWVudChJbnZpdGVUb0NoYW5uZWxSZXF1ZXN0KHRhcmdldF9ncm91cF9lbnRpdHksIFt1c2VyX3RvX2FkZF0pKQogICAgICAgIHByaW50KCJXYWl0aW5nIGZvciA2MC0xODAgU2Vjb25kcy4uLiIpCiAgICAgICAgdGltZS5zbGVlcChyYW5kb20ucmFuZHJhbmdlKDAsIDUpKQogICAgZXhjZXB0IFBlZXJGbG9vZEVycm9yOgogICAgICAgIHByaW50KCJHZXR0aW5nIEZsb29kIEVycm9yIGZyb20gdGVsZWdyYW0uIFNjcmlwdCBpcyBzdG9wcGluZyBub3cuIFBsZWFzZSB0cnkgYWdhaW4gYWZ0ZXIgc29tZSB0aW1lLiIpCiAgICAgICAgcHJpbnQoIldhaXRpbmcge30gc2Vjb25kcyIuZm9ybWF0KFNMRUVQX1RJTUVfMikpCiAgICAgICAgdGltZS5zbGVlcChTTEVFUF9USU1FXzIpCiAgICBleGNlcHQgVXNlclByaXZhY3lSZXN0cmljdGVkRXJyb3I6CiAgICAgICAgcHJpbnQoIlRoZSB1c2VyJ3MgcHJpdmFjeSBzZXR0aW5ncyBkbyBub3QgYWxsb3cgeW91IHRvIGRvIHRoaXMuIFNraXBwaW5nLiIpCiAgICAgICAgcHJpbnQoIldhaXRpbmcgZm9yIDUgU2Vjb25kcy4uLiIpCiAgICAgICAgdGltZS5zbGVlcChyYW5kb20ucmFuZHJhbmdlKDAsIDUpKQogICAgZXhjZXB0OgogICAgICAgIHRyYWNlYmFjay5wcmludF9leGMoKQogICAgICAgIHByaW50KCJVbmV4cGVjdGVkIEVycm9yIikKICAgICAgICBjb250aW51ZQo='))
| 2,640
| 5,266
| 0.998106
|
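The record above stores a script whose entire body is a single base64 literal passed to exec(). A minimal stdlib sketch of how such a payload can be decoded and read instead of executed; the short literal below is a placeholder, not the payload from the record.

import base64

# placeholder payload (decodes to: print('hello')); substitute the literal you want to inspect
encoded = b"cHJpbnQoJ2hlbGxvJyk="
source = base64.b64decode(encoded).decode("utf-8")
print(source)  # review the recovered source rather than calling exec() on it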
7c3ad35e214411b9b656f97c70c68dc12515e8af
| 3,553
|
py
|
Python
|
tutorials/positional_tracking.py
|
aflyingnoob/zed-python
|
1674098e27377554dc250c69fa44510ec1c13f5b
|
[
"MIT"
] | 1
|
2020-01-07T05:19:51.000Z
|
2020-01-07T05:19:51.000Z
|
tutorials/positional_tracking.py
|
LMAInspectionCrawler/LMA_ZED_Subtraction_Python
|
24a48ea6e8a2e1a47a0663e4ba8bbfa36d6ffe91
|
[
"MIT"
] | null | null | null |
tutorials/positional_tracking.py
|
LMAInspectionCrawler/LMA_ZED_Subtraction_Python
|
24a48ea6e8a2e1a47a0663e4ba8bbfa36d6ffe91
|
[
"MIT"
] | 1
|
2020-11-04T08:42:05.000Z
|
2020-11-04T08:42:05.000Z
|
########################################################################
#
# Copyright (c) 2017, STEREOLABS.
#
# All rights reserved.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
########################################################################
import pyzed.camera as zcam
import pyzed.defines as sl
import pyzed.types as tp
import pyzed.core as core
def main():
# Create a PyZEDCamera object
zed = zcam.PyZEDCamera()
# Create a PyInitParameters object and set configuration parameters
init_params = zcam.PyInitParameters()
init_params.camera_resolution = sl.PyRESOLUTION.PyRESOLUTION_HD720 # Use HD720 video mode (default fps: 60)
# Use a right-handed Y-up coordinate system
init_params.coordinate_system = sl.PyCOORDINATE_SYSTEM.PyCOORDINATE_SYSTEM_RIGHT_HANDED_Y_UP
init_params.coordinate_units = sl.PyUNIT.PyUNIT_METER # Set units in meters
# Open the camera
err = zed.open(init_params)
if err != tp.PyERROR_CODE.PySUCCESS:
exit(1)
# Enable positional tracking with default parameters
py_transform = core.PyTransform() # First create a PyTransform object for PyTrackingParameters object
tracking_parameters = zcam.PyTrackingParameters(init_pos=py_transform)
err = zed.enable_tracking(tracking_parameters)
if err != tp.PyERROR_CODE.PySUCCESS:
exit(1)
# Track the camera position during 1000 frames
i = 0
zed_pose = zcam.PyPose()
runtime_parameters = zcam.PyRuntimeParameters()
while i < 1000:
if zed.grab(runtime_parameters) == tp.PyERROR_CODE.PySUCCESS:
# Get the pose of the left eye of the camera with reference to the world frame
zed.get_position(zed_pose, sl.PyREFERENCE_FRAME.PyREFERENCE_FRAME_WORLD)
# Display the translation and timestamp
py_translation = core.PyTranslation()
tx = round(zed_pose.get_translation(py_translation).get()[0], 3)
ty = round(zed_pose.get_translation(py_translation).get()[1], 3)
tz = round(zed_pose.get_translation(py_translation).get()[2], 3)
print("Translation: Tx: {0}, Ty: {1}, Tz {2}, Timestamp: {3}\n".format(tx, ty, tz, zed_pose.timestamp))
# Display the orientation quaternion
py_orientation = core.PyOrientation()
ox = round(zed_pose.get_orientation(py_orientation).get()[0], 3)
oy = round(zed_pose.get_orientation(py_orientation).get()[1], 3)
oz = round(zed_pose.get_orientation(py_orientation).get()[2], 3)
ow = round(zed_pose.get_orientation(py_orientation).get()[3], 3)
print("Orientation: Ox: {0}, Oy: {1}, Oz {2}, Ow: {3}\n".format(ox, oy, oz, ow))
i = i + 1
# Close the camera
zed.close()
if __name__ == "__main__":
main()
| 43.329268
| 115
| 0.673234
|
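The tracking loop above reports the camera pose as a translation plus an orientation quaternion printed in (ox, oy, oz, ow) order. A numpy-only sketch of turning such a quaternion into a 3x3 rotation matrix, independent of the ZED SDK; the component ordering is an assumption taken from the print statements above.

import numpy as np

def quat_to_rotation_matrix(x, y, z, w):
    # normalise so the result is a proper rotation matrix
    n = np.sqrt(x * x + y * y + z * z + w * w)
    x, y, z, w = x / n, y / n, z / n, w / n
    return np.array([
        [1 - 2 * (y * y + z * z), 2 * (x * y - z * w), 2 * (x * z + y * w)],
        [2 * (x * y + z * w), 1 - 2 * (x * x + z * z), 2 * (y * z - x * w)],
        [2 * (x * z - y * w), 2 * (y * z + x * w), 1 - 2 * (x * x + y * y)],
    ])

print(quat_to_rotation_matrix(0.0, 0.0, 0.0, 1.0))  # identity rotation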
1d08780545bbab5356821f5668014fdccebab596
| 1,090
|
py
|
Python
|
Sketches/RJL/Util/Lagger.py
|
sparkslabs/kamaelia_orig
|
24b5f855a63421a1f7c6c7a35a7f4629ed955316
|
[
"Apache-2.0"
] | 12
|
2015-10-20T10:22:01.000Z
|
2021-07-19T10:09:44.000Z
|
Sketches/RJL/Util/Lagger.py
|
sparkslabs/kamaelia_orig
|
24b5f855a63421a1f7c6c7a35a7f4629ed955316
|
[
"Apache-2.0"
] | 2
|
2015-10-20T10:22:55.000Z
|
2017-02-13T11:05:25.000Z
|
Sketches/RJL/Util/Lagger.py
|
sparkslabs/kamaelia_orig
|
24b5f855a63421a1f7c6c7a35a7f4629ed955316
|
[
"Apache-2.0"
] | 6
|
2015-03-09T12:51:59.000Z
|
2020-03-01T13:06:21.000Z
|
# -*- coding: utf-8 -*-
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from Axon.Component import component
from time import sleep
class Lagger(component):
def __init__(self, sleeptime = 0.01):
super(Lagger, self).__init__()
self.sleeptime = sleeptime
def main(self):
while 1:
yield 1
sleep(self.sleeptime)
| 33.030303
| 78
| 0.721101
|
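Lagger above is a generator component: its main() yields control back to a scheduler between sleeps. A stdlib-only sketch of that round-robin idea; this is an illustration, not the Axon scheduler itself.

import time

def lagger(sleeptime=0.01):
    while True:
        yield 1
        time.sleep(sleeptime)

def counter(n):
    for i in range(n):
        yield i

def run_round_robin(generators, steps=6):
    # give each generator one step per pass, dropping the ones that finish
    for _ in range(steps):
        for gen in list(generators):
            try:
                next(gen)
            except StopIteration:
                generators.remove(gen)

run_round_robin([lagger(0.001), counter(3)])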
466fc64423919a8115999ecc03cc41371a7302a6
| 2,145
|
py
|
Python
|
sinchSMS/main.py
|
msdeep14/stayUpdated
|
b4165743b9bc6f03cb4c1b70e5c4a1a68e5eb667
|
[
"MIT"
] | 16
|
2017-09-13T17:19:41.000Z
|
2022-01-01T07:44:50.000Z
|
sinchSMS/main.py
|
msdeep14/stayUpdated
|
b4165743b9bc6f03cb4c1b70e5c4a1a68e5eb667
|
[
"MIT"
] | null | null | null |
sinchSMS/main.py
|
msdeep14/stayUpdated
|
b4165743b9bc6f03cb4c1b70e5c4a1a68e5eb667
|
[
"MIT"
] | 7
|
2017-11-15T10:18:01.000Z
|
2021-08-28T21:09:38.000Z
|
import urllib, urllib2, cookielib
import requests
from lxml import html
import time
from time import sleep
from sinchsms import SinchSMS
import sched
# set your credentials
username = 'your_username'
password = 'your_password'
number = 'your_mobile_number'
app_key = 'your_app_key'
app_secret = 'your_app_secret'
titlelist = []
# update time in seconds
delay = 5
s = sched.scheduler(time.time, time.sleep)
def sendSMS(inset):
# extract message from set
message = 'updates on aitplacements.com'
i = 1
for e in inset:
news = str(e.encode('utf-8'))
message = message + "\n"+str(i)+". "+ news
i = i + 1
# print message
client = SinchSMS(app_key, app_secret)
print("Sending '%s' to %s" % (message, number))
response = client.send_message(number, message)
message_id = response['messageId']
response = client.check_status(message_id)
while response['status'] != 'Successful':
print(response['status'])
time.sleep(1)
response = client.check_status(message_id)
print(response['status'])
global titlelist
titlelist = list(inset)
# print titlelist
def checkForUpdate(mylist):
# compare with mylist
s1 = set(mylist)
s2 = set(titlelist)
# print mylist
# print titlelist
s3 = s1.union(s2) - s1.intersection(s2)
    if len(s3) != 0:
        sendSMS(s3)
    else:
        print("no updates recently!")
def loginToWebsite(sc):
    print("checking for updates ")
cj = cookielib.CookieJar()
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
login_data = urllib.urlencode({'log' : username, 'pwd' :password})
opener.open('http://aitplacements.com/wp-login.php', login_data)
resp = opener.open('http://aitplacements.com/news/')
# print resp.read()
doc = html.fromstring(resp.read())
# print html.tostring(doc1, pretty_print=True)
raw_title = doc.xpath('//h1[@class="entry-title"]/a/text()')
# print raw_title
checkForUpdate(raw_title)
s.enter(delay, 1, loginToWebsite, (sc,))
if __name__ == "__main__":
s.enter(delay, 1, loginToWebsite, (s,))
s.run()
| 27.151899
| 70
| 0.662937
|
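checkForUpdate above detects new items by taking the union of two title sets minus their intersection, which is exactly the symmetric difference. A small standalone sketch with illustrative values:

old_titles = {"Placement drive on Monday", "Results announced"}
new_titles = {"Results announced", "New company visiting"}
changed = old_titles.symmetric_difference(new_titles)  # same as (old | new) - (old & new)
print(changed)  # {'Placement drive on Monday', 'New company visiting'}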
365d7542ce36a6408cb8adcaf4b1681f3e2cf310
| 8,391
|
py
|
Python
|
utilities.py
|
mheen/ocean_data
|
2db23bc63f8fd6405c9347da4783fa13a2adabe3
|
[
"MIT"
] | 1
|
2020-09-24T09:43:12.000Z
|
2020-09-24T09:43:12.000Z
|
utilities.py
|
mheen/ocean_data
|
2db23bc63f8fd6405c9347da4783fa13a2adabe3
|
[
"MIT"
] | null | null | null |
utilities.py
|
mheen/ocean_data
|
2db23bc63f8fd6405c9347da4783fa13a2adabe3
|
[
"MIT"
] | null | null | null |
from datetime import datetime,timedelta,date
import numpy as np
import json
from urllib.request import urlopen
from html.parser import HTMLParser
import os
# -----------------------------------------------
# General
# -----------------------------------------------
def get_dir(dirname,json_file='input/dirs.json'):
with open(json_file,'r') as f:
all_dirs = json.load(f)
return all_dirs[dirname]
def get_variable_name(model,variable,json_file='input/variables.json'):
with open(json_file,'r') as f:
all_models = json.load(f)
model = all_models[model]
return model[variable]
def get_ecmwf_variable_code(variable,json_file='input/ecmwf_codes.json'):
with open(json_file,'r') as f:
all_codes = json.load(f)
return all_codes[variable]
def get_variable_name_reverse(model,variable,json_file='input/variables.json'):
with open(json_file,'r') as f:
all_models = json.load(f)
model = all_models[model]
model_variable_names = list(model.values())
variable_names = list(model.keys())
i_variable = model_variable_names.index(variable)
return variable_names[i_variable]
def get_urls(model,json_file='input/urls.json'):
with open(json_file,'r') as f:
all_urls = json.load(f)
return all_urls[model]
def get_logins(model,json_file='input/logins.json'):
with open(json_file,'r') as f:
all_logins = json.load(f)
return all_logins[model]
def get_ncfiles_in_dir(input_dir):
ncfiles = []
for filename in os.listdir(input_dir):
if filename.endswith('.nc'):
ncfiles.append(filename)
return ncfiles
def get_ncfiles_in_time_range(input_dir,start_date,end_date,including_end=1,timeformat='%Y%m%d'):
all_ncfiles = get_ncfiles_in_dir(input_dir)
ndays = (end_date-start_date).days+including_end
ncfiles = []
for n in range(ndays):
date = start_date+timedelta(days=n)
for ncfile in all_ncfiles:
if ncfile.startswith(date.strftime(timeformat)):
ncfiles.append(ncfile)
return ncfiles
def get_start_and_end_indices(array,start_value,end_value):
l_indices = np.logical_and(array>=start_value,array<=end_value)
indices = np.where(l_indices)[0]
start_index = indices[0]
end_index = indices[-1]
return (start_index,end_index)
def get_closest_index(A,target):
# A must be sorted!
idx = A.searchsorted(target)
idx = np.clip(idx,1,len(A)-1)
left = A[idx-1]
right = A[idx]
idx -= target-left < right-target
return idx
def rename_ncfiles_in_dir(input_dir: str, filename_indices: list, filename_format: str, new_filename_format: str):
ncfiles = get_ncfiles_in_dir(input_dir)
for ncfile in ncfiles:
input_path = input_dir+ncfile
ncfile_date = datetime.strptime(ncfile[filename_indices[0]:filename_indices[1]],filename_format)
output_path = input_dir+ncfile_date.strftime(new_filename_format)+'.nc'
os.rename(input_path,output_path)
# -----------------------------------------------
# Timeseries
# -----------------------------------------------
def get_time_index(time_array,time):
'''Returns exact index of a requested time, raises
error if this does not exist.'''
t = np.where(time_array==time)[0]
if len(t) > 1:
raise ValueError('Multiple times found in time array that equal requested time.')
elif len(t) == 0:
raise ValueError('Requested time not found in time array.')
else:
return t[0]
def get_time_indices(timeseries,time):
i_times = []
for i,t in enumerate(timeseries):
if t.date() == time.date():
i_times.append(i)
if len(i_times) == 0:
raise ValueError(f'Time {time.strftime("%d-%m-%Y")} not found in timeseries.')
return i_times
def get_closest_time_index(time_array,time):
'''Returns exact index of a requested time if is exists,
otherwise returns the index of the closest time.'''
dt = abs(time_array-time)
i_closest = np.where(dt == dt.min())[0][0]
return i_closest
def get_l_time_range(time,start_time,end_time):
    if type(start_time) is date:
        start_time = datetime(start_time.year,start_time.month,start_time.day)
    if type(end_time) is date:
        end_time = datetime(end_time.year,end_time.month,end_time.day)
l_start = time >= start_time
l_end = time <= end_time
l_time = l_start & l_end
return l_time
def get_n_months(start_date,end_date):
n_months = end_date.month-start_date.month
n_years = end_date.year-start_date.year
if not n_years == 0:
n_months = n_months+12*n_years
return n_months
def add_month_to_timestamp(timestamp,n_month):
month = timestamp.month - 1 + n_month
year = timestamp.year + month // 12
month = month % 12 + 1
return datetime(year,month,timestamp.day)
def convert_time_to_datetime(time_org,time_units):
time = []
i_start_time = time_units.index('since')+len('since')+1
if 'T' in time_units: # YYYY-mm-ddTHH:MM format used by Parcels
i_end_time = i_start_time+len('YYYY-mm-ddTHH:MM')
base_time = datetime.strptime(time_units[i_start_time:i_end_time],'%Y-%m-%dT%H:%M')
else: # YYYY-mm-dd format used by multiple numerical models
i_end_time = i_start_time+len('YYYY-mm-dd')
base_time = datetime.strptime(time_units[i_start_time:i_end_time],'%Y-%m-%d')
if time_units.startswith('seconds'):
if time_org.shape == ():
time = base_time+timedelta(seconds=float(time_org))
return time
for t in time_org:
if not np.isnan(t):
time.append(base_time+timedelta(seconds=float(t)))
else:
time.append(np.nan)
return np.array(time)
elif time_units.startswith('hours'):
if time_org.shape == ():
time = base_time+timedelta(hours=float(time_org))
return time
for t in time_org:
if not np.isnan(t):
time.append(base_time+timedelta(hours=float(t)))
else:
time.append(np.nan)
return np.array(time)
elif time_units.startswith('days'):
if time_org.shape == ():
time = base_time+timedelta(days=float(time_org))
return time
for t in time_org:
if not np.isnan(t):
time.append(base_time+timedelta(days=float(t)))
else:
time.append(np.nan)
return np.array(time)
else:
raise ValueError('Unknown time units for time conversion to datetime.')
def convert_datetime_to_time(time_org,time_units='seconds',time_origin=datetime(1995,1,1)):
time = []
if time_units == 'seconds':
conversion = 1
elif time_units == 'hours':
conversion = 60*60
elif time_units == 'days':
conversion = 24*60*60
else:
        raise ValueError('Unknown time units requested for time conversion from datetime.')
for t in time_org:
time.append((t-time_origin).total_seconds()/conversion)
units = f'{time_units} since {time_origin.strftime("%Y-%m-%d %H:%M")}'
return np.array(time),units
# -----------------------------------------------
# Coordinates
# -----------------------------------------------
def convert_lon_360_to_180(lon):
lon180 = np.copy(lon)
lon180[lon180>180] = lon180[lon180>180]-360
i_lon = np.argsort(lon180)
lon180 = lon180[i_lon]
return lon180,i_lon
# -----------------------------------------------
# OpenDAP
# -----------------------------------------------
def get_ncfiles_from_opendap_catalog(catalog_url):
catalog_content = download_html(catalog_url)
parsed_html = parse_html(catalog_content)
ncfiles = []
for line in parsed_html:
if line.endswith('.nc'):
ncfiles.append(line)
return ncfiles
def download_html(url):
catalog_response = urlopen(url)
return catalog_response.read().decode('UTF-8')
def parse_html(html_text):
parsed_catalog = OpendapHtmlParser()
parsed_catalog.feed(html_text)
return parsed_catalog.data
class OpendapHtmlParser(HTMLParser):
def __init__(self):
HTMLParser.__init__(self)
self.data = []
def handle_data(self,data):
self.data.append(data)
| 35.858974
| 114
| 0.633298
|
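get_closest_index above uses numpy's searchsorted to find the nearest value in a sorted array without a full distance computation. A self-contained sketch of the same trick:

import numpy as np

A = np.array([0.0, 1.0, 2.5, 4.0, 7.0])          # must be sorted
target = np.array([0.4, 2.4, 6.0])
idx = np.clip(A.searchsorted(target), 1, len(A) - 1)
idx -= target - A[idx - 1] < A[idx] - target     # step back when the left neighbour is closer
print(idx, A[idx])                               # indices [0 2 4], values 0.0 2.5 7.0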
1ef2e40260455b0ce348728bcae959b226253484
| 8,126
|
py
|
Python
|
src/pymordemos/analyze_pickle.py
|
TiKeil/pymor
|
5c6b3b6e1714b5ede11ce7cf03399780ab29d252
|
[
"Unlicense"
] | null | null | null |
src/pymordemos/analyze_pickle.py
|
TiKeil/pymor
|
5c6b3b6e1714b5ede11ce7cf03399780ab29d252
|
[
"Unlicense"
] | null | null | null |
src/pymordemos/analyze_pickle.py
|
TiKeil/pymor
|
5c6b3b6e1714b5ede11ce7cf03399780ab29d252
|
[
"Unlicense"
] | null | null | null |
#!/usr/bin/env python
# This file is part of the pyMOR project (http://www.pymor.org).
# Copyright 2013-2020 pyMOR developers and contributors. All rights reserved.
# License: BSD 2-Clause License (http://opensource.org/licenses/BSD-2-Clause)
import sys
import time
import numpy as np
import matplotlib.pyplot as plt
from typer import Argument, Option, Typer
from pymor.core.pickle import load
app = Typer(help='''
This demo loads a pickled reduced model, solves for random
parameters, estimates the reduction errors and then visualizes these
estimates. If the detailed model and the reductor are
also provided, the estimated error is visualized in comparison to
the real reduction error.
The needed data files are created by the thermal block demo, by
setting the '--pickle' option.
'''[1:])
REDUCED_DATA = Argument(..., help='File containing the pickled reduced model.')
SAMPLES = Argument(..., min=1, help='Number of parameter samples to test with. ')
ERROR_NORM = Option(None, help='Name of norm in which to compute the errors.')
@app.command()
def histogram(
reduced_data: str = REDUCED_DATA,
samples: int = SAMPLES,
detailed_data: str = Option(None, help='File containing the high-dimensional model and the reductor.'),
error_norm: str = ERROR_NORM
):
print('Loading reduced model ...')
rom, parameter_space = load(open(reduced_data, 'rb'))
mus = parameter_space.sample_randomly(samples)
us = []
for mu in mus:
print(f'Solving reduced for {mu} ... ', end='')
sys.stdout.flush()
us.append(rom.solve(mu))
print('done')
print()
if hasattr(rom, 'estimate'):
ests = []
for mu in mus:
print(f'Estimating error for {mu} ... ', end='')
sys.stdout.flush()
ests.append(rom.estimate_error(mu))
print('done')
if detailed_data:
print('Loading high-dimensional data ...')
fom, reductor = load(open(detailed_data, 'rb'))
errs = []
for u, mu in zip(us, mus):
print(f'Calculating error for {mu} ... ')
sys.stdout.flush()
err = fom.solve(mu) - reductor.reconstruct(u)
if error_norm:
errs.append(np.max(getattr(fom, error_norm + '_norm')(err)))
else:
errs.append(np.max(err.norm()))
print('done')
print()
try:
plt.style.use('ggplot')
except AttributeError:
pass # plt.style is only available in newer matplotlib versions
if hasattr(rom, 'estimate') and detailed_data:
# setup axes
left, width = 0.1, 0.65
bottom, height = 0.1, 0.65
bottom_h = left_h = left+width+0.02
rect_scatter = [left, bottom, width, height]
rect_histx = [left, bottom_h, width, 0.2]
rect_histy = [left_h, bottom, 0.2, height]
plt.figure(1, figsize=(8, 8))
axScatter = plt.axes(rect_scatter)
axHistx = plt.axes(rect_histx)
axHisty = plt.axes(rect_histy)
# scatter plot
total_min = min(np.min(ests), np.min(errs)) * 0.9
total_max = max(np.max(ests), np.max(errs)) * 1.1
axScatter.set_xscale('log')
axScatter.set_yscale('log')
axScatter.set_xlim([total_min, total_max])
axScatter.set_ylim([total_min, total_max])
axScatter.set_xlabel('errors')
axScatter.set_ylabel('estimates')
axScatter.plot([total_min, total_max], [total_min, total_max], 'r')
axScatter.scatter(errs, ests)
# plot histograms
x_hist, x_bin_edges = np.histogram(errs, bins=_bins(total_min, total_max))
axHistx.bar(x_bin_edges[1:], x_hist, width=x_bin_edges[:-1] - x_bin_edges[1:], color='blue')
y_hist, y_bin_edges = np.histogram(ests, bins=_bins(total_min, total_max))
axHisty.barh(y_bin_edges[1:], y_hist, height=y_bin_edges[:-1] - y_bin_edges[1:], color='blue')
axHistx.set_xscale('log')
axHisty.set_yscale('log')
axHistx.set_xticklabels([])
axHisty.set_yticklabels([])
axHistx.set_xlim(axScatter.get_xlim())
axHisty.set_ylim(axScatter.get_ylim())
axHistx.set_ylim([0, max(np.max(x_hist), np.max(y_hist))])
axHisty.set_xlim([0, max(np.max(x_hist), np.max(y_hist))])
plt.show()
elif hasattr(rom, 'estimate'):
total_min = np.min(ests) * 0.9
total_max = np.max(ests) * 1.1
hist, bin_edges = np.histogram(ests, bins=_bins(total_min, total_max))
plt.bar(bin_edges[1:], hist, width=bin_edges[:-1] - bin_edges[1:], color='blue')
plt.xlim([total_min, total_max])
plt.xscale('log')
plt.xlabel('estimated error')
plt.show()
elif detailed_data:
        total_min = np.min(errs) * 0.9
        total_max = np.max(errs) * 1.1
hist, bin_edges = np.histogram(errs, bins=_bins(total_min, total_max))
plt.bar(bin_edges[1:], hist, width=bin_edges[:-1] - bin_edges[1:], color='blue')
plt.xlim([total_min, total_max])
plt.xscale('log')
plt.xlabel('error')
plt.show()
else:
raise ValueError('Nothing to plot!')
@app.command()
def convergence(
reduced_data: str = REDUCED_DATA,
detailed_data: str = Argument(..., help='File containing the high-dimensional model and the reductor.'),
samples: int = SAMPLES,
error_norm: str = ERROR_NORM,
ndim: int = Option(None, help='Number of reduced basis dimensions for which to estimate the error.')
):
print('Loading reduced model ...')
rom, parameter_space = load(open(reduced_data, 'rb'))
print('Loading high-dimensional data ...')
fom, reductor = load(open(detailed_data, 'rb'))
fom.enable_caching('disk')
dim = rom.solution_space.dim
if ndim:
dims = np.linspace(0, dim, ndim, dtype=np.int)
else:
dims = np.arange(dim + 1)
mus = parameter_space.sample_randomly(samples)
ESTS = []
ERRS = []
T_SOLVES = []
T_ESTS = []
for N in dims:
rom = reductor.reduce(N)
print(f'N = {N:3} ', end='')
us = []
print('solve ', end='')
sys.stdout.flush()
start = time.perf_counter()
for mu in mus:
us.append(rom.solve(mu))
T_SOLVES.append((time.perf_counter() - start) * 1000. / len(mus))
print('estimate ', end='')
sys.stdout.flush()
if hasattr(rom, 'estimate'):
ests = []
start = time.perf_counter()
for mu in mus:
# print('e', end='')
# sys.stdout.flush()
ests.append(rom.estimate_error(mu))
ESTS.append(max(ests))
T_ESTS.append((time.perf_counter() - start) * 1000. / len(mus))
print('errors', end='')
sys.stdout.flush()
errs = []
for u, mu in zip(us, mus):
err = fom.solve(mu) - reductor.reconstruct(u)
if error_norm:
errs.append(np.max(getattr(fom, error_norm + '_norm')(err)))
else:
errs.append(np.max(err.norm()))
ERRS.append(max(errs))
print()
print()
try:
plt.style.use('ggplot')
except AttributeError:
pass # plt.style is only available in newer matplotlib versions
plt.subplot(1, 2, 1)
if hasattr(rom, 'estimate'):
plt.semilogy(dims, ESTS, label='max. estimate')
plt.semilogy(dims, ERRS, label='max. error')
plt.xlabel('dimension')
plt.legend()
plt.subplot(1, 2, 2)
plt.plot(dims, T_SOLVES, label='avg. solve time')
if hasattr(rom, 'estimate'):
plt.plot(dims, T_ESTS, label='avg. estimate time')
plt.xlabel('dimension')
plt.ylabel('milliseconds')
plt.legend()
plt.show()
def _bins(start, stop, steps=100):
''' numpy has a quirk in unreleased master where logspace
might sometimes not return a 1d array
'''
bins = np.logspace(np.log10(start), np.log10(stop), steps)
if bins.shape == (steps, 1):
bins = bins[:, 0]
return bins
if __name__ == '__main__':
app()
| 31.866667
| 108
| 0.603495
|
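The _bins helper above builds logarithmically spaced histogram bins so that error estimates spanning several orders of magnitude are binned sensibly. A short numpy sketch of the same idea with illustrative values, independent of pyMOR:

import numpy as np

errors = np.array([1e-6, 3e-6, 2e-5, 8e-5, 1e-4, 5e-4])
bins = np.logspace(np.log10(errors.min()), np.log10(errors.max()), 10)
hist, edges = np.histogram(errors, bins=bins)
print(hist, edges[0], edges[-1])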
bd298e40c6fe8349aaa6a346c16159494396cfc9
| 3,261
|
py
|
Python
|
tracker/tracker_tiny.py
|
DJT777/Tensorrt_Detection_With-SQL
|
7762eb832af926861f9a6116f9dbc79280e177f3
|
[
"MIT"
] | null | null | null |
tracker/tracker_tiny.py
|
DJT777/Tensorrt_Detection_With-SQL
|
7762eb832af926861f9a6116f9dbc79280e177f3
|
[
"MIT"
] | null | null | null |
tracker/tracker_tiny.py
|
DJT777/Tensorrt_Detection_With-SQL
|
7762eb832af926861f9a6116f9dbc79280e177f3
|
[
"MIT"
] | null | null | null |
import tensorrt as trt
from utils import common
from utils.data_processing import *
from utils.draw import draw_boxes
from deep_sort import build_tracker
TRT_LOGGER = trt.Logger()
def get_engine(engine_file_path):
# If a serialized engine exists, use it instead of building an engine.
print("Reading engine from file {}".format(engine_file_path))
with open(engine_file_path, "rb") as f, trt.Runtime(TRT_LOGGER) as runtime:
return runtime.deserialize_cuda_engine(f.read())
class Tracker_tiny():
def __init__(self, cfg, engine_file_path):
self.cfg = cfg
# self.args = args
self.deepsort = build_tracker(cfg, use_cuda=True)
#---tensorrt----#
self.engine = get_engine(engine_file_path)
self.context = self.engine.create_execution_context()
self.inputs, self.outputs, self.bindings, self.stream = common.allocate_buffers(self.engine)
# ---tensorrt----#
#---input info for yolov3-416------#
self.input_resolution_yolov3_HW = (416, 416)
self.preprocessor = PreprocessYOLO(self.input_resolution_yolov3_HW)
# self.image_raw, self.image = self.preprocessor.process(ori_im)
# self.shape_orig_WH = image_raw.size
#TODO tiny
self.output_shapes = [(1, 255, 13, 13), (1, 255, 26, 26)]
self.postprocessor_args = {"yolo_masks": [ (3, 4, 5), (0, 1, 2)],
# A list of 3 three-dimensional tuples for the YOLO masks
"yolo_anchors": [(10, 14), (23, 27), (37, 58),
(81, 82), (135, 169), (344, 319)],
"obj_threshold": 0.6, # Threshold for object coverage, float value between 0 and 1
"nms_threshold": 0.3,
# Threshold for non-max suppression algorithm, float value between 0 and 1
"yolo_input_resolution": self.input_resolution_yolov3_HW}
self.postprocessor = PostprocessYOLO(**self.postprocessor_args)
def run(self, ori_im):
image_raw, image = self.preprocessor.process(ori_im)
shape_orig_WH = image_raw.size
# print('type of image:', type(image))
self.inputs[0].host = image
trt_outputs = common.do_inference(
self.context, bindings=self.bindings, inputs=self.inputs, outputs=self.outputs, stream=self.stream)
trt_outputs = [output.reshape(shape) for output, shape in zip(trt_outputs, self.output_shapes)]
bbox_xywh, cls_ids, cls_conf = self.postprocessor.process(trt_outputs, (shape_orig_WH))
if bbox_xywh is not None:
# select person class
mask = cls_ids == 0
bbox_xywh = bbox_xywh[mask]
bbox_xywh[:, 3:] *= 1.2
cls_conf = cls_conf[mask]
# print('hahahat', bbox_xywh.dtype)
# do tracking
outputs = self.deepsort.update(bbox_xywh, cls_conf, ori_im)
# draw boxes for visualization
if len(outputs) > 0:
bbox_xyxy = outputs[:, :4]
identities = outputs[:, -1]
ori_im = draw_boxes(ori_im, bbox_xyxy, identities)
return ori_im
| 39.768293
| 113
| 0.604722
|
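run() above passes (x, y, w, h) detections to the tracker and gets corner (x1, y1, x2, y2) boxes back for drawing. A hedged numpy sketch of that coordinate conversion, assuming xywh means box centre plus width and height as deep SORT expects:

import numpy as np

def xywh_to_xyxy(boxes):
    boxes = np.asarray(boxes, dtype=float)
    x1y1 = boxes[:, :2] - boxes[:, 2:4] / 2.0
    x2y2 = boxes[:, :2] + boxes[:, 2:4] / 2.0
    return np.hstack([x1y1, x2y2])

print(xywh_to_xyxy([[50, 40, 20, 10]]))  # [[40. 35. 60. 45.]]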
e7fc5508cdccd5e934053ee40ff49c692daaa841
| 2,502
|
py
|
Python
|
src/forecastmgmt/model/forecast_originator.py
|
vvladych/forecastmgmt
|
9eea272d00bb42031f49b5bb5af01388ecce31cf
|
[
"Unlicense"
] | null | null | null |
src/forecastmgmt/model/forecast_originator.py
|
vvladych/forecastmgmt
|
9eea272d00bb42031f49b5bb5af01388ecce31cf
|
[
"Unlicense"
] | 37
|
2015-07-01T22:18:51.000Z
|
2016-03-11T21:17:12.000Z
|
src/forecastmgmt/model/forecast_originator.py
|
vvladych/forecastmgmt
|
9eea272d00bb42031f49b5bb5af01388ecce31cf
|
[
"Unlicense"
] | null | null | null |
'''
Created on 14.05.2015
@author: vvladych
'''
from MDO import MDO
class ForecastOriginator(MDO):
sql_dict={"get_all":"SELECT sid, forecast_sid, originator_sid FROM fc_forecast_originator",
#"get_all_foreign_key":"SELECT sid, forecast_sid, originator_sid FROM fc_forecast_originator WHERE forecast_sid=%s",
"get_all_foreign_key":"""SELECT
fc_person.sid as sid, forecast_sid, fc_person.common_name, fc_originator_person.originator_sid,'person' as origin_type
FROM
fc_forecast_originator, fc_originator_person, fc_person
WHERE
fc_forecast_originator.forecast_sid=%s AND
fc_forecast_originator.originator_sid=fc_originator_person.originator_sid AND
fc_originator_person.person_sid=fc_person.sid
UNION
SELECT
fc_organization.sid as sid, forecast_sid, fc_organization.common_name, fc_originator_organisation.originator_sid,'organisation' as origin_type
FROM
fc_forecast_originator, fc_originator_organisation, fc_organization
WHERE
fc_forecast_originator.forecast_sid=%s AND
fc_forecast_originator.originator_sid=fc_originator_organisation.originator_sid AND
fc_originator_organisation.organisation_sid=fc_organization.sid
""",
"delete":"DELETE FROM fc_forecast_originator WHERE sid=%s",
"insert":"INSERT INTO fc_forecast_originator(forecast_sid, originator_sid) VALUES(%s, %s) RETURNING sid",
"load":"SELECT sid, forecast_sid, originator_sid FROM fc_forecast_originator WHERE sid=%s"}
def __init__(self, sid=None, uuid=None, forecast_sid=None, originator_sid=None):
super(ForecastOriginator, self).__init__(ForecastOriginator.sql_dict,sid,uuid)
self.forecast_sid=forecast_sid
self.originator_sid=originator_sid
def load_object_from_db(self,rec):
self.forecast_sid=rec.forecast_sid
self.originator_sid=rec.originator_sid
def get_insert_data(self):
return (self.forecast_sid, self.originator_sid,)
def fabric_method(self,rec):
return ForecastOriginator(rec.sid, None, rec.forecast_sid, rec.originator_sid)
| 51.061224
| 169
| 0.645883
|
e5469e3550215bea4b069a3c0f3d89d7cd69237e
| 2,236
|
py
|
Python
|
dfa/server/services/firewall/native/drivers/base.py
|
CiscoSystems/fabric_enabler
|
d5318624dd15692197a7212ecd4b0ceea42dc73e
|
[
"Apache-2.0"
] | 1
|
2015-03-05T02:48:15.000Z
|
2015-03-05T02:48:15.000Z
|
dfa/server/services/firewall/native/drivers/base.py
|
CiscoSystems/fabric_enabler
|
d5318624dd15692197a7212ecd4b0ceea42dc73e
|
[
"Apache-2.0"
] | 1
|
2016-05-25T22:13:43.000Z
|
2016-07-21T20:49:48.000Z
|
dfa/server/services/firewall/native/drivers/base.py
|
CiscoSystems/fabric_enabler
|
d5318624dd15692197a7212ecd4b0ceea42dc73e
|
[
"Apache-2.0"
] | 2
|
2017-05-02T21:32:46.000Z
|
2018-08-22T16:52:40.000Z
|
# Copyright 2015 Cisco Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Padmanabhan Krishnan, Cisco Systems, Inc.
import abc
import six
from dfa.common import dfa_logger as logging
LOG = logging.getLogger(__name__)
@six.add_metaclass(abc.ABCMeta)
class BaseDrvr(object):
'''Base Driver class for FW driver classes.'''
# def __init__(self):
# Pass
@abc.abstractmethod
def initialize(self):
'''Initialize method'''
pass
@abc.abstractmethod
def pop_evnt_que(self):
'''Pop Event Queue'''
pass
@abc.abstractmethod
def pop_dcnm_obj(self):
'''Pop DCNM Obj'''
pass
@abc.abstractmethod
def nwk_create_notif(self, tenant_id, tenant_name, cidr):
'''Nwk Create Notification'''
pass
@abc.abstractmethod
def nwk_delete_notif(self, tenant_id, tenant_name, net_id):
'''Nwk Delete Notification'''
pass
@abc.abstractmethod
def is_device_virtual(self):
'''Return False if device is physical, True otherwise'''
pass
@abc.abstractmethod
def get_name(self):
'''Return the name of the driver service'''
pass
@abc.abstractmethod
def get_max_quota(self):
        '''Retrieves the maximum number of FW that could be created'''
pass
@abc.abstractmethod
def create_fw(self, tenant_id, data):
'''Create the Firewall'''
pass
@abc.abstractmethod
def delete_fw(self, tenant_id, data):
        '''Delete the Firewall'''
pass
@abc.abstractmethod
def modify_fw(self, tenant_id, data):
        '''Modify the Firewall'''
pass
| 25.409091
| 78
| 0.653399
|
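BaseDrvr above combines six.add_metaclass(abc.ABCMeta) with @abc.abstractmethod so that concrete firewall drivers must implement every listed method. A stdlib-only Python 3 sketch of the same pattern; the class names below are made up for illustration.

import abc

class BaseLike(abc.ABC):
    @abc.abstractmethod
    def initialize(self):
        '''Initialize the driver.'''

class FakeDrvr(BaseLike):
    def initialize(self):
        return 'initialized'

print(FakeDrvr().initialize())  # fine: every abstract method is implemented
# BaseLike() would raise TypeError: can't instantiate an abstract class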
1f09094421945b6bf3c989b90b22520b00313b24
| 794
|
py
|
Python
|
gallery/urls.py
|
nadineuwineza/my_gallery
|
fa51ffb2ed0f53ff285fd2241150e5aad414c20f
|
[
"MIT"
] | null | null | null |
gallery/urls.py
|
nadineuwineza/my_gallery
|
fa51ffb2ed0f53ff285fd2241150e5aad414c20f
|
[
"MIT"
] | null | null | null |
gallery/urls.py
|
nadineuwineza/my_gallery
|
fa51ffb2ed0f53ff285fd2241150e5aad414c20f
|
[
"MIT"
] | null | null | null |
"""gallery URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path,include
urlpatterns = [
path('admin/', admin.site.urls),
path('',include('photos.urls'))
]
| 33.083333
| 77
| 0.70403
|
db97aedbcec5ac0129e464eee910562e2732791f
| 1,808
|
py
|
Python
|
utilities/composite_data_npy_to_csv.py
|
JoeyTeng/topology-and-meta-learning
|
61fe5a231a0062d9939d1ccdfc0babcbe9562867
|
[
"MIT"
] | 2
|
2017-06-07T15:59:58.000Z
|
2019-05-24T14:00:33.000Z
|
utilities/composite_data_npy_to_csv.py
|
JoeyTeng/Algorithm-Selection-for-Classification-Problems-via-Cluster-based-Meta-features
|
61fe5a231a0062d9939d1ccdfc0babcbe9562867
|
[
"MIT"
] | null | null | null |
utilities/composite_data_npy_to_csv.py
|
JoeyTeng/Algorithm-Selection-for-Classification-Problems-via-Cluster-based-Meta-features
|
61fe5a231a0062d9939d1ccdfc0babcbe9562867
|
[
"MIT"
] | 2
|
2020-04-09T10:50:50.000Z
|
2021-09-28T00:50:23.000Z
|
# @Author: Joey Teng
# @Email: joey.teng.dev@gmail.com
# @Filename: composite_data_npy_to_csv.py
# @Last modified by: Joey Teng
# @Last modified time: 11-Apr-2018
import csv
import os
import sys
import numpy
CLUSTER_FEATURES = [
"Average Size",
"Standard Deviation of Size",
"Average of Natural Logarithm of Inverse of Density",
"Standard Deviation of Natural Logarithm of Inverse of Density",
"Number of Clusters"]
META_FEATURE_NAMES = ['ClassEnt', 'AttrEnt', 'JointEnt', 'MutInfo',
'EquiAttr', 'NoiseRatio', 'StandardDev', 'Skewness',
'Kurtosis', 'treewidth', 'treeheight', 'NoNode',
'NoLeave', 'maxLevel', 'meanLevel', 'devLevel',
'ShortBranch', 'meanBranch', 'devBranch', 'maxAtt',
'minAtt', 'meanAtt', 'devAtt'] + CLUSTER_FEATURES
def main(path):
# TODO:
print(path, flush=True)
files = ['{0}/{1}'.format(
path.strip(), file[:-len('.cluster.npy')])
for file in os.listdir(path)
if file.find('.cluster.npy') != -1]
files.sort()
table = []
for file in files:
print("Loaded: {}".format(file), flush=True)
row = [file[file.rfind('/') + 1:]] +\
list(numpy.load("{}.npy".format(file))) +\
list(numpy.load("{}.cluster.npy".format(file)))
table.append(row)
print("Writing into csv file...", flush=True)
with open("{}/composited.csv".format(path), 'w', newline='') as csvfile:
fieldnames = ["Dataset"] + META_FEATURE_NAMES
writer = csv.writer(csvfile, dialect='excel')
writer.writerow(fieldnames)
writer.writerows(table)
print("Completed.", flush=True)
if __name__ == '__main__':
for path in sys.argv[1:]:
main(path)
| 31.719298
| 76
| 0.587389
|
ea170acfd02801138d1ccb2d4c03e4fc7df6f44f
| 728
|
py
|
Python
|
src/hpilo_exporter/main.py
|
wardellc/hpilo-exporter
|
20e69b723cb76f3f77b1a49d48c1ef479a2613e6
|
[
"MIT"
] | null | null | null |
src/hpilo_exporter/main.py
|
wardellc/hpilo-exporter
|
20e69b723cb76f3f77b1a49d48c1ef479a2613e6
|
[
"MIT"
] | null | null | null |
src/hpilo_exporter/main.py
|
wardellc/hpilo-exporter
|
20e69b723cb76f3f77b1a49d48c1ef479a2613e6
|
[
"MIT"
] | null | null | null |
"""
Entrypoint for the application
"""
import argparse
from hpilo_exporter.exporter import ILOExporterServer
def main():
    parser = argparse.ArgumentParser(description='Exports iLO health_at_a_glance state to Prometheus')
parser.add_argument('--address', type=str, dest='address', default='0.0.0.0', help='address to serve on')
parser.add_argument('--port', type=int, dest='port', default='9416', help='port to bind')
parser.add_argument('--endpoint', type=str, dest='endpoint', default='/metrics',
help='endpoint where metrics will be published')
args = parser.parse_args()
exporter = ILOExporterServer(**vars(args))
exporter.run()
if __name__ == '__main__':
main()
| 29.12
| 109
| 0.686813
|
30636c16821e201f2d511fc46c9525c0b05d78a7
| 2,348
|
py
|
Python
|
tests/base/test_DataMisfit.py
|
kimjaed/simpeg
|
b8d716f86a4ea07ba3085fabb24c2bc974788040
|
[
"MIT"
] | 1
|
2021-02-13T18:15:12.000Z
|
2021-02-13T18:15:12.000Z
|
tests/base/test_DataMisfit.py
|
kimjaed/simpeg
|
b8d716f86a4ea07ba3085fabb24c2bc974788040
|
[
"MIT"
] | null | null | null |
tests/base/test_DataMisfit.py
|
kimjaed/simpeg
|
b8d716f86a4ea07ba3085fabb24c2bc974788040
|
[
"MIT"
] | 1
|
2020-05-26T17:00:53.000Z
|
2020-05-26T17:00:53.000Z
|
from __future__ import print_function
import unittest
import numpy as np
import scipy.sparse as sp
from SimPEG import Mesh, DataMisfit, Maps, Utils
from SimPEG.EM.Static import DC
np.random.seed(17)
class DataMisfitTest(unittest.TestCase):
def setUp(self):
mesh = Mesh.TensorMesh([30, 30], x0=[-0.5, -1.])
sigma = np.ones(mesh.nC)
model = np.log(sigma)
prob = DC.Problem3D_CC(mesh, rhoMap=Maps.ExpMap(mesh))
rx = DC.Rx.Pole(
Utils.ndgrid([mesh.vectorCCx, np.r_[mesh.vectorCCy.max()]])
)
src = DC.Src.Dipole(
[rx], np.r_[-0.25, mesh.vectorCCy.max()],
np.r_[0.25, mesh.vectorCCy.max()]
)
survey = DC.Survey([src])
prob.pair(survey)
self.std = 0.01
survey.std = self.std
dobs = survey.makeSyntheticData(model)
self.eps = 1e-8 * np.min(np.abs(dobs))
survey.eps = self.eps
dmis = DataMisfit.l2_DataMisfit(survey)
self.model = model
self.mesh = mesh
self.survey = survey
self.prob = prob
self.dobs = dobs
self.dmis = dmis
def test_Wd_depreciation(self):
with self.assertRaises(Exception):
print(self.dmis.Wd)
with self.assertRaises(Exception):
self.dmis.Wd = Utils.Identity()
def test_DataMisfit_nP(self):
self.assertTrue(self.dmis.nP == self.mesh.nC)
def test_setting_W(self):
Worig = self.dmis.W
v = np.random.rand(self.survey.nD)
self.dmis.W = v
self.assertTrue(self.dmis.W.shape == (self.survey.nD, self.survey.nD))
self.assertTrue(np.all(self.dmis.W.diagonal() == v))
with self.assertRaises(Exception):
self.dmis.W = np.random.rand(self.survey.nD + 10)
self.dmis.W = Worig
def test_DataMisfitOrder(self):
self.dmis.test(x=self.model)
def test_std_eps(self):
stdtest = np.all(self.survey.std == self.dmis.std)
epstest = (self.survey.eps == self.dmis.eps)
Wtest = np.allclose(
np.abs(np.dot(self.dmis.W.todense(), self.dobs)),
1./self.std,
atol=self.eps
)
self.assertTrue(stdtest)
self.assertTrue(epstest)
self.assertTrue(Wtest)
if __name__ == '__main__':
unittest.main()
| 26.382022
| 78
| 0.587734
|
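test_std_eps above checks that the misfit's W matrix is built from the survey standard deviation and noise floor, i.e. each datum is weighted by 1 / (std * |d_obs| + eps). A numpy sketch of that weighted least-squares misfit with illustrative numbers; the 0.5 factor is a common convention rather than something the test asserts.

import numpy as np

std, eps = 0.01, 1e-8
d_obs = np.array([1.2, -0.4, 0.9])
d_pred = np.array([1.25, -0.38, 0.88])
w = 1.0 / (std * np.abs(d_obs) + eps)              # per-datum weights
phi_d = 0.5 * np.sum((w * (d_pred - d_obs)) ** 2)  # weighted l2 data misfit
print(phi_d)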
e84fef20d3bd47063f542c205af48ac54ae8cbb9
| 625
|
py
|
Python
|
tests/test_all.py
|
odra/pyco
|
2c85fa0d74e7ef891815400c301a2bf55e9638ac
|
[
"MIT"
] | null | null | null |
tests/test_all.py
|
odra/pyco
|
2c85fa0d74e7ef891815400c301a2bf55e9638ac
|
[
"MIT"
] | null | null | null |
tests/test_all.py
|
odra/pyco
|
2c85fa0d74e7ef891815400c301a2bf55e9638ac
|
[
"MIT"
] | null | null | null |
import inspect
import pyco
def test_from_fn(hello_world_fn):
fn = pyco.from_fn(hello_world_fn)
assert fn() == 'hello world'
def test_from_code(hello_world_fn):
fn = pyco.from_code(hello_world_fn.__code__)
assert fn() == 'hello world'
def test_from_fn_defaults(complex_fn):
fn = pyco.from_fn(complex_fn)
assert fn('odra') == 'odra is 32 years old and lives in nowhere'
def test_from_code_defaults(complex_fn):
spec = inspect.getargspec(complex_fn)
fn = pyco.from_code(complex_fn.__code__, defaults=spec.defaults)
assert fn('odra', country='anywhere') == 'odra is 32 years old and lives in anywhere'
| 25
| 87
| 0.7456
|
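The tests above build callables from plain function and code objects. A hedged stdlib sketch of what a from_code helper might do with types.FunctionType; this is an assumption for illustration, not pyco's actual implementation.

import types

def greet(name, punctuation='!'):
    return 'hello ' + name + punctuation

rebuilt = types.FunctionType(greet.__code__, globals(), 'rebuilt', greet.__defaults__)
print(rebuilt('world'))       # hello world!
print(rebuilt('world', '?'))  # hello world?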
2e4df589ee38c0092c49e0acc516fabfc4625208
| 6,850
|
py
|
Python
|
python/examples/plasma/sorting/sort_df.py
|
danielcompton/arrow
|
bce0ca40922278644ce9c610fc87c0761a95f2c4
|
[
"Apache-2.0"
] | 3
|
2018-11-19T13:38:21.000Z
|
2019-08-28T14:56:37.000Z
|
python/examples/plasma/sorting/sort_df.py
|
danielcompton/arrow
|
bce0ca40922278644ce9c610fc87c0761a95f2c4
|
[
"Apache-2.0"
] | 1
|
2021-01-21T01:33:30.000Z
|
2021-01-21T01:33:30.000Z
|
python/examples/plasma/sorting/sort_df.py
|
danielcompton/arrow
|
bce0ca40922278644ce9c610fc87c0761a95f2c4
|
[
"Apache-2.0"
] | 1
|
2019-11-17T00:46:32.000Z
|
2019-11-17T00:46:32.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from multiprocessing import Pool
import numpy as np
import pandas as pd
import pyarrow as pa
import pyarrow.plasma as plasma
import subprocess
import time
import multimerge
# To run this example, you will first need to run "python setup.py install" in
# this directory to build the Cython module.
#
# You will only see speedups if you run this code on more data, this is just a
# small example that can run on a laptop.
#
# The values we used to get a speedup (on a m4.10xlarge instance on EC2) were
# object_store_size = 84 * 10 ** 9
# num_cores = 20
# num_rows = 10 ** 9
# num_cols = 1
client = None
object_store_size = 2 * 10 ** 9 # 2 GB
num_cores = 8
num_rows = 200000
num_cols = 2
column_names = [str(i) for i in range(num_cols)]
column_to_sort = column_names[0]
# Connect to clients
def connect():
global client
client = plasma.connect('/tmp/store', '', 0)
np.random.seed(int(time.time() * 10e7) % 10000000)
def put_df(df):
record_batch = pa.RecordBatch.from_pandas(df)
# Get size of record batch and schema
mock_sink = pa.MockOutputStream()
stream_writer = pa.RecordBatchStreamWriter(mock_sink, record_batch.schema)
stream_writer.write_batch(record_batch)
data_size = mock_sink.size()
# Generate an ID and allocate a buffer in the object store for the
# serialized DataFrame
object_id = plasma.ObjectID(np.random.bytes(20))
buf = client.create(object_id, data_size)
# Write the serialized DataFrame to the object store
sink = pa.FixedSizeBufferWriter(buf)
stream_writer = pa.RecordBatchStreamWriter(sink, record_batch.schema)
stream_writer.write_batch(record_batch)
# Seal the object
client.seal(object_id)
return object_id
def get_dfs(object_ids):
"""Retrieve dataframes from the object store given their object IDs."""
buffers = client.get_buffers(object_ids)
return [pa.RecordBatchStreamReader(buf).read_next_batch().to_pandas()
for buf in buffers]
def local_sort(object_id):
"""Sort a partition of a dataframe."""
# Get the dataframe from the object store.
[df] = get_dfs([object_id])
# Sort the dataframe.
sorted_df = df.sort_values(by=column_to_sort)
# Get evenly spaced values from the dataframe.
indices = np.linspace(0, len(df) - 1, num=num_cores, dtype=np.int64)
# Put the sorted dataframe in the object store and return the corresponding
# object ID as well as the sampled values.
return put_df(sorted_df), sorted_df.as_matrix().take(indices)
def local_partitions(object_id_and_pivots):
"""Take a sorted partition of a dataframe and split it into more pieces."""
object_id, pivots = object_id_and_pivots
[df] = get_dfs([object_id])
split_at = df[column_to_sort].searchsorted(pivots)
split_at = [0] + list(split_at) + [len(df)]
# Partition the sorted dataframe and put each partition into the object
# store.
return [put_df(df[i:j]) for i, j in zip(split_at[:-1], split_at[1:])]
def merge(object_ids):
"""Merge a number of sorted dataframes into a single sorted dataframe."""
dfs = get_dfs(object_ids)
# In order to use our multimerge code, we have to convert the arrays from
# the Fortran format to the C format.
arrays = [np.ascontiguousarray(df.as_matrix()) for df in dfs]
for a in arrays:
assert a.dtype == np.float64
assert not np.isfortran(a)
# Filter out empty arrays.
arrays = [a for a in arrays if a.shape[0] > 0]
if len(arrays) == 0:
return None
resulting_array = multimerge.multimerge2d(*arrays)
merged_df2 = pd.DataFrame(resulting_array, columns=column_names)
return put_df(merged_df2)
if __name__ == '__main__':
# Start the plasma store.
p = subprocess.Popen(['plasma_store',
'-s', '/tmp/store',
'-m', str(object_store_size)])
# Connect to the plasma store.
connect()
# Connect the processes in the pool.
pool = Pool(initializer=connect, initargs=(), processes=num_cores)
# Create a DataFrame from a numpy array.
df = pd.DataFrame(np.random.randn(num_rows, num_cols),
columns=column_names)
partition_ids = [put_df(partition) for partition
in np.split(df, num_cores)]
# Begin timing the parallel sort example.
parallel_sort_start = time.time()
# Sort each partition and subsample them. The subsampled values will be
# used to create buckets.
sorted_df_ids, pivot_groups = list(zip(*pool.map(local_sort,
partition_ids)))
# Choose the pivots.
all_pivots = np.concatenate(pivot_groups)
indices = np.linspace(0, len(all_pivots) - 1, num=num_cores,
dtype=np.int64)
pivots = np.take(np.sort(all_pivots), indices)
# Break all of the sorted partitions into even smaller partitions. Group
# the object IDs from each bucket together.
results = list(zip(*pool.map(local_partitions,
zip(sorted_df_ids,
len(sorted_df_ids) * [pivots]))))
# Merge each of the buckets and store the results in the object store.
object_ids = pool.map(merge, results)
resulting_ids = [object_id for object_id in object_ids
if object_id is not None]
    # Stop timing the parallel sort example.
parallel_sort_end = time.time()
print('Parallel sort took {} seconds.'
.format(parallel_sort_end - parallel_sort_start))
serial_sort_start = time.time()
original_sorted_df = df.sort_values(by=column_to_sort)
serial_sort_end = time.time()
# Check that we sorted the DataFrame properly.
sorted_dfs = get_dfs(resulting_ids)
sorted_df = pd.concat(sorted_dfs)
print('Serial sort took {} seconds.'
.format(serial_sort_end - serial_sort_start))
assert np.allclose(sorted_df.values, original_sorted_df.values)
# Kill the object store.
p.kill()
| 33.578431
| 79
| 0.685255
|
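The example above is a distributed sample sort: each worker sorts its partition, pivots are sampled from the sorted pieces, every piece is split at those pivots, and the resulting buckets are merged. A single-process numpy sketch of the pivot-and-bucket step, with np.sort standing in for the Cython multimerge and no Plasma involved.

import numpy as np

rng = np.random.default_rng(0)
data = rng.normal(size=1000)
parts = np.array_split(data, 4)                    # pretend these live on 4 workers
sorted_parts = [np.sort(p) for p in parts]
samples = np.concatenate([p[::max(1, len(p) // 4)] for p in sorted_parts])
pivots = np.quantile(samples, [0.25, 0.5, 0.75])   # 3 pivots give 4 buckets
buckets = [[] for _ in range(4)]
for p in sorted_parts:
    cuts = [0] + list(p.searchsorted(pivots)) + [len(p)]
    for b, (i, j) in enumerate(zip(cuts[:-1], cuts[1:])):
        buckets[b].append(p[i:j])
result = np.concatenate([np.sort(np.concatenate(chunk)) for chunk in buckets])
assert np.array_equal(result, np.sort(data))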
dc4f00c589df14b2663772070ce1a1c79cf450d6
| 44,628
|
py
|
Python
|
lib-python/3/test/test_warnings/__init__.py
|
yxzoro/pypy
|
6e47b3d3e5513d9639a21554963a6ace172ccfee
|
[
"Apache-2.0",
"OpenSSL"
] | null | null | null |
lib-python/3/test/test_warnings/__init__.py
|
yxzoro/pypy
|
6e47b3d3e5513d9639a21554963a6ace172ccfee
|
[
"Apache-2.0",
"OpenSSL"
] | null | null | null |
lib-python/3/test/test_warnings/__init__.py
|
yxzoro/pypy
|
6e47b3d3e5513d9639a21554963a6ace172ccfee
|
[
"Apache-2.0",
"OpenSSL"
] | null | null | null |
from contextlib import contextmanager
import linecache
import os
from io import StringIO
import sys
import unittest
from test import support
from test.support.script_helper import assert_python_ok, assert_python_failure
from test.test_warnings.data import stacklevel as warning_tests
import warnings as original_warnings
py_warnings = support.import_fresh_module('warnings', blocked=['_warnings'])
c_warnings = support.import_fresh_module('warnings', fresh=['_warnings'])
@contextmanager
def warnings_state(module):
"""Use a specific warnings implementation in warning_tests."""
global __warningregistry__
for to_clear in (sys, warning_tests):
try:
to_clear.__warningregistry__.clear()
except AttributeError:
pass
try:
__warningregistry__.clear()
except NameError:
pass
original_warnings = warning_tests.warnings
original_filters = module.filters
try:
module.filters = original_filters[:]
module.simplefilter("once")
warning_tests.warnings = module
yield
finally:
warning_tests.warnings = original_warnings
module.filters = original_filters
class BaseTest:
"""Basic bookkeeping required for testing."""
def setUp(self):
self.old_unittest_module = unittest.case.warnings
# The __warningregistry__ needs to be in a pristine state for tests
# to work properly.
if '__warningregistry__' in globals():
del globals()['__warningregistry__']
if hasattr(warning_tests, '__warningregistry__'):
del warning_tests.__warningregistry__
if hasattr(sys, '__warningregistry__'):
del sys.__warningregistry__
# The 'warnings' module must be explicitly set so that the proper
# interaction between _warnings and 'warnings' can be controlled.
sys.modules['warnings'] = self.module
# Ensure that unittest.TestCase.assertWarns() uses the same warnings
# module than warnings.catch_warnings(). Otherwise,
# warnings.catch_warnings() will be unable to remove the added filter.
unittest.case.warnings = self.module
super(BaseTest, self).setUp()
def tearDown(self):
sys.modules['warnings'] = original_warnings
unittest.case.warnings = self.old_unittest_module
super(BaseTest, self).tearDown()
class PublicAPITests(BaseTest):
"""Ensures that the correct values are exposed in the
public API.
"""
def test_module_all_attribute(self):
self.assertTrue(hasattr(self.module, '__all__'))
target_api = ["warn", "warn_explicit", "showwarning",
"formatwarning", "filterwarnings", "simplefilter",
"resetwarnings", "catch_warnings"]
self.assertSetEqual(set(self.module.__all__),
set(target_api))
class CPublicAPITests(PublicAPITests, unittest.TestCase):
module = c_warnings
class PyPublicAPITests(PublicAPITests, unittest.TestCase):
module = py_warnings
class FilterTests(BaseTest):
"""Testing the filtering functionality."""
def test_error(self):
with original_warnings.catch_warnings(module=self.module) as w:
self.module.resetwarnings()
self.module.filterwarnings("error", category=UserWarning)
self.assertRaises(UserWarning, self.module.warn,
"FilterTests.test_error")
def test_error_after_default(self):
with original_warnings.catch_warnings(module=self.module) as w:
self.module.resetwarnings()
message = "FilterTests.test_ignore_after_default"
def f():
self.module.warn(message, UserWarning)
with support.captured_stderr() as stderr:
f()
stderr = stderr.getvalue()
self.assertIn("UserWarning: FilterTests.test_ignore_after_default",
stderr)
self.assertIn("self.module.warn(message, UserWarning)",
stderr)
self.module.filterwarnings("error", category=UserWarning)
self.assertRaises(UserWarning, f)
def test_ignore(self):
with original_warnings.catch_warnings(record=True,
module=self.module) as w:
self.module.resetwarnings()
self.module.filterwarnings("ignore", category=UserWarning)
self.module.warn("FilterTests.test_ignore", UserWarning)
self.assertEqual(len(w), 0)
def test_ignore_after_default(self):
with original_warnings.catch_warnings(record=True,
module=self.module) as w:
self.module.resetwarnings()
message = "FilterTests.test_ignore_after_default"
def f():
self.module.warn(message, UserWarning)
f()
self.module.filterwarnings("ignore", category=UserWarning)
f()
f()
self.assertEqual(len(w), 1)
def test_always(self):
with original_warnings.catch_warnings(record=True,
module=self.module) as w:
self.module.resetwarnings()
self.module.filterwarnings("always", category=UserWarning)
message = "FilterTests.test_always"
self.module.warn(message, UserWarning)
self.assertTrue(message, w[-1].message)
self.module.warn(message, UserWarning)
self.assertTrue(w[-1].message, message)
def test_always_after_default(self):
with original_warnings.catch_warnings(record=True,
module=self.module) as w:
self.module.resetwarnings()
message = "FilterTests.test_always_after_ignore"
def f():
self.module.warn(message, UserWarning)
f()
self.assertEqual(len(w), 1)
self.assertEqual(w[-1].message.args[0], message)
f()
self.assertEqual(len(w), 1)
self.module.filterwarnings("always", category=UserWarning)
f()
self.assertEqual(len(w), 2)
self.assertEqual(w[-1].message.args[0], message)
f()
self.assertEqual(len(w), 3)
self.assertEqual(w[-1].message.args[0], message)
def test_default(self):
with original_warnings.catch_warnings(record=True,
module=self.module) as w:
self.module.resetwarnings()
self.module.filterwarnings("default", category=UserWarning)
message = UserWarning("FilterTests.test_default")
for x in range(2):
self.module.warn(message, UserWarning)
if x == 0:
self.assertEqual(w[-1].message, message)
del w[:]
elif x == 1:
self.assertEqual(len(w), 0)
else:
raise ValueError("loop variant unhandled")
def test_module(self):
with original_warnings.catch_warnings(record=True,
module=self.module) as w:
self.module.resetwarnings()
self.module.filterwarnings("module", category=UserWarning)
message = UserWarning("FilterTests.test_module")
self.module.warn(message, UserWarning)
self.assertEqual(w[-1].message, message)
del w[:]
self.module.warn(message, UserWarning)
self.assertEqual(len(w), 0)
def test_once(self):
with original_warnings.catch_warnings(record=True,
module=self.module) as w:
self.module.resetwarnings()
self.module.filterwarnings("once", category=UserWarning)
message = UserWarning("FilterTests.test_once")
self.module.warn_explicit(message, UserWarning, "__init__.py",
42)
self.assertEqual(w[-1].message, message)
del w[:]
self.module.warn_explicit(message, UserWarning, "__init__.py",
13)
self.assertEqual(len(w), 0)
self.module.warn_explicit(message, UserWarning, "test_warnings2.py",
42)
self.assertEqual(len(w), 0)
def test_inheritance(self):
with original_warnings.catch_warnings(module=self.module) as w:
self.module.resetwarnings()
self.module.filterwarnings("error", category=Warning)
self.assertRaises(UserWarning, self.module.warn,
"FilterTests.test_inheritance", UserWarning)
def test_ordering(self):
with original_warnings.catch_warnings(record=True,
module=self.module) as w:
self.module.resetwarnings()
self.module.filterwarnings("ignore", category=UserWarning)
self.module.filterwarnings("error", category=UserWarning,
append=True)
del w[:]
try:
self.module.warn("FilterTests.test_ordering", UserWarning)
except UserWarning:
self.fail("order handling for actions failed")
self.assertEqual(len(w), 0)
def test_filterwarnings(self):
# Test filterwarnings().
# Implicitly also tests resetwarnings().
with original_warnings.catch_warnings(record=True,
module=self.module) as w:
self.module.filterwarnings("error", "", Warning, "", 0)
self.assertRaises(UserWarning, self.module.warn, 'convert to error')
self.module.resetwarnings()
text = 'handle normally'
self.module.warn(text)
self.assertEqual(str(w[-1].message), text)
self.assertTrue(w[-1].category is UserWarning)
self.module.filterwarnings("ignore", "", Warning, "", 0)
text = 'filtered out'
self.module.warn(text)
self.assertNotEqual(str(w[-1].message), text)
self.module.resetwarnings()
self.module.filterwarnings("error", "hex*", Warning, "", 0)
self.assertRaises(UserWarning, self.module.warn, 'hex/oct')
text = 'nonmatching text'
self.module.warn(text)
self.assertEqual(str(w[-1].message), text)
self.assertTrue(w[-1].category is UserWarning)
def test_message_matching(self):
with original_warnings.catch_warnings(record=True,
module=self.module) as w:
self.module.simplefilter("ignore", UserWarning)
self.module.filterwarnings("error", "match", UserWarning)
self.assertRaises(UserWarning, self.module.warn, "match")
self.assertRaises(UserWarning, self.module.warn, "match prefix")
self.module.warn("suffix match")
self.assertEqual(w, [])
self.module.warn("something completely different")
self.assertEqual(w, [])
def test_mutate_filter_list(self):
class X:
def match(self, a):
L[:] = []
L = [("default",X(),UserWarning,X(),0) for i in range(2)]
with original_warnings.catch_warnings(record=True,
module=self.module) as w:
self.module.filters = L
self.module.warn_explicit(UserWarning("b"), None, "f.py", 42)
self.assertEqual(str(w[-1].message), "b")
def test_filterwarnings_duplicate_filters(self):
with original_warnings.catch_warnings(module=self.module):
self.module.resetwarnings()
self.module.filterwarnings("error", category=UserWarning)
self.assertEqual(len(self.module.filters), 1)
self.module.filterwarnings("ignore", category=UserWarning)
self.module.filterwarnings("error", category=UserWarning)
self.assertEqual(
len(self.module.filters), 2,
"filterwarnings inserted duplicate filter"
)
self.assertEqual(
self.module.filters[0][0], "error",
"filterwarnings did not promote filter to "
"the beginning of list"
)
def test_simplefilter_duplicate_filters(self):
with original_warnings.catch_warnings(module=self.module):
self.module.resetwarnings()
self.module.simplefilter("error", category=UserWarning)
self.assertEqual(len(self.module.filters), 1)
self.module.simplefilter("ignore", category=UserWarning)
self.module.simplefilter("error", category=UserWarning)
self.assertEqual(
len(self.module.filters), 2,
"simplefilter inserted duplicate filter"
)
self.assertEqual(
self.module.filters[0][0], "error",
"simplefilter did not promote filter to the beginning of list"
)
def test_append_duplicate(self):
with original_warnings.catch_warnings(module=self.module,
record=True) as w:
self.module.resetwarnings()
self.module.simplefilter("ignore")
self.module.simplefilter("error", append=True)
self.module.simplefilter("ignore", append=True)
self.module.warn("test_append_duplicate", category=UserWarning)
self.assertEqual(len(self.module.filters), 2,
"simplefilter inserted duplicate filter"
)
self.assertEqual(len(w), 0,
"appended duplicate changed order of filters"
)
class CFilterTests(FilterTests, unittest.TestCase):
module = c_warnings
class PyFilterTests(FilterTests, unittest.TestCase):
module = py_warnings
class WarnTests(BaseTest):
"""Test warnings.warn() and warnings.warn_explicit()."""
def test_message(self):
with original_warnings.catch_warnings(record=True,
module=self.module) as w:
self.module.simplefilter("once")
for i in range(4):
text = 'multi %d' %i # Different text on each call.
self.module.warn(text)
self.assertEqual(str(w[-1].message), text)
self.assertTrue(w[-1].category is UserWarning)
# Issue 3639
def test_warn_nonstandard_types(self):
# warn() should handle non-standard types without issue.
for ob in (Warning, None, 42):
with original_warnings.catch_warnings(record=True,
module=self.module) as w:
self.module.simplefilter("once")
self.module.warn(ob)
# Don't directly compare objects since
# ``Warning() != Warning()``.
self.assertEqual(str(w[-1].message), str(UserWarning(ob)))
def test_filename(self):
with warnings_state(self.module):
with original_warnings.catch_warnings(record=True,
module=self.module) as w:
warning_tests.inner("spam1")
self.assertEqual(os.path.basename(w[-1].filename),
"stacklevel.py")
warning_tests.outer("spam2")
self.assertEqual(os.path.basename(w[-1].filename),
"stacklevel.py")
def test_stacklevel(self):
# Test stacklevel argument
# make sure all messages are different, so the warning won't be skipped
with warnings_state(self.module):
with original_warnings.catch_warnings(record=True,
module=self.module) as w:
warning_tests.inner("spam3", stacklevel=1)
self.assertEqual(os.path.basename(w[-1].filename),
"stacklevel.py")
warning_tests.outer("spam4", stacklevel=1)
self.assertEqual(os.path.basename(w[-1].filename),
"stacklevel.py")
warning_tests.inner("spam5", stacklevel=2)
self.assertEqual(os.path.basename(w[-1].filename),
"__init__.py")
warning_tests.outer("spam6", stacklevel=2)
self.assertEqual(os.path.basename(w[-1].filename),
"stacklevel.py")
warning_tests.outer("spam6.5", stacklevel=3)
self.assertEqual(os.path.basename(w[-1].filename),
"__init__.py")
warning_tests.inner("spam7", stacklevel=9999)
self.assertEqual(os.path.basename(w[-1].filename),
"sys")
def test_stacklevel_import(self):
# Issue #24305: With stacklevel=2, module-level warnings should work.
support.unload('test.test_warnings.data.import_warning')
with warnings_state(self.module):
with original_warnings.catch_warnings(record=True,
module=self.module) as w:
self.module.simplefilter('always')
import test.test_warnings.data.import_warning
self.assertEqual(len(w), 1)
self.assertEqual(w[0].filename, __file__)
def test_missing_filename_not_main(self):
# If __file__ is not specified and __main__ is not the module name,
# then __file__ should be set to the module name.
filename = warning_tests.__file__
try:
del warning_tests.__file__
with warnings_state(self.module):
with original_warnings.catch_warnings(record=True,
module=self.module) as w:
warning_tests.inner("spam8", stacklevel=1)
self.assertEqual(w[-1].filename, warning_tests.__name__)
finally:
warning_tests.__file__ = filename
@unittest.skipUnless(hasattr(sys, 'argv'), 'test needs sys.argv')
def test_missing_filename_main_with_argv(self):
# If __file__ is not specified and the caller is __main__ and sys.argv
# exists, then use sys.argv[0] as the file.
filename = warning_tests.__file__
module_name = warning_tests.__name__
try:
del warning_tests.__file__
warning_tests.__name__ = '__main__'
with warnings_state(self.module):
with original_warnings.catch_warnings(record=True,
module=self.module) as w:
warning_tests.inner('spam9', stacklevel=1)
self.assertEqual(w[-1].filename, sys.argv[0])
finally:
warning_tests.__file__ = filename
warning_tests.__name__ = module_name
def test_missing_filename_main_without_argv(self):
# If __file__ is not specified, the caller is __main__, and sys.argv
# is not set, then '__main__' is the file name.
filename = warning_tests.__file__
module_name = warning_tests.__name__
argv = sys.argv
try:
del warning_tests.__file__
warning_tests.__name__ = '__main__'
del sys.argv
with warnings_state(self.module):
with original_warnings.catch_warnings(record=True,
module=self.module) as w:
warning_tests.inner('spam10', stacklevel=1)
self.assertEqual(w[-1].filename, '__main__')
finally:
warning_tests.__file__ = filename
warning_tests.__name__ = module_name
sys.argv = argv
def test_missing_filename_main_with_argv_empty_string(self):
# If __file__ is not specified, the caller is __main__, and sys.argv[0]
        # is the empty string, then '__main__' is the file name.
# Tests issue 2743.
file_name = warning_tests.__file__
module_name = warning_tests.__name__
argv = sys.argv
try:
del warning_tests.__file__
warning_tests.__name__ = '__main__'
sys.argv = ['']
with warnings_state(self.module):
with original_warnings.catch_warnings(record=True,
module=self.module) as w:
warning_tests.inner('spam11', stacklevel=1)
self.assertEqual(w[-1].filename, '__main__')
finally:
warning_tests.__file__ = file_name
warning_tests.__name__ = module_name
sys.argv = argv
def test_warn_explicit_non_ascii_filename(self):
with original_warnings.catch_warnings(record=True,
module=self.module) as w:
self.module.resetwarnings()
self.module.filterwarnings("always", category=UserWarning)
for filename in ("nonascii\xe9\u20ac", "surrogate\udc80"):
try:
os.fsencode(filename)
except UnicodeEncodeError:
continue
self.module.warn_explicit("text", UserWarning, filename, 1)
self.assertEqual(w[-1].filename, filename)
def test_warn_explicit_type_errors(self):
# warn_explicit() should error out gracefully if it is given objects
# of the wrong types.
# lineno is expected to be an integer.
self.assertRaises(TypeError, self.module.warn_explicit,
None, UserWarning, None, None)
# Either 'message' needs to be an instance of Warning or 'category'
# needs to be a subclass.
self.assertRaises(TypeError, self.module.warn_explicit,
None, None, None, 1)
# 'registry' must be a dict or None.
self.assertRaises((TypeError, AttributeError),
self.module.warn_explicit,
None, Warning, None, 1, registry=42)
def test_bad_str(self):
# issue 6415
# Warnings instance with a bad format string for __str__ should not
# trigger a bus error.
class BadStrWarning(Warning):
"""Warning with a bad format string for __str__."""
def __str__(self):
return ("A bad formatted string %(err)" %
{"err" : "there is no %(err)s"})
with self.assertRaises(ValueError):
self.module.warn(BadStrWarning())
def test_warning_classes(self):
class MyWarningClass(Warning):
pass
class NonWarningSubclass:
pass
# passing a non-subclass of Warning should raise a TypeError
with self.assertRaises(TypeError) as cm:
self.module.warn('bad warning category', '')
self.assertIn('category must be a Warning subclass, not ',
str(cm.exception))
with self.assertRaises(TypeError) as cm:
self.module.warn('bad warning category', NonWarningSubclass)
self.assertIn('category must be a Warning subclass, not ',
str(cm.exception))
# check that warning instances also raise a TypeError
with self.assertRaises(TypeError) as cm:
self.module.warn('bad warning category', MyWarningClass())
self.assertIn('category must be a Warning subclass, not ',
str(cm.exception))
with original_warnings.catch_warnings(module=self.module):
self.module.resetwarnings()
self.module.filterwarnings('default')
with self.assertWarns(MyWarningClass) as cm:
self.module.warn('good warning category', MyWarningClass)
self.assertEqual('good warning category', str(cm.warning))
with self.assertWarns(UserWarning) as cm:
self.module.warn('good warning category', None)
self.assertEqual('good warning category', str(cm.warning))
with self.assertWarns(MyWarningClass) as cm:
self.module.warn('good warning category', MyWarningClass)
self.assertIsInstance(cm.warning, Warning)
class CWarnTests(WarnTests, unittest.TestCase):
module = c_warnings
# As an early adopter, we sanity check the
# test.support.import_fresh_module utility function
def test_accelerated(self):
import types
self.assertFalse(original_warnings is self.module)
self.assertIs(type(self.module.warn), types.BuiltinFunctionType)
class PyWarnTests(WarnTests, unittest.TestCase):
module = py_warnings
# As an early adopter, we sanity check the
# test.support.import_fresh_module utility function
def test_pure_python(self):
import types
self.assertFalse(original_warnings is self.module)
self.assertIs(type(self.module.warn), types.FunctionType)
class WCmdLineTests(BaseTest):
def test_improper_input(self):
# Uses the private _setoption() function to test the parsing
# of command-line warning arguments
with original_warnings.catch_warnings(module=self.module):
self.assertRaises(self.module._OptionError,
self.module._setoption, '1:2:3:4:5:6')
self.assertRaises(self.module._OptionError,
self.module._setoption, 'bogus::Warning')
self.assertRaises(self.module._OptionError,
self.module._setoption, 'ignore:2::4:-5')
self.module._setoption('error::Warning::0')
self.assertRaises(UserWarning, self.module.warn, 'convert to error')
class CWCmdLineTests(WCmdLineTests, unittest.TestCase):
module = c_warnings
class PyWCmdLineTests(WCmdLineTests, unittest.TestCase):
module = py_warnings
def test_improper_option(self):
# Same as above, but check that the message is printed out when
# the interpreter is executed. This also checks that options are
# actually parsed at all.
rc, out, err = assert_python_ok("-Wxxx", "-c", "pass")
self.assertIn(b"Invalid -W option ignored: invalid action: 'xxx'", err)
def test_warnings_bootstrap(self):
# Check that the warnings module does get loaded when -W<some option>
# is used (see issue #10372 for an example of silent bootstrap failure).
rc, out, err = assert_python_ok("-Wi", "-c",
"import sys; sys.modules['warnings'].warn('foo', RuntimeWarning)")
# '-Wi' was observed
self.assertFalse(out.strip())
self.assertNotIn(b'RuntimeWarning', err)
class _WarningsTests(BaseTest, unittest.TestCase):
"""Tests specific to the _warnings module."""
module = c_warnings
def test_filter(self):
# Everything should function even if 'filters' is not in warnings.
with original_warnings.catch_warnings(module=self.module) as w:
self.module.filterwarnings("error", "", Warning, "", 0)
self.assertRaises(UserWarning, self.module.warn,
'convert to error')
del self.module.filters
self.assertRaises(UserWarning, self.module.warn,
'convert to error')
def test_onceregistry(self):
# Replacing or removing the onceregistry should be okay.
global __warningregistry__
message = UserWarning('onceregistry test')
try:
original_registry = self.module.onceregistry
__warningregistry__ = {}
with original_warnings.catch_warnings(record=True,
module=self.module) as w:
self.module.resetwarnings()
self.module.filterwarnings("once", category=UserWarning)
self.module.warn_explicit(message, UserWarning, "file", 42)
self.assertEqual(w[-1].message, message)
del w[:]
self.module.warn_explicit(message, UserWarning, "file", 42)
self.assertEqual(len(w), 0)
# Test the resetting of onceregistry.
self.module.onceregistry = {}
__warningregistry__ = {}
self.module.warn('onceregistry test')
self.assertEqual(w[-1].message.args, message.args)
# Removal of onceregistry is okay.
del w[:]
del self.module.onceregistry
__warningregistry__ = {}
self.module.warn_explicit(message, UserWarning, "file", 42)
self.assertEqual(len(w), 0)
finally:
self.module.onceregistry = original_registry
def test_default_action(self):
# Replacing or removing defaultaction should be okay.
message = UserWarning("defaultaction test")
original = self.module.defaultaction
try:
with original_warnings.catch_warnings(record=True,
module=self.module) as w:
self.module.resetwarnings()
registry = {}
self.module.warn_explicit(message, UserWarning, "<test>", 42,
registry=registry)
self.assertEqual(w[-1].message, message)
self.assertEqual(len(w), 1)
# One actual registry key plus the "version" key
self.assertEqual(len(registry), 2)
self.assertIn("version", registry)
del w[:]
# Test removal.
del self.module.defaultaction
__warningregistry__ = {}
registry = {}
self.module.warn_explicit(message, UserWarning, "<test>", 43,
registry=registry)
self.assertEqual(w[-1].message, message)
self.assertEqual(len(w), 1)
self.assertEqual(len(registry), 2)
del w[:]
# Test setting.
self.module.defaultaction = "ignore"
__warningregistry__ = {}
registry = {}
self.module.warn_explicit(message, UserWarning, "<test>", 44,
registry=registry)
self.assertEqual(len(w), 0)
finally:
self.module.defaultaction = original
def test_showwarning_missing(self):
# Test that showwarning() missing is okay.
text = 'del showwarning test'
with original_warnings.catch_warnings(module=self.module):
self.module.filterwarnings("always", category=UserWarning)
del self.module.showwarning
with support.captured_output('stderr') as stream:
self.module.warn(text)
result = stream.getvalue()
self.assertIn(text, result)
def test_showwarning_not_callable(self):
with original_warnings.catch_warnings(module=self.module):
self.module.filterwarnings("always", category=UserWarning)
self.module.showwarning = print
with support.captured_output('stdout'):
self.module.warn('Warning!')
self.module.showwarning = 23
self.assertRaises(TypeError, self.module.warn, "Warning!")
def test_show_warning_output(self):
        # With showwarning() missing, make sure that output is okay.
text = 'test show_warning'
with original_warnings.catch_warnings(module=self.module):
self.module.filterwarnings("always", category=UserWarning)
del self.module.showwarning
with support.captured_output('stderr') as stream:
warning_tests.inner(text)
result = stream.getvalue()
self.assertEqual(result.count('\n'), 2,
"Too many newlines in %r" % result)
first_line, second_line = result.split('\n', 1)
expected_file = os.path.splitext(warning_tests.__file__)[0] + '.py'
first_line_parts = first_line.rsplit(':', 3)
path, line, warning_class, message = first_line_parts
line = int(line)
self.assertEqual(expected_file, path)
self.assertEqual(warning_class, ' ' + UserWarning.__name__)
self.assertEqual(message, ' ' + text)
expected_line = ' ' + linecache.getline(path, line).strip() + '\n'
assert expected_line
self.assertEqual(second_line, expected_line)
def test_filename_none(self):
# issue #12467: race condition if a warning is emitted at shutdown
globals_dict = globals()
oldfile = globals_dict['__file__']
try:
catch = original_warnings.catch_warnings(record=True,
module=self.module)
with catch as w:
self.module.filterwarnings("always", category=UserWarning)
globals_dict['__file__'] = None
original_warnings.warn('test', UserWarning)
self.assertTrue(len(w))
finally:
globals_dict['__file__'] = oldfile
def test_stderr_none(self):
rc, stdout, stderr = assert_python_ok("-c",
"import sys; sys.stderr = None; "
"import warnings; warnings.simplefilter('always'); "
"warnings.warn('Warning!')")
self.assertEqual(stdout, b'')
self.assertNotIn(b'Warning!', stderr)
self.assertNotIn(b'Error', stderr)
class WarningsDisplayTests(BaseTest):
"""Test the displaying of warnings and the ability to overload functions
related to displaying warnings."""
def test_formatwarning(self):
message = "msg"
category = Warning
file_name = os.path.splitext(warning_tests.__file__)[0] + '.py'
line_num = 3
file_line = linecache.getline(file_name, line_num).strip()
format = "%s:%s: %s: %s\n %s\n"
expect = format % (file_name, line_num, category.__name__, message,
file_line)
self.assertEqual(expect, self.module.formatwarning(message,
category, file_name, line_num))
# Test the 'line' argument.
file_line += " for the win!"
expect = format % (file_name, line_num, category.__name__, message,
file_line)
self.assertEqual(expect, self.module.formatwarning(message,
category, file_name, line_num, file_line))
def test_showwarning(self):
file_name = os.path.splitext(warning_tests.__file__)[0] + '.py'
line_num = 3
expected_file_line = linecache.getline(file_name, line_num).strip()
message = 'msg'
category = Warning
file_object = StringIO()
expect = self.module.formatwarning(message, category, file_name,
line_num)
self.module.showwarning(message, category, file_name, line_num,
file_object)
self.assertEqual(file_object.getvalue(), expect)
# Test 'line' argument.
expected_file_line += "for the win!"
expect = self.module.formatwarning(message, category, file_name,
line_num, expected_file_line)
file_object = StringIO()
self.module.showwarning(message, category, file_name, line_num,
file_object, expected_file_line)
self.assertEqual(expect, file_object.getvalue())
class CWarningsDisplayTests(WarningsDisplayTests, unittest.TestCase):
module = c_warnings
class PyWarningsDisplayTests(WarningsDisplayTests, unittest.TestCase):
module = py_warnings
class CatchWarningTests(BaseTest):
"""Test catch_warnings()."""
def test_catch_warnings_restore(self):
wmod = self.module
orig_filters = wmod.filters
orig_showwarning = wmod.showwarning
# Ensure both showwarning and filters are restored when recording
with wmod.catch_warnings(module=wmod, record=True):
wmod.filters = wmod.showwarning = object()
self.assertTrue(wmod.filters is orig_filters)
self.assertTrue(wmod.showwarning is orig_showwarning)
# Same test, but with recording disabled
with wmod.catch_warnings(module=wmod, record=False):
wmod.filters = wmod.showwarning = object()
self.assertTrue(wmod.filters is orig_filters)
self.assertTrue(wmod.showwarning is orig_showwarning)
def test_catch_warnings_recording(self):
wmod = self.module
# Ensure warnings are recorded when requested
with wmod.catch_warnings(module=wmod, record=True) as w:
self.assertEqual(w, [])
self.assertTrue(type(w) is list)
wmod.simplefilter("always")
wmod.warn("foo")
self.assertEqual(str(w[-1].message), "foo")
wmod.warn("bar")
self.assertEqual(str(w[-1].message), "bar")
self.assertEqual(str(w[0].message), "foo")
self.assertEqual(str(w[1].message), "bar")
del w[:]
self.assertEqual(w, [])
# Ensure warnings are not recorded when not requested
orig_showwarning = wmod.showwarning
with wmod.catch_warnings(module=wmod, record=False) as w:
self.assertTrue(w is None)
self.assertTrue(wmod.showwarning is orig_showwarning)
def test_catch_warnings_reentry_guard(self):
wmod = self.module
# Ensure catch_warnings is protected against incorrect usage
x = wmod.catch_warnings(module=wmod, record=True)
self.assertRaises(RuntimeError, x.__exit__)
with x:
self.assertRaises(RuntimeError, x.__enter__)
# Same test, but with recording disabled
x = wmod.catch_warnings(module=wmod, record=False)
self.assertRaises(RuntimeError, x.__exit__)
with x:
self.assertRaises(RuntimeError, x.__enter__)
def test_catch_warnings_defaults(self):
wmod = self.module
orig_filters = wmod.filters
orig_showwarning = wmod.showwarning
# Ensure default behaviour is not to record warnings
with wmod.catch_warnings(module=wmod) as w:
self.assertTrue(w is None)
self.assertTrue(wmod.showwarning is orig_showwarning)
self.assertTrue(wmod.filters is not orig_filters)
self.assertTrue(wmod.filters is orig_filters)
if wmod is sys.modules['warnings']:
# Ensure the default module is this one
with wmod.catch_warnings() as w:
self.assertTrue(w is None)
self.assertTrue(wmod.showwarning is orig_showwarning)
self.assertTrue(wmod.filters is not orig_filters)
self.assertTrue(wmod.filters is orig_filters)
def test_check_warnings(self):
# Explicit tests for the test.support convenience wrapper
wmod = self.module
if wmod is not sys.modules['warnings']:
self.skipTest('module to test is not loaded warnings module')
with support.check_warnings(quiet=False) as w:
self.assertEqual(w.warnings, [])
wmod.simplefilter("always")
wmod.warn("foo")
self.assertEqual(str(w.message), "foo")
wmod.warn("bar")
self.assertEqual(str(w.message), "bar")
self.assertEqual(str(w.warnings[0].message), "foo")
self.assertEqual(str(w.warnings[1].message), "bar")
w.reset()
self.assertEqual(w.warnings, [])
with support.check_warnings():
# defaults to quiet=True without argument
pass
with support.check_warnings(('foo', UserWarning)):
wmod.warn("foo")
with self.assertRaises(AssertionError):
with support.check_warnings(('', RuntimeWarning)):
# defaults to quiet=False with argument
pass
with self.assertRaises(AssertionError):
with support.check_warnings(('foo', RuntimeWarning)):
wmod.warn("foo")
class CCatchWarningTests(CatchWarningTests, unittest.TestCase):
module = c_warnings
class PyCatchWarningTests(CatchWarningTests, unittest.TestCase):
module = py_warnings
class EnvironmentVariableTests(BaseTest):
def test_single_warning(self):
rc, stdout, stderr = assert_python_ok("-c",
"import sys; sys.stdout.write(str(sys.warnoptions))",
PYTHONWARNINGS="ignore::DeprecationWarning")
self.assertEqual(stdout, b"['ignore::DeprecationWarning']")
def test_comma_separated_warnings(self):
rc, stdout, stderr = assert_python_ok("-c",
"import sys; sys.stdout.write(str(sys.warnoptions))",
PYTHONWARNINGS="ignore::DeprecationWarning,ignore::UnicodeWarning")
self.assertEqual(stdout,
b"['ignore::DeprecationWarning', 'ignore::UnicodeWarning']")
def test_envvar_and_command_line(self):
rc, stdout, stderr = assert_python_ok("-Wignore::UnicodeWarning", "-c",
"import sys; sys.stdout.write(str(sys.warnoptions))",
PYTHONWARNINGS="ignore::DeprecationWarning")
self.assertEqual(stdout,
b"['ignore::DeprecationWarning', 'ignore::UnicodeWarning']")
def test_conflicting_envvar_and_command_line(self):
rc, stdout, stderr = assert_python_failure("-Werror::DeprecationWarning", "-c",
"import sys, warnings; sys.stdout.write(str(sys.warnoptions)); "
"warnings.warn('Message', DeprecationWarning)",
PYTHONWARNINGS="default::DeprecationWarning")
self.assertEqual(stdout,
b"['default::DeprecationWarning', 'error::DeprecationWarning']")
self.assertEqual(stderr.splitlines(),
[b"Traceback (most recent call last):",
b" File \"<string>\", line 1, in <module>",
b"DeprecationWarning: Message"])
@unittest.skipUnless(sys.getfilesystemencoding() != 'ascii',
'requires non-ascii filesystemencoding')
def test_nonascii(self):
rc, stdout, stderr = assert_python_ok("-c",
"import sys; sys.stdout.write(str(sys.warnoptions))",
PYTHONIOENCODING="utf-8",
PYTHONWARNINGS="ignore:DeprecaciónWarning")
self.assertEqual(stdout,
"['ignore:DeprecaciónWarning']".encode('utf-8'))
class CEnvironmentVariableTests(EnvironmentVariableTests, unittest.TestCase):
module = c_warnings
class PyEnvironmentVariableTests(EnvironmentVariableTests, unittest.TestCase):
module = py_warnings
class BootstrapTest(unittest.TestCase):
def test_issue_8766(self):
# "import encodings" emits a warning whereas the warnings is not loaded
# or not completely loaded (warnings imports indirectly encodings by
# importing linecache) yet
with support.temp_cwd() as cwd, support.temp_cwd('encodings'):
# encodings loaded by initfsencoding()
assert_python_ok('-c', 'pass', PYTHONPATH=cwd)
# Use -W to load warnings module at startup
assert_python_ok('-c', 'pass', '-W', 'always', PYTHONPATH=cwd)
class FinalizationTest(unittest.TestCase):
@support.requires_type_collecting
def test_finalization(self):
# Issue #19421: warnings.warn() should not crash
# during Python finalization
code = """
import warnings
warn = warnings.warn
class A:
def __del__(self):
warn("test")
A()
import gc; gc.collect()
"""
rc, out, err = assert_python_ok("-c", code)
self.assertEqual(err, b'-c:7: UserWarning: test')
@support.cpython_only
def test_late_resource_warning(self):
# Issue #21925: Emitting a ResourceWarning late during the Python
# shutdown must be logged.
expected = b"sys:1: ResourceWarning: unclosed file "
# don't import the warnings module
# (_warnings will try to import it)
code = "f = open(%a)" % __file__
rc, out, err = assert_python_ok("-Wd", "-c", code)
self.assertTrue(err.startswith(expected), ascii(err))
# import the warnings module
code = "import warnings; f = open(%a)" % __file__
rc, out, err = assert_python_ok("-Wd", "-c", code)
self.assertTrue(err.startswith(expected), ascii(err))
def setUpModule():
py_warnings.onceregistry.clear()
c_warnings.onceregistry.clear()
tearDownModule = setUpModule
if __name__ == "__main__":
unittest.main()
| 42.422053
| 87
| 0.607063
|
6a735bcb522a8084052248e0855ded6de1600af9
| 1,830
|
py
|
Python
|
plotlog/datacut.py
|
s-naoya/plotlog
|
278c7e1d6f2af90a55bb9fa121051e00e976c1c0
|
[
"MIT"
] | null | null | null |
plotlog/datacut.py
|
s-naoya/plotlog
|
278c7e1d6f2af90a55bb9fa121051e00e976c1c0
|
[
"MIT"
] | null | null | null |
plotlog/datacut.py
|
s-naoya/plotlog
|
278c7e1d6f2af90a55bb9fa121051e00e976c1c0
|
[
"MIT"
] | null | null | null |
import sys
import pandas as pd
class DataCut:
df = None
x_axis = None
__log_file_path = list()
__x_col = None
def __init__(self):
pass
def dispose(self):
self.__log_file_path = None
self.df = None
self.x_axis = None
def import_file(self, path, header=0, sep=","):
try:
self.df = pd.read_csv(path, header=header, sep=sep)
        except Exception:
            print(path, "cannot be imported:", sys.exc_info()[0])
return False
if len(self.df.index) == 0:
print(path, "is empty.")
return False
return True
def set_x_axis(self, x_col):
self.x_axis = self.df.loc[:, x_col] if x_col in self.df.columns else self.df.iloc[:, x_col]
self.__x_col = x_col
def shift(self, trig_col, trig_val):
        trig_df = (self.df.loc[:, trig_col]
                   if trig_col in self.df.columns
                   else self.df.iloc[:, trig_col])
idx = [0, None]
while trig_val[0] < float(trig_df[idx[0]+1]) < trig_val[1]:
idx[0] += 1
shift_start_time = self.x_axis[idx[0]]
shift_df = self.df.iloc[idx[0]:idx[1], :].reset_index(drop=True)
if self.__x_col in shift_df:
shift_df.loc[:, self.__x_col] -= shift_start_time
else:
shift_df.iloc[:, self.__x_col] -= shift_start_time
self.df = shift_df
self.set_x_axis(self.__x_col)
def slice(self, time):
idx = [0, None]
while float(self.x_axis[idx[0]]) < float(time[0]):
idx[0] += 1
idx[1] = idx[0]
while float(self.x_axis[idx[1]]) <= float(time[1]):
idx[1] += 1
slice_df = self.df.iloc[idx[0]:idx[1], :]
self.df = slice_df.reset_index(drop=True)
self.set_x_axis(self.__x_col)
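# A minimal usage sketch, assuming a hypothetical "sample.csv" with a
# "trigger" column; the column names and value ranges below are invented
# for illustration only.
#
#     cutter = DataCut()
#     if cutter.import_file("sample.csv"):
#         cutter.set_x_axis(0)                 # first column as the x axis
#         cutter.shift("trigger", (0.5, 4.5))  # re-zero the x axis at the detected trigger point
#         cutter.slice((0.0, 10.0))            # keep 0-10 on the (shifted) x axis
#         print(cutter.df.head())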
| 30.5
| 101
| 0.552459
|
4d79fa5c11f358e2b8be2dcc9cb595f1e284c9aa
| 1,556
|
py
|
Python
|
tests/sar_common.py
|
danielsc/Recommenders
|
e81620d5415c38ba146370c79121899eb1947e75
|
[
"MIT"
] | 1
|
2019-01-20T11:55:23.000Z
|
2019-01-20T11:55:23.000Z
|
tests/sar_common.py
|
eisber/Recommenders
|
8389b63e412520af0cea8e1cefbdf7b6cce727b3
|
[
"MIT"
] | null | null | null |
tests/sar_common.py
|
eisber/Recommenders
|
8389b63e412520af0cea8e1cefbdf7b6cce727b3
|
[
"MIT"
] | 1
|
2019-01-22T05:54:30.000Z
|
2019-01-22T05:54:30.000Z
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import numpy as np
import urllib.request
import csv
import codecs
import logging
log = logging.getLogger(__name__)
def _csv_reader_url(url, delimiter=",", encoding="utf-8"):
ftpstream = urllib.request.urlopen(url)
csvfile = csv.reader(codecs.iterdecode(ftpstream, encoding), delimiter=delimiter)
return csvfile
def load_affinity(file):
"""Loads user affinities from test dataset"""
reader = _csv_reader_url(file)
items = next(reader)[1:]
affinities = np.array(next(reader)[1:])
return affinities, items
def load_userpred(file, k=10):
"""Loads test predicted items and their SAR scores"""
reader = _csv_reader_url(file)
next(reader)
values = next(reader)
items = values[1 : (k + 1)]
scores = np.array([float(x) for x in values[(k + 1) :]])
return items, scores
def read_matrix(file, row_map=None, col_map=None):
"""read in test matrix and hash it"""
reader = _csv_reader_url(file)
# skip the header
col_ids = next(reader)[1:]
row_ids = []
rows = []
for row in reader:
rows += [row[1:]]
row_ids += [row[0]]
array = np.array(rows)
# now map the rows and columns to the right values
if row_map is not None and col_map is not None:
row_index = [row_map[x] for x in row_ids]
col_index = [col_map[x] for x in col_ids]
array = array[row_index, :]
array = array[:, col_index]
return array, row_ids, col_ids
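# A hedged usage sketch; the URL and id maps below are placeholders, not real
# test-data locations.
#
#     user_map = {"user_a": 0, "user_b": 1}
#     item_map = {"item_1": 0, "item_2": 1}
#     matrix, row_ids, col_ids = read_matrix(
#         "https://example.com/test_matrix.csv",
#         row_map=user_map, col_map=item_map)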
| 27.298246
| 85
| 0.658098
|
b36d1483c1b224cc84286e06bf506164479046dc
| 828
|
py
|
Python
|
domaintools/komand_domaintools/actions/reverse_name_server/action.py
|
killstrelok/insightconnect-plugins
|
911358925f4233ab273dbd8172e8b7b9188ebc01
|
[
"MIT"
] | 1
|
2020-03-18T09:14:55.000Z
|
2020-03-18T09:14:55.000Z
|
domaintools/komand_domaintools/actions/reverse_name_server/action.py
|
killstrelok/insightconnect-plugins
|
911358925f4233ab273dbd8172e8b7b9188ebc01
|
[
"MIT"
] | 1
|
2021-02-23T23:57:37.000Z
|
2021-02-23T23:57:37.000Z
|
domaintools/komand_domaintools/actions/reverse_name_server/action.py
|
killstrelok/insightconnect-plugins
|
911358925f4233ab273dbd8172e8b7b9188ebc01
|
[
"MIT"
] | null | null | null |
import komand
from .schema import ReverseNameServerInput, ReverseNameServerOutput
# Custom imports below
from komand_domaintools.util import util
class ReverseNameServer(komand.Action):
def __init__(self):
super(self.__class__, self).__init__(
name='reverse_name_server',
description='Provides a list of domain names that share the same primary or secondary name server',
input=ReverseNameServerInput(),
output=ReverseNameServerOutput())
def run(self, params={}):
params = komand.helper.clean_dict(params)
params['query'] = params.pop('domain')
        response = util.make_request(self.connection.api.reverse_name_server, **params)
return response
def test(self):
"""TODO: Test action"""
return {}
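# A hedged illustration of the parameter handling in run() above (values
# invented): the 'domain' input is renamed to the 'query' argument expected
# by the DomainTools API before util.make_request() is called, e.g.
#
#     {'domain': 'example.com'}  ->  {'query': 'example.com'}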
| 34.5
| 115
| 0.669082
|
7441d9ec0f320f3855765f5ce5b7b51f9f0fa211
| 7,101
|
py
|
Python
|
.dev_scripts/benchmark_filter.py
|
skymanaditya1/mmdetection_modifications
|
0030b884b691ff6d6429a3024f4e080063b7f1df
|
[
"Apache-2.0"
] | 20,190
|
2018-09-10T01:11:53.000Z
|
2022-03-31T22:31:33.000Z
|
.dev_scripts/benchmark_filter.py
|
skymanaditya1/mmdetection_modifications
|
0030b884b691ff6d6429a3024f4e080063b7f1df
|
[
"Apache-2.0"
] | 6,736
|
2018-09-17T09:45:51.000Z
|
2022-03-31T22:54:10.000Z
|
.dev_scripts/benchmark_filter.py
|
skymanaditya1/mmdetection_modifications
|
0030b884b691ff6d6429a3024f4e080063b7f1df
|
[
"Apache-2.0"
] | 7,837
|
2018-09-11T02:58:23.000Z
|
2022-03-31T22:31:38.000Z
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
import os.path as osp
def parse_args():
parser = argparse.ArgumentParser(description='Filter configs to train')
parser.add_argument(
'--basic-arch',
action='store_true',
help='to train models in basic arch')
parser.add_argument(
'--datasets', action='store_true', help='to train models in dataset')
parser.add_argument(
'--data-pipeline',
action='store_true',
help='to train models related to data pipeline, e.g. augmentations')
parser.add_argument(
'--nn-module',
action='store_true',
help='to train models related to neural network modules')
parser.add_argument(
'--model-options',
nargs='+',
help='custom options to special model benchmark')
parser.add_argument(
'--out',
type=str,
default='batch_train_list.txt',
help='output path of gathered metrics to be stored')
args = parser.parse_args()
return args
basic_arch_root = [
'atss', 'autoassign', 'cascade_rcnn', 'cascade_rpn', 'centripetalnet',
'cornernet', 'detectors', 'deformable_detr', 'detr', 'double_heads',
'dynamic_rcnn', 'faster_rcnn', 'fcos', 'foveabox', 'fp16', 'free_anchor',
'fsaf', 'gfl', 'ghm', 'grid_rcnn', 'guided_anchoring', 'htc', 'ld',
'libra_rcnn', 'mask_rcnn', 'ms_rcnn', 'nas_fcos', 'paa', 'pisa',
'point_rend', 'reppoints', 'retinanet', 'rpn', 'sabl', 'ssd', 'tridentnet',
'vfnet', 'yolact', 'yolo', 'sparse_rcnn', 'scnet', 'yolof', 'centernet'
]
datasets_root = [
'wider_face', 'pascal_voc', 'cityscapes', 'lvis', 'deepfashion'
]
data_pipeline_root = ['albu_example', 'instaboost']
nn_module_root = [
'carafe', 'dcn', 'empirical_attention', 'gcnet', 'gn', 'gn+ws', 'hrnet',
'pafpn', 'nas_fpn', 'regnet', 'resnest', 'res2net', 'groie'
]
benchmark_pool = [
'configs/albu_example/mask_rcnn_r50_fpn_albu_1x_coco.py',
'configs/atss/atss_r50_fpn_1x_coco.py',
'configs/autoassign/autoassign_r50_fpn_8x2_1x_coco.py',
'configs/carafe/mask_rcnn_r50_fpn_carafe_1x_coco.py',
'configs/cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco.py',
'configs/cascade_rpn/crpn_faster_rcnn_r50_caffe_fpn_1x_coco.py',
'configs/centernet/centernet_resnet18_dcnv2_140e_coco.py',
'configs/centripetalnet/'
'centripetalnet_hourglass104_mstest_16x6_210e_coco.py',
'configs/cityscapes/mask_rcnn_r50_fpn_1x_cityscapes.py',
'configs/cornernet/'
'cornernet_hourglass104_mstest_8x6_210e_coco.py',
'configs/dcn/mask_rcnn_r50_fpn_mdconv_c3-c5_1x_coco.py',
'configs/dcn/faster_rcnn_r50_fpn_dpool_1x_coco.py',
'configs/dcn/faster_rcnn_r50_fpn_mdpool_1x_coco.py',
'configs/dcn/mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py',
'configs/deformable_detr/deformable_detr_r50_16x2_50e_coco.py',
'configs/detectors/detectors_htc_r50_1x_coco.py',
'configs/detr/detr_r50_8x2_150e_coco.py',
'configs/double_heads/dh_faster_rcnn_r50_fpn_1x_coco.py',
'configs/dynamic_rcnn/dynamic_rcnn_r50_fpn_1x_coco.py',
'configs/empirical_attention/faster_rcnn_r50_fpn_attention_1111_dcn_1x_coco.py', # noqa
'configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py',
'configs/faster_rcnn/faster_rcnn_r50_fpn_ohem_1x_coco.py',
'configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_1x_coco.py',
'configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_1x_coco.py',
'configs/faster_rcnn/faster_rcnn_r50_caffe_dc5_mstrain_1x_coco.py',
'configs/fcos/fcos_center_r50_caffe_fpn_gn-head_4x4_1x_coco.py',
'configs/foveabox/fovea_align_r50_fpn_gn-head_4x4_2x_coco.py',
'configs/fp16/retinanet_r50_fpn_fp16_1x_coco.py',
'configs/mask_rcnn/mask_rcnn_r50_fpn_fp16_1x_coco.py',
'configs/free_anchor/retinanet_free_anchor_r50_fpn_1x_coco.py',
'configs/fsaf/fsaf_r50_fpn_1x_coco.py',
'configs/gcnet/mask_rcnn_r50_fpn_r4_gcb_c3-c5_1x_coco.py',
'configs/gfl/gfl_r50_fpn_1x_coco.py',
'configs/ghm/retinanet_ghm_r50_fpn_1x_coco.py',
'configs/gn/mask_rcnn_r50_fpn_gn-all_2x_coco.py',
'configs/gn+ws/mask_rcnn_r50_fpn_gn_ws-all_2x_coco.py',
'configs/grid_rcnn/grid_rcnn_r50_fpn_gn-head_2x_coco.py',
'configs/groie/faster_rcnn_r50_fpn_groie_1x_coco.py',
'configs/guided_anchoring/ga_faster_r50_caffe_fpn_1x_coco.py',
'configs/hrnet/mask_rcnn_hrnetv2p_w18_1x_coco.py',
'configs/htc/htc_r50_fpn_1x_coco.py',
'configs/instaboost/mask_rcnn_r50_fpn_instaboost_4x_coco.py',
'configs/ld/ld_r18_gflv1_r101_fpn_coco_1x.py',
'configs/libra_rcnn/libra_faster_rcnn_r50_fpn_1x_coco.py',
'configs/lvis/mask_rcnn_r50_fpn_sample1e-3_mstrain_1x_lvis_v1.py',
'configs/mask_rcnn/mask_rcnn_r50_caffe_fpn_mstrain-poly_1x_coco.py',
'configs/ms_rcnn/ms_rcnn_r50_caffe_fpn_1x_coco.py',
'configs/nas_fcos/nas_fcos_nashead_r50_caffe_fpn_gn-head_4x4_1x_coco.py',
'configs/nas_fpn/retinanet_r50_nasfpn_crop640_50e_coco.py',
'configs/paa/paa_r50_fpn_1x_coco.py',
'configs/pafpn/faster_rcnn_r50_pafpn_1x_coco.py',
'configs/pisa/pisa_mask_rcnn_r50_fpn_1x_coco.py',
'configs/point_rend/point_rend_r50_caffe_fpn_mstrain_1x_coco.py',
'configs/regnet/mask_rcnn_regnetx-3.2GF_fpn_1x_coco.py',
'configs/reppoints/reppoints_moment_r50_fpn_gn-neck+head_1x_coco.py',
'configs/res2net/faster_rcnn_r2_101_fpn_2x_coco.py',
'configs/resnest/'
'mask_rcnn_s50_fpn_syncbn-backbone+head_mstrain_1x_coco.py',
'configs/retinanet/retinanet_r50_caffe_fpn_1x_coco.py',
'configs/rpn/rpn_r50_fpn_1x_coco.py',
'configs/sabl/sabl_retinanet_r50_fpn_1x_coco.py',
'configs/ssd/ssd300_coco.py',
'configs/tridentnet/tridentnet_r50_caffe_1x_coco.py',
'configs/vfnet/vfnet_r50_fpn_1x_coco.py',
'configs/yolact/yolact_r50_1x8_coco.py',
'configs/yolo/yolov3_d53_320_273e_coco.py',
'configs/sparse_rcnn/sparse_rcnn_r50_fpn_1x_coco.py',
'configs/scnet/scnet_r50_fpn_1x_coco.py',
'configs/yolof/yolof_r50_c5_8x8_1x_coco.py',
]
def main():
args = parse_args()
benchmark_type = []
if args.basic_arch:
benchmark_type += basic_arch_root
if args.datasets:
benchmark_type += datasets_root
if args.data_pipeline:
benchmark_type += data_pipeline_root
if args.nn_module:
benchmark_type += nn_module_root
special_model = args.model_options
if special_model is not None:
benchmark_type += special_model
config_dpath = 'configs/'
benchmark_configs = []
for cfg_root in benchmark_type:
cfg_dir = osp.join(config_dpath, cfg_root)
configs = os.scandir(cfg_dir)
for cfg in configs:
config_path = osp.join(cfg_dir, cfg.name)
if (config_path in benchmark_pool
and config_path not in benchmark_configs):
benchmark_configs.append(config_path)
print(f'Totally found {len(benchmark_configs)} configs to benchmark')
with open(args.out, 'w') as f:
for config in benchmark_configs:
f.write(config + '\n')
if __name__ == '__main__':
main()
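# A hedged usage sketch; the flags shown are the ones defined in parse_args()
# above and the output file name is just the default.
#
#     python .dev_scripts/benchmark_filter.py --basic-arch --datasets \
#         --out batch_train_list.txt
#
# Every config from benchmark_pool that belongs to a selected group is written
# to the output file, one path per line.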
| 42.267857
| 92
| 0.732714
|
c61e0c44fa1cb8930611eeb8476139e49c9a096f
| 337
|
py
|
Python
|
python-leetcode/sw_06.py
|
MDGSF/interviews
|
9faa9aacdb0cfbb777d4d3d4d1b14b55ca2c9f76
|
[
"MIT"
] | 12
|
2020-01-16T08:55:27.000Z
|
2021-12-02T14:52:39.000Z
|
python-leetcode/sw_06.py
|
MDGSF/interviews
|
9faa9aacdb0cfbb777d4d3d4d1b14b55ca2c9f76
|
[
"MIT"
] | null | null | null |
python-leetcode/sw_06.py
|
MDGSF/interviews
|
9faa9aacdb0cfbb777d4d3d4d1b14b55ca2c9f76
|
[
"MIT"
] | 1
|
2019-12-11T12:00:38.000Z
|
2019-12-11T12:00:38.000Z
|
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
def reversePrint(self, head: ListNode) -> List[int]:
result, cur = [], head
while cur:
result.append(cur.val)
cur = cur.next
result.reverse()
return result
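# A hedged usage sketch; on LeetCode, ListNode and List are supplied by the
# judge, so running this locally would also need the commented-out ListNode
# class and `from typing import List`.
#
#     head = ListNode(1); head.next = ListNode(3); head.next.next = ListNode(2)
#     print(Solution().reversePrint(head))   # [2, 3, 1]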
| 22.466667
| 54
| 0.602374
|
6424fba76be8cc3443bd7237a9028f1dee1b8f56
| 3,934
|
py
|
Python
|
utils/layers.py
|
Fisher33318/yolov3-motebus
|
96bf731cda088a18ebdc2e5a863adfef812e1095
|
[
"MIT"
] | null | null | null |
utils/layers.py
|
Fisher33318/yolov3-motebus
|
96bf731cda088a18ebdc2e5a863adfef812e1095
|
[
"MIT"
] | null | null | null |
utils/layers.py
|
Fisher33318/yolov3-motebus
|
96bf731cda088a18ebdc2e5a863adfef812e1095
|
[
"MIT"
] | 1
|
2021-05-05T07:55:29.000Z
|
2021-05-05T07:55:29.000Z
|
import torch.nn.functional as F
from utils.utils import *
class FeatureConcat(nn.Module):
def __init__(self, layers):
super(FeatureConcat, self).__init__()
self.layers = layers # layer indices
self.multiple = len(layers) > 1 # multiple layers flag
def forward(self, x, outputs):
return torch.cat([outputs[i] for i in self.layers], 1) if self.multiple else outputs[self.layers[0]]
class WeightedFeatureFusion(nn.Module): # weighted sum of 2 or more layers https://arxiv.org/abs/1911.09070
def __init__(self, layers, weight=False):
super(WeightedFeatureFusion, self).__init__()
self.layers = layers # layer indices
self.weight = weight # apply weights boolean
self.n = len(layers) + 1 # number of layers
if weight:
self.w = torch.nn.Parameter(torch.zeros(self.n), requires_grad=True) # layer weights
def forward(self, x, outputs):
# Weights
if self.weight:
w = torch.sigmoid(self.w) * (2 / self.n) # sigmoid weights (0-1)
x = x * w[0]
# Fusion
nx = x.shape[1] # input channels
for i in range(self.n - 1):
a = outputs[self.layers[i]] * w[i + 1] if self.weight else outputs[self.layers[i]] # feature to add
na = a.shape[1] # feature channels
# Adjust channels
if nx == na: # same shape
x = x + a
elif nx > na: # slice input
x[:, :na] = x[:, :na] + a # or a = nn.ZeroPad2d((0, 0, 0, 0, 0, dc))(a); x = x + a
else: # slice feature
x = x + a[:, :nx]
return x
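# A hedged usage sketch of WeightedFeatureFusion (shapes invented); `outputs`
# stands in for the per-layer feature cache built during a Darknet-style
# forward pass.
#
#     fuse = WeightedFeatureFusion(layers=[-3], weight=True)
#     x = torch.randn(1, 64, 32, 32)
#     outputs = {-3: torch.randn(1, 64, 32, 32)}
#     y = fuse(x, outputs)   # sigmoid-weighted element-wise sum, same shape as x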
class MixConv2d(nn.Module): # MixConv: Mixed Depthwise Convolutional Kernels https://arxiv.org/abs/1907.09595
def __init__(self, in_ch, out_ch, k=(3, 5, 7), stride=1, dilation=1, bias=True, method='equal_params'):
super(MixConv2d, self).__init__()
groups = len(k)
if method == 'equal_ch': # equal channels per group
i = torch.linspace(0, groups - 1E-6, out_ch).floor() # out_ch indices
ch = [(i == g).sum() for g in range(groups)]
else: # 'equal_params': equal parameter count per group
b = [out_ch] + [0] * groups
a = np.eye(groups + 1, groups, k=-1)
a -= np.roll(a, 1, axis=1)
a *= np.array(k) ** 2
a[0] = 1
ch = np.linalg.lstsq(a, b, rcond=None)[0].round().astype(int) # solve for equal weight indices, ax = b
self.m = nn.ModuleList([torch.nn.Conv2d(in_channels=in_ch,
out_channels=ch[g],
kernel_size=k[g],
stride=stride,
padding=(k[g] - 1) // 2, # 'same' pad
dilation=dilation,
bias=bias) for g in range(groups)])
def forward(self, x):
return torch.cat([m(x) for m in self.m], 1)
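# A hedged usage sketch (sizes invented). With out_ch=64 and k=(3, 5, 7),
# 'equal_params' solves a small linear system so each kernel-size group gets
# roughly the same parameter count (larger kernels receive fewer channels);
# 'equal_ch' would instead split the channels evenly across the three groups.
#
#     conv = MixConv2d(in_ch=32, out_ch=64, k=(3, 5, 7), stride=1)
#     y = conv(torch.randn(1, 32, 56, 56))   # spatial size preserved,
#                                            # channels sum to ~out_ch after rounding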
# Activation functions below -------------------------------------------------------------------------------------------
class SwishImplementation(torch.autograd.Function):
@staticmethod
def forward(ctx, i):
ctx.save_for_backward(i)
return i * torch.sigmoid(i)
@staticmethod
def backward(ctx, grad_output):
sigmoid_i = torch.sigmoid(ctx.saved_variables[0])
return grad_output * (sigmoid_i * (1 + ctx.saved_variables[0] * (1 - sigmoid_i)))
class MemoryEfficientSwish(nn.Module):
def forward(self, x):
return SwishImplementation.apply(x)
class Swish(nn.Module):
def forward(self, x):
return x.mul_(torch.sigmoid(x))
class Mish(nn.Module): # https://github.com/digantamisra98/Mish
def forward(self, x):
return x.mul_(F.softplus(x).tanh())
| 38.568627
| 120
| 0.534316
|
858483b63438f69f45d92ac72a18ad26eaa4995f
| 3,926
|
py
|
Python
|
src/sunstruck/db/migrations/versions/20200810_26a58067e4fa.py
|
la-mar/sunstruck-api
|
90074a55d3b243f7f0eee6e897a98699d2cebc43
|
[
"MIT"
] | 3
|
2021-04-04T07:48:48.000Z
|
2022-02-19T17:42:12.000Z
|
src/sunstruck/db/migrations/versions/20200810_26a58067e4fa.py
|
la-mar/sunstruck-api
|
90074a55d3b243f7f0eee6e897a98699d2cebc43
|
[
"MIT"
] | null | null | null |
src/sunstruck/db/migrations/versions/20200810_26a58067e4fa.py
|
la-mar/sunstruck-api
|
90074a55d3b243f7f0eee6e897a98699d2cebc43
|
[
"MIT"
] | null | null | null |
"""empty message
Revision ID: 26a58067e4fa
Revises:
Create Date: 2020-08-10 17:37:04.436524+00:00
"""
import sqlalchemy as sa
import sqlalchemy_utils
import sqlalchemy_utils.types.email
from alembic import op
# revision identifiers, used by Alembic.
revision = "26a58067e4fa"
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table(
"users",
sa.Column("id", sa.BigInteger(), autoincrement=True, nullable=False),
sa.Column("username", sa.String(length=50), nullable=False),
sa.Column("email", sqlalchemy_utils.types.email.EmailType(), nullable=False),
sa.Column("is_active", sa.Boolean(), nullable=True),
sa.Column("is_superuser", sa.Boolean(), nullable=True),
sa.Column("first_name", sa.String(length=255), nullable=True),
sa.Column("last_name", sa.String(length=255), nullable=True),
sa.Column("phone_number", sa.Unicode(length=20), nullable=True),
sa.Column("country_code", sa.Unicode(length=20), nullable=True),
sa.Column("hashed_password", sa.String(), nullable=False),
sa.Column(
"created_at",
sa.DateTime(timezone=True),
server_default=sa.text("now()"),
nullable=True,
),
sa.Column(
"updated_at",
sa.DateTime(timezone=True),
server_default=sa.text("now()"),
nullable=True,
),
sa.PrimaryKeyConstraint("id", name=op.f("pk_users")),
sa.UniqueConstraint("email", name=op.f("uq_users_email")),
sa.UniqueConstraint("username", name=op.f("uq_users_username")),
)
op.create_index("ix_users_email", "users", ["email"], unique=False)
op.create_index(op.f("ix_users_updated_at"), "users", ["updated_at"], unique=False)
op.create_index("ix_users_username", "users", ["username"], unique=False)
op.create_table(
"oauth2_clients",
sa.Column("id", sa.BigInteger(), autoincrement=True, nullable=False),
sa.Column("client_id", sa.String(length=50), nullable=True),
sa.Column("hashed_client_secret", sa.String(length=150), nullable=True),
sa.Column("owner_id", sa.BigInteger(), nullable=True),
sa.Column(
"created_at",
sa.DateTime(timezone=True),
server_default=sa.text("now()"),
nullable=True,
),
sa.Column(
"updated_at",
sa.DateTime(timezone=True),
server_default=sa.text("now()"),
nullable=True,
),
sa.ForeignKeyConstraint(
["owner_id"], ["users.id"], name=op.f("fk_oauth2_clients_owner_id_users")
),
sa.PrimaryKeyConstraint("id", name=op.f("pk_oauth2_clients")),
sa.UniqueConstraint("client_id", name=op.f("uq_oauth2_clients_client_id")),
)
op.create_index(
"ix_oauth2_clients_client_id", "oauth2_clients", ["client_id"], unique=False
)
op.create_index(
"ix_oauth2_clients_owner_id", "oauth2_clients", ["owner_id"], unique=False
)
op.create_index(
op.f("ix_oauth2_clients_updated_at"),
"oauth2_clients",
["updated_at"],
unique=False,
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f("ix_oauth2_clients_updated_at"), table_name="oauth2_clients")
op.drop_index("ix_oauth2_clients_owner_id", table_name="oauth2_clients")
op.drop_index("ix_oauth2_clients_client_id", table_name="oauth2_clients")
op.drop_table("oauth2_clients")
op.drop_index("ix_users_username", table_name="users")
op.drop_index(op.f("ix_users_updated_at"), table_name="users")
op.drop_index("ix_users_email", table_name="users")
op.drop_table("users")
# ### end Alembic commands ###
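# Hedged usage reminder: this revision is applied or reverted with the
# standard Alembic CLI, e.g.
#
#     alembic upgrade 26a58067e4fa      # or: alembic upgrade head
#     alembic downgrade base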
| 38.116505
| 87
| 0.637799
|
4fe87ffae8a889c0c09f7501344687ce08188fd6
| 11,563
|
py
|
Python
|
whyattend/constants.py
|
chipsi007/World-of-Tanks-Attendance-Tracker
|
576b32586c402a843b88c49bf432a3e8cb3c62dd
|
[
"BSD-2-Clause"
] | null | null | null |
whyattend/constants.py
|
chipsi007/World-of-Tanks-Attendance-Tracker
|
576b32586c402a843b88c49bf432a3e8cb3c62dd
|
[
"BSD-2-Clause"
] | null | null | null |
whyattend/constants.py
|
chipsi007/World-of-Tanks-Attendance-Tracker
|
576b32586c402a843b88c49bf432a3e8cb3c62dd
|
[
"BSD-2-Clause"
] | null | null | null |
"""
Constants
~~~~~~~~~
Constants and translations used in WoT replay files and the API.
"""
MAP_EN_NAME_BY_ID = {
"01_karelia": "Karelia",
"02_malinovka": "Malinovka",
"04_himmelsdorf": "Himmelsdorf",
"05_prohorovka": "Prokhorovka",
"07_lakeville": "Lakeville",
"06_ensk": "Ensk",
"11_murovanka": "Murovanka",
"13_erlenberg": "Erlenberg",
"10_hills": "Mines",
"15_komarin": "Komarin",
"18_cliff": "Cliff",
"19_monastery": "Abbey",
"28_desert": "Sand River",
"35_steppes": "Steppes",
"37_caucasus": "Mountain Pass",
"33_fjord": "Fjords",
"34_redshire": "Redshire",
"36_fishing_bay": "Fisherman's Bay",
"38_mannerheim_line": "Arctic Region",
"08_ruinberg": "Ruinberg",
"14_siegfried_line": "Siegfried Line",
"22_slough": "Swamp",
"23_westfeld": "Westfield",
"29_el_hallouf": "El Halluf",
"31_airfield": "Airfield",
"03_campania": "Province",
"17_munchen": "Widepark",
"44_north_america": "Live Oaks",
"39_crimea": "South Coast",
"45_north_america": "Highway",
"42_north_america": "Port",
"51_asia": "Dragon Ridge",
"47_canada_a": "Serene Coast",
"85_winter": "Belogorsk-19",
"73_asia_korea": "Sacred Valley",
"60_asia_miao": "Pearl River",
"00_tank_tutorial": "Training area",
"86_himmelsdorf_winter": "Himmelsdorf Winter",
"87_ruinberg_on_fire": "Ruinberg on Fire",
"63_tundra": "Tundra",
"84_winter": "Windstorm",
"83_kharkiv": "Kharkov"
}
WOT_TANKS = {
u'A-20': {'tier': 4},
u'A-32': {'tier': 4},
u'A104_M4A3E8A': {'tier': 6},
u'A43': {'tier': 6},
u'A44': {'tier': 7},
u'AMX38': {'tier': 3},
u'AMX40': {'tier': 4},
u'AMX50_Foch': {'tier': 9},
u'AMX_105AM': {'tier': 5},
u'AMX_12t': {'tier': 6},
u'AMX_13F3AM': {'tier': 6},
u'AMX_13_75': {'tier': 7},
u'AMX_13_90': {'tier': 8},
u'AMX_50Fosh_155': {'tier': 10},
u'AMX_50_100': {'tier': 8},
u'AMX_50_120': {'tier': 9},
u'AMX_50_68t': {'tier': 10},
u'AMX_AC_Mle1946': {'tier': 7},
u'AMX_AC_Mle1948': {'tier': 8},
u'AMX_M4_1945': {'tier': 7},
u'AMX_Ob_Am105': {'tier': 4},
u'ARL_44': {'tier': 6},
u'ARL_V39': {'tier': 6},
u'AT-1': {'tier': 2},
u'Auf_Panther': {'tier': 7},
u'B-1bis_captured': {'tier': 4},
u'B1': {'tier': 4},
u'BDR_G1B': {'tier': 5},
u'BT-2': {'tier': 2},
u'BT-7': {'tier': 3},
u'BT-SV': {'tier': 3},
u'Bat_Chatillon155': {'tier': 10},
u'Bat_Chatillon155_55': {'tier': 9},
u'Bat_Chatillon25t': {'tier': 10},
u'Bison_I': {'tier': 3},
u'Ch01_Type59': {'tier': 8},
u'Ch02_Type62': {'tier': 7},
u'Ch04_T34_1': {'tier': 7},
u'Ch05_T34_2': {'tier': 8},
u'Ch06_Renault_NC31': {'tier': 1},
u'Ch07_Vickers_MkE_Type_BT26': {'tier': 2},
u'Ch08_Type97_Chi_Ha': {'tier': 3},
u'Ch09_M5': {'tier': 4},
u'Ch10_IS2': {'tier': 7},
u'Ch11_110': {'tier': 8},
u'Ch12_111_1_2_3': {'tier': 9},
u'Ch14_T34_3': {'tier': 8},
u'Ch15_59_16': {'tier': 6},
u'Ch16_WZ_131': {'tier': 7},
u'Ch17_WZ131_1_WZ132': {'tier': 8},
u'Ch18_WZ-120': {'tier': 9},
u'Ch19_121': {'tier': 10},
u'Ch20_Type58': {'tier': 6},
u'Ch21_T34': {'tier': 5},
u'Ch22_113': {'tier': 10},
u'Ch23_112': {'tier': 8},
u'Ch24_Type64': {'tier': 6},
u'Chi_Ha': {'tier': 3},
u'Chi_He': {'tier': 4},
u'Chi_Ni': {'tier': 2},
u'Chi_Nu': {'tier': 5},
u'Chi_Nu_Kai': {'tier': 5},
u'Chi_Ri': {'tier': 7},
u'Chi_To': {'tier': 6},
u'Churchill_LL': {'tier': 5},
u'D1': {'tier': 2},
u'D2': {'tier': 3},
u'DW_II': {'tier': 4},
u'DickerMax': {'tier': 6},
u'E-100': {'tier': 10},
u'E-25': {'tier': 7},
u'E-50': {'tier': 9},
u'E-75': {'tier': 9},
u'E50_Ausf_M': {'tier': 10},
u'ELC_AMX': {'tier': 5},
u'FCM_36Pak40': {'tier': 3},
u'FCM_50t': {'tier': 8},
u'Ferdinand': {'tier': 8},
u'G101_StuG_III': {'tier': 4},
u'G103_RU_251': {'tier': 8},
u'G20_Marder_II': {'tier': 3},
u'GAZ-74b': {'tier': 4},
u'GB01_Medium_Mark_I': {'tier': 1},
u'GB03_Cruiser_Mk_I': {'tier': 2},
u'GB04_Valentine': {'tier': 4},
u'GB05_Vickers_Medium_Mk_II': {'tier': 2},
u'GB06_Vickers_Medium_Mk_III': {'tier': 3},
u'GB07_Matilda': {'tier': 4},
u'GB08_Churchill_I': {'tier': 5},
u'GB09_Churchill_VII': {'tier': 6},
u'GB10_Black_Prince': {'tier': 7},
u'GB11_Caernarvon': {'tier': 8},
u'GB12_Conqueror': {'tier': 9},
u'GB13_FV215b': {'tier': 10},
u'GB20_Crusader': {'tier': 5},
u'GB21_Cromwell': {'tier': 6},
u'GB22_Comet': {'tier': 7},
u'GB23_Centurion': {'tier': 8},
u'GB24_Centurion_Mk3': {'tier': 9},
u'GB25_Loyd_Carrier': {'tier': 2},
u'GB26_Birch_Gun': {'tier': 4},
u'GB27_Sexton': {'tier': 3},
u'GB28_Bishop': {'tier': 5},
u'GB29_Crusader_5inch': {'tier': 7},
u'GB30_FV3805': {'tier': 9},
u'GB31_Conqueror_Gun': {'tier': 10},
u'GB32_Tortoise': {'tier': 9},
u'GB39_Universal_CarrierQF2': {'tier': 2},
u'GB40_Gun_Carrier_Churchill': {'tier': 6},
u'GB42_Valentine_AT': {'tier': 3},
u'GB48_FV215b_183': {'tier': 10},
u'GB51_Excelsior': {'tier': 5},
u'GB57_Alecto': {'tier': 4},
u'GB58_Cruiser_Mk_III': {'tier': 2},
u'GB59_Cruiser_Mk_IV': {'tier': 3},
u'GB60_Covenanter': {'tier': 4},
u'GB63_TOG_II': {'tier': 6},
u'GB68_Matilda_Black_Prince': {'tier': 5},
u'GB69_Cruiser_Mk_II': {'tier': 3},
u'GB70_FV4202_105': {'tier': 10},
u'GB71_AT_15A': {'tier': 7},
u'GB72_AT15': {'tier': 8},
u'GB73_AT2': {'tier': 5},
u'GB74_AT8': {'tier': 6},
u'GB75_AT7': {'tier': 7},
u'GB76_Mk_VIC': {'tier': 2},
u'GB77_FV304': {'tier': 6},
u'GB78_Sexton_I': {'tier': 3},
u'GB79_FV206': {'tier': 8},
u'GW_Mk_VIe': {'tier': 2},
u'GW_Tiger_P': {'tier': 8},
u'G_E': {'tier': 10},
u'G_Panther': {'tier': 7},
u'G_Tiger': {'tier': 9},
u'Grille': {'tier': 5},
u'H39_captured': {'tier': 2},
u'Ha_Go': {'tier': 2},
u'Hetzer': {'tier': 4},
u'Hummel': {'tier': 6},
u'IS': {'tier': 7},
u'IS-3': {'tier': 8},
u'IS-4': {'tier': 10},
u'IS-6': {'tier': 8},
u'IS-7': {'tier': 10},
u'IS8': {'tier': 9},
u'ISU-152': {'tier': 8},
u'Indien_Panzer': {'tier': 8},
u'JagdPanther': {'tier': 7},
u'JagdPantherII': {'tier': 8},
u'JagdPzIV': {'tier': 6},
u'JagdPz_E100': {'tier': 10},
u'JagdTiger': {'tier': 9},
u'JagdTiger_SdKfz_185': {'tier': 8},
u'KV-13': {'tier': 7},
u'KV-1s': {'tier': 5},
u'KV-220': {'tier': 5},
u'KV-220_test': {'tier': 5},
u'KV-3': {'tier': 7},
u'KV-5': {'tier': 8},
u'KV1': {'tier': 5},
u'KV2': {'tier': 6},
u'KV4': {'tier': 8},
u'Ke_Ho': {'tier': 4},
u'Ke_Ni': {'tier': 3},
u'LTP': {'tier': 3},
u'Leopard1': {'tier': 10},
u'Lorraine155_50': {'tier': 7},
u'Lorraine155_51': {'tier': 8},
u'Lorraine39_L_AM': {'tier': 3},
u'Lorraine40t': {'tier': 9},
u'Lowe': {'tier': 8},
u'Ltraktor': {'tier': 1},
u'M103': {'tier': 9},
u'M10_Wolverine': {'tier': 5},
u'M12': {'tier': 7},
u'M18_Hellcat': {'tier': 6},
u'M22_Locust': {'tier': 3},
u'M24_Chaffee': {'tier': 5},
u'M24_Chaffee_GT': {'tier': 1},
u'M2_lt': {'tier': 2},
u'M2_med': {'tier': 3},
u'M36_Slagger': {'tier': 6},
u'M37': {'tier': 4},
u'M3_Grant': {'tier': 4},
u'M3_Stuart': {'tier': 3},
u'M3_Stuart_LL': {'tier': 3},
u'M40M43': {'tier': 8},
u'M41': {'tier': 5},
u'M41_Bulldog': {'tier': 7},
u'M46_Patton': {'tier': 9},
u'M48A1': {'tier': 10},
u'M4A2E4': {'tier': 5},
u'M4A3E8_Sherman': {'tier': 6},
u'M4_Sherman': {'tier': 5},
u'M53_55': {'tier': 9},
u'M5_Stuart': {'tier': 4},
u'M6': {'tier': 6},
u'M60': {'tier': 10},
u'M6A2E1': {'tier': 8},
u'M7_Priest': {'tier': 3},
u'M7_med': {'tier': 5},
u'M8A1': {'tier': 4},
u'MS-1': {'tier': 1},
u'MT25': {'tier': 6},
u'Marder_III': {'tier': 4},
u'Matilda_II_LL': {'tier': 5},
u'Maus': {'tier': 10},
u'NC27': {'tier': 1},
u'Nashorn': {'tier': 6},
u'Object263': {'tier': 10},
u'Object268': {'tier': 10},
u'Object416': {'tier': 8},
u'Object_140': {'tier': 10},
u'Object_212': {'tier': 9},
u'Object_261': {'tier': 10},
u'Object_430': {'tier': 10},
u'Object_704': {'tier': 9},
u'Object_907': {'tier': 10},
u'Panther_II': {'tier': 8},
u'Panther_M10': {'tier': 7},
u'PanzerJager_I': {'tier': 2},
u'Pershing': {'tier': 8},
u'Pro_Ag_A': {'tier': 9},
u'Pz35t': {'tier': 2},
u'Pz38_NA': {'tier': 4},
u'Pz38t': {'tier': 3},
u'PzI': {'tier': 2},
u'PzII': {'tier': 2},
u'PzIII_A': {'tier': 3},
u'PzIII_AusfJ': {'tier': 4},
u'PzIII_IV': {'tier': 5},
u'PzII_J': {'tier': 3},
u'PzII_Luchs': {'tier': 4},
u'PzIV_Hydro': {'tier': 5},
u'PzIV_schmalturm': {'tier': 6},
u'PzI_ausf_C': {'tier': 3},
u'PzV': {'tier': 7},
u'PzVI': {'tier': 7},
u'PzVIB_Tiger_II': {'tier': 8},
u'PzVI_Tiger_P': {'tier': 7},
u'PzV_PzIV': {'tier': 6},
u'PzV_PzIV_ausf_Alfa': {'tier': 6},
u'Pz_II_AusfG': {'tier': 3},
u'Pz_IV_AusfA': {'tier': 3},
u'Pz_IV_AusfD': {'tier': 4},
u'Pz_IV_AusfH': {'tier': 5},
u'Pz_Sfl_IVb': {'tier': 4},
u'Pz_Sfl_IVc': {'tier': 5},
u'R104_Object_430_II': {'tier': 9},
u'R106_KV85': {'tier': 6},
u'R107_LTB': {'tier': 7},
u'R109_T54S': {'tier': 8},
u'Ram-II': {'tier': 5},
u'RenaultBS': {'tier': 2},
u'RenaultFT': {'tier': 1},
u'RenaultFT_AC': {'tier': 2},
u'RenaultUE57': {'tier': 3},
u'RhB_Waffentrager': {'tier': 8},
u'S-51': {'tier': 7},
u'S35_captured': {'tier': 3},
u'STA_1': {'tier': 8},
u'ST_B1': {'tier': 10},
u'ST_I': {'tier': 9},
u'SU-100': {'tier': 6},
u'SU-101': {'tier': 8},
u'SU-14': {'tier': 8},
u'SU-152': {'tier': 7},
u'SU-18': {'tier': 2},
u'SU-26': {'tier': 3},
u'SU-5': {'tier': 4},
u'SU-76': {'tier': 3},
u'SU-8': {'tier': 6},
u'SU-85': {'tier': 5},
u'SU100M1': {'tier': 7},
u'SU100Y': {'tier': 6},
u'SU122A': {'tier': 5},
u'SU122_44': {'tier': 7},
u'SU122_54': {'tier': 9},
u'SU14_1': {'tier': 7},
u'SU_85I': {'tier': 5},
u'S_35CA': {'tier': 5},
u'Sherman_Jumbo': {'tier': 6},
u'Somua_Sau_40': {'tier': 4},
u'StuG_40_AusfG': {'tier': 5},
u'Sturer_Emil': {'tier': 7},
u'Sturmpanzer_II': {'tier': 4},
u'T-127': {'tier': 3},
u'T-15': {'tier': 3},
u'T-25': {'tier': 5},
u'T-26': {'tier': 2},
u'T-28': {'tier': 4},
u'T-34': {'tier': 5},
u'T-34-85': {'tier': 6},
u'T-43': {'tier': 7},
u'T-44': {'tier': 8},
u'T-46': {'tier': 3},
u'T-50': {'tier': 4},
u'T-54': {'tier': 9},
u'T-60': {'tier': 2},
u'T-70': {'tier': 3},
u'T110': {'tier': 10},
u'T110E3': {'tier': 10},
u'T110E4': {'tier': 10},
u'T14': {'tier': 5},
u'T150': {'tier': 6},
u'T18': {'tier': 2},
u'T1_Cunningham': {'tier': 1},
u'T1_E6': {'tier': 2},
u'T1_hvy': {'tier': 5},
u'T20': {'tier': 7},
u'T21': {'tier': 6},
u'T23E3': {'tier': 7},
u'T25_2': {'tier': 7},
u'T25_AT': {'tier': 7},
u'T26_E4_SuperPershing': {'tier': 8},
u'T28': {'tier': 8},
u'T28_Prototype': {'tier': 8},
u'T29': {'tier': 7},
u'T2_lt': {'tier': 2},
u'T2_med': {'tier': 2},
u'T30': {'tier': 9},
u'T32': {'tier': 8},
u'T34_hvy': {'tier': 8},
u'T37': {'tier': 6},
u'T40': {'tier': 4},
u'T49': {'tier': 8},
u'T54E1': {'tier': 9},
u'T57': {'tier': 2},
u'T57_58': {'tier': 10},
u'T62A': {'tier': 10},
u'T67': {'tier': 5},
u'T69': {'tier': 8},
u'T71': {'tier': 7},
u'T7_Combat_Car': {'tier': 2},
u'T80': {'tier': 4},
u'T82': {'tier': 3},
u'T92': {'tier': 10},
u'T95': {'tier': 9},
u'Te_Ke': {'tier': 2},
u'Tetrarch_LL': {'tier': 2},
u'Type_61': {'tier': 9},
u'VK1602': {'tier': 5},
u'VK2001DB': {'tier': 4},
u'VK2801': {'tier': 6},
u'VK3001H': {'tier': 5},
u'VK3001P': {'tier': 6},
u'VK3002DB': {'tier': 7},
u'VK3002DB_V1': {'tier': 6},
u'VK3002M': {'tier': 6},
u'VK3601H': {'tier': 6},
u'VK4502A': {'tier': 8},
u'VK4502P': {'tier': 9},
u'VK7201': {'tier': 10},
u'Valentine_LL': {'tier': 4},
u'Waffentrager_E100': {'tier': 10},
u'Waffentrager_IV': {'tier': 9},
u'Wespe': {'tier': 3},
u'_105_leFH18B2': {'tier': 5},
u'_Hotchkiss_H35': {'tier': 2},
u'_M44': {'tier': 6}
}
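# A small lookup example using keys from the tables above:
#
#     MAP_EN_NAME_BY_ID["05_prohorovka"]     # -> "Prokhorovka"
#     WOT_TANKS["T-34-85"]["tier"]           # -> 6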
| 27.795673
| 68
| 0.540517
|
665334f6b0e61fa1674cb06c282d3f2e79daf5dd
| 35,266
|
py
|
Python
|
src/ircmsgs.py
|
elieux-contrib/Limnoria
|
2f49362510b0f17dff056af7fc24a11d5e7f975f
|
[
"BSD-3-Clause"
] | null | null | null |
src/ircmsgs.py
|
elieux-contrib/Limnoria
|
2f49362510b0f17dff056af7fc24a11d5e7f975f
|
[
"BSD-3-Clause"
] | null | null | null |
src/ircmsgs.py
|
elieux-contrib/Limnoria
|
2f49362510b0f17dff056af7fc24a11d5e7f975f
|
[
"BSD-3-Clause"
] | null | null | null |
###
# Copyright (c) 2002-2005, Jeremiah Fincher
# Copyright (c) 2010, James McCoy
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
"""
This module provides the basic IrcMsg object used throughout the bot to
represent the actual messages. It also provides several helper functions to
construct such messages in an easier way than the constructor for the IrcMsg
object (which, as you'll read later, is quite...full-featured :))
"""
import re
import time
import base64
import datetime
import warnings
import functools
from . import conf, ircutils, utils
from .utils.iter import all
from .utils import minisix
###
# IrcMsg class -- used for representing IRC messages acquired from a network.
###
class MalformedIrcMsg(ValueError):
pass
# http://ircv3.net/specs/core/message-tags-3.2.html#escaping-values
SERVER_TAG_ESCAPE = [
('\\', '\\\\'), # \ -> \\
(' ', r'\s'),
(';', r'\:'),
('\r', r'\r'),
('\n', r'\n'),
]
escape_server_tag_value = utils.str.MultipleReplacer(
dict(SERVER_TAG_ESCAPE))
unescape_server_tag_value = utils.str.MultipleReplacer(
dict(map(lambda x:(x[1],x[0]), SERVER_TAG_ESCAPE)))
def parse_server_tags(s):
server_tags = {}
for tag in s.split(';'):
if '=' not in tag:
server_tags[tag] = None
else:
(key, value) = tag.split('=', 1)
value = unescape_server_tag_value(value)
if value == '':
# "Implementations MUST interpret empty tag values (e.g. foo=)
# as equivalent to missing tag values (e.g. foo)."
value = None
server_tags[key] = value
return server_tags
def format_server_tags(server_tags):
parts = []
for (key, value) in server_tags.items():
if value is None:
parts.append(key)
else:
parts.append('%s=%s' % (key, escape_server_tag_value(value)))
return '@' + ';'.join(parts)
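# Illustrative round-trip of the two helpers above (not part of the original
# Limnoria source; the tag names and values are made up for the example):
#   >>> parse_server_tags('time=2021-01-01T00:00:00.000Z;account=alice')
#   {'time': '2021-01-01T00:00:00.000Z', 'account': 'alice'}
#   >>> format_server_tags({'account': 'alice'})
#   '@account=alice'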
class IrcMsg(object):
"""Class to represent an IRC message.
As usual, ignore attributes that begin with an underscore. They simply
don't exist. Instances of this class are *not* to be modified, since they
are hashable. Public attributes of this class are .prefix, .command,
.args, .nick, .user, and .host.
The constructor for this class is pretty intricate. It's designed to take
any of three major (sets of) arguments.
Called with no keyword arguments, it takes a single string that is a raw
IRC message (such as one taken straight from the network).
Called with keyword arguments, it *requires* a command parameter. Args is
optional, but with most commands will be necessary. Prefix is obviously
optional, since clients aren't allowed (well, technically, they are, but
only in a completely useless way) to send prefixes to the server.
Since this class isn't to be modified, the constructor also accepts a 'msg'
keyword argument representing a message from which to take all the
attributes not provided otherwise as keyword arguments. So, for instance,
if a programmer wanted to take a PRIVMSG they'd gotten and simply redirect
it to a different source, they could do this:
IrcMsg(prefix='', args=(newSource, otherMsg.args[1]), msg=otherMsg)
"""
# It's too useful to be able to tag IrcMsg objects with extra, unforeseen
# data. Goodbye, __slots__.
# On second thought, let's use methods for tagging.
__slots__ = ('args', 'command', 'host', 'nick', 'prefix', 'user',
'_hash', '_str', '_repr', '_len', 'tags', 'reply_env',
'server_tags', 'time', 'channel')
def __init__(self, s='', command='', args=(), prefix='', msg=None,
reply_env=None):
assert not (msg and s), 'IrcMsg.__init__ cannot accept both s and msg'
if not s and not command and not msg:
raise MalformedIrcMsg('IRC messages require a command.')
self._str = None
self._repr = None
self._hash = None
self._len = None
self.reply_env = reply_env
self.tags = {}
if s:
originalString = s
try:
if not s.endswith('\n'):
s += '\n'
self._str = s
if s[0] == '@':
(server_tags, s) = s.split(' ', 1)
self.server_tags = parse_server_tags(server_tags[1:])
else:
self.server_tags = {}
if s[0] == ':':
self.prefix, s = s[1:].split(None, 1)
else:
self.prefix = ''
if ' :' in s: # Note the space: IPV6 addresses are bad w/o it.
s, last = s.split(' :', 1)
self.args = s.split()
self.args.append(last.rstrip('\r\n'))
else:
self.args = s.split()
self.command = self.args.pop(0)
if 'time' in self.server_tags:
s = self.server_tags['time']
date = datetime.datetime.strptime(s, '%Y-%m-%dT%H:%M:%S.%fZ')
date = minisix.make_datetime_utc(date)
self.time = minisix.datetime__timestamp(date)
else:
self.time = time.time()
except (IndexError, ValueError):
raise MalformedIrcMsg(repr(originalString))
else:
if msg is not None:
if prefix:
self.prefix = prefix
else:
self.prefix = msg.prefix
if command:
self.command = command
else:
self.command = msg.command
if args:
self.args = args
else:
self.args = msg.args
if reply_env:
self.reply_env = reply_env
elif msg.reply_env:
self.reply_env = msg.reply_env.copy()
else:
self.reply_env = None
self.tags = msg.tags.copy()
self.server_tags = msg.server_tags
self.time = msg.time
else:
self.prefix = prefix
self.command = command
assert all(ircutils.isValidArgument, args), args
self.args = args
self.time = None
self.server_tags = {}
self.args = tuple(self.args)
if isUserHostmask(self.prefix):
(self.nick,self.user,self.host)=ircutils.splitHostmask(self.prefix)
else:
(self.nick, self.user, self.host) = (self.prefix,)*3
def __str__(self):
if self._str is not None:
return self._str
if self.prefix:
if len(self.args) > 1:
self._str = ':%s %s %s :%s\r\n' % \
(self.prefix, self.command,
' '.join(self.args[:-1]), self.args[-1])
else:
if self.args:
self._str = ':%s %s :%s\r\n' % \
(self.prefix, self.command, self.args[0])
else:
self._str = ':%s %s\r\n' % (self.prefix, self.command)
else:
if len(self.args) > 1:
self._str = '%s %s :%s\r\n' % \
(self.command,
' '.join(self.args[:-1]), self.args[-1])
else:
if self.args:
self._str = '%s :%s\r\n' % (self.command, self.args[0])
else:
self._str = '%s\r\n' % self.command
return self._str
def __len__(self):
return len(str(self))
def __eq__(self, other):
return isinstance(other, self.__class__) and \
hash(self) == hash(other) and \
self.command == other.command and \
self.prefix == other.prefix and \
self.args == other.args
__req__ = __eq__ # I don't know exactly what this does, but it can't hurt.
def __ne__(self, other):
return not (self == other)
__rne__ = __ne__ # Likewise as above.
def __hash__(self):
if self._hash is not None:
return self._hash
self._hash = hash(self.command) ^ \
hash(self.prefix) ^ \
hash(repr(self.args))
return self._hash
def __repr__(self):
if self._repr is not None:
return self._repr
self._repr = format('IrcMsg(prefix=%q, command=%q, args=%r)',
self.prefix, self.command, self.args)
return self._repr
def __reduce__(self):
return (self.__class__, (str(self),))
def tag(self, tag, value=True):
"""Affect a key:value pair to this message."""
self.tags[tag] = value
def tagged(self, tag):
"""Get the value affected to a tag."""
return self.tags.get(tag) # Returns None if it's not there.
def __getattr__(self, attr):
if attr.startswith('__'): # Since PEP 487, Python calls __set_name__
raise AttributeError("'%s' object has no attribute '%s'" %
(self.__class__.__name__, attr))
if attr in self.tags:
warnings.warn("msg.<tagname> is deprecated. Use "
"msg.tagged('<tagname>') or msg.tags['<tagname>']"
"instead.", DeprecationWarning)
return self.tags[attr]
else:
# TODO: make this raise AttributeError
return None
def isCtcp(msg):
"""Returns whether or not msg is a CTCP message."""
return msg.command in ('PRIVMSG', 'NOTICE') and \
msg.args[1].startswith('\x01') and \
msg.args[1].endswith('\x01') and \
len(msg.args[1]) >= 2
def isAction(msg):
"""A predicate returning true if the PRIVMSG in question is an ACTION"""
if isCtcp(msg):
s = msg.args[1]
payload = s[1:-1] # Chop off \x01.
command = payload.split(None, 1)[0]
return command == 'ACTION'
else:
return False
def isSplit(msg):
if msg.command == 'QUIT':
# It's a quit.
quitmsg = msg.args[0]
if not quitmsg.startswith('"') and not quitmsg.endswith('"'):
# It's not a user-generated quitmsg.
servers = quitmsg.split()
if len(servers) == 2:
# We could check if domains match, or if the hostnames actually
# resolve, but we're going to put that off for now.
return True
return False
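# Example of the heuristic above (illustrative, not from the original source):
# a QUIT whose message is exactly two server names is treated as a netsplit.
#   >>> isSplit(IrcMsg(':n!u@h QUIT :irc.alpha.example irc.beta.example'))
#   True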
_unactionre = re.compile(r'^\x01ACTION\s+(.*)\x01$')
def unAction(msg):
"""Returns the payload (i.e., non-ACTION text) of an ACTION msg."""
assert isAction(msg)
return _unactionre.match(msg.args[1]).group(1)
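# Quick illustration (not in the original source): unAction() strips the CTCP
# ACTION framing from a PRIVMSG payload.
#   >>> unAction(IrcMsg(':n!u@h PRIVMSG #chan :\x01ACTION waves\x01'))
#   'waves'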
def _escape(s):
s = s.replace('&', '&amp;')
s = s.replace('"', '&quot;')
s = s.replace('<', '&lt;')
s = s.replace('>', '&gt;')
return s
def toXml(msg, pretty=True, includeTime=True):
assert msg.command == _escape(msg.command)
L = []
L.append('<msg command="%s" prefix="%s"'%(msg.command,_escape(msg.prefix)))
if includeTime:
L.append(' time="%s"' % time.time())
L.append('>')
if pretty:
L.append('\n')
for arg in msg.args:
if pretty:
L.append(' ')
L.append('<arg>%s</arg>' % _escape(arg))
if pretty:
L.append('\n')
L.append('</msg>\n')
return ''.join(L)
def prettyPrint(msg, addRecipients=False, timestampFormat=None, showNick=True):
"""Provides a client-friendly string form for messages.
IIRC, I copied BitchX's (or was it XChat's?) format for messages.
"""
def nickorprefix():
return msg.nick or msg.prefix
def nick():
if addRecipients:
return '%s/%s' % (msg.nick, msg.args[0])
else:
return msg.nick
if msg.command == 'PRIVMSG':
m = _unactionre.match(msg.args[1])
if m:
s = '* %s %s' % (nick(), m.group(1))
else:
if not showNick:
s = '%s' % msg.args[1]
else:
s = '<%s> %s' % (nick(), msg.args[1])
elif msg.command == 'NOTICE':
if not showNick:
s = '%s' % msg.args[1]
else:
s = '-%s- %s' % (nick(), msg.args[1])
elif msg.command == 'JOIN':
prefix = msg.prefix
if msg.nick:
prefix = '%s <%s>' % (msg.nick, prefix)
s = '*** %s has joined %s' % (prefix, msg.args[0])
elif msg.command == 'PART':
if len(msg.args) > 1:
partmsg = ' (%s)' % msg.args[1]
else:
partmsg = ''
s = '*** %s <%s> has parted %s%s' % (msg.nick, msg.prefix,
msg.args[0], partmsg)
elif msg.command == 'KICK':
if len(msg.args) > 2:
kickmsg = ' (%s)' % msg.args[1]
else:
kickmsg = ''
s = '*** %s was kicked by %s%s' % (msg.args[1], msg.nick, kickmsg)
elif msg.command == 'MODE':
s = '*** %s sets mode: %s' % (nickorprefix(), ' '.join(msg.args))
elif msg.command == 'QUIT':
if msg.args:
quitmsg = ' (%s)' % msg.args[0]
else:
quitmsg = ''
s = '*** %s <%s> has quit IRC%s' % (msg.nick, msg.prefix, quitmsg)
elif msg.command == 'TOPIC':
s = '*** %s changes topic to %s' % (nickorprefix(), msg.args[1])
elif msg.command == 'NICK':
s = '*** %s is now known as %s' % (msg.nick, msg.args[0])
else:
s = utils.str.format('--- Unknown command %q', ' '.join(msg.args))
at = msg.tagged('receivedAt')
if timestampFormat and at:
s = '%s %s' % (time.strftime(timestampFormat, time.localtime(at)), s)
return s
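# Sample prettyPrint() output (illustrative values, not from the original
# source):
#   >>> prettyPrint(IrcMsg(':alice!u@h PRIVMSG #chan :hi all'))
#   '<alice> hi all'
#   >>> prettyPrint(IrcMsg(':alice!u@h JOIN #chan'))
#   '*** alice <alice!u@h> has joined #chan'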
###
# Various IrcMsg functions
###
isNick = ircutils.isNick
areNicks = ircutils.areNicks
isChannel = ircutils.isChannel
areChannels = ircutils.areChannels
areReceivers = ircutils.areReceivers
isUserHostmask = ircutils.isUserHostmask
def pong(payload, prefix='', msg=None):
"""Takes a payload and returns the proper PONG IrcMsg."""
if conf.supybot.protocols.irc.strictRfc():
assert payload, 'PONG requires a payload'
if msg and not prefix:
prefix = msg.prefix
return IrcMsg(prefix=prefix, command='PONG', args=(payload,), msg=msg)
def ping(payload, prefix='', msg=None):
"""Takes a payload and returns the proper PING IrcMsg."""
if conf.supybot.protocols.irc.strictRfc():
assert payload, 'PING requires a payload'
if msg and not prefix:
prefix = msg.prefix
return IrcMsg(prefix=prefix, command='PING', args=(payload,), msg=msg)
def op(channel, nick, prefix='', msg=None):
"""Returns a MODE to op nick on channel."""
if conf.supybot.protocols.irc.strictRfc():
assert isChannel(channel), repr(channel)
assert isNick(nick), repr(nick)
if msg and not prefix:
prefix = msg.prefix
return IrcMsg(prefix=prefix, command='MODE',
args=(channel, '+o', nick), msg=msg)
def ops(channel, nicks, prefix='', msg=None):
"""Returns a MODE to op each of nicks on channel."""
if conf.supybot.protocols.irc.strictRfc():
assert isChannel(channel), repr(channel)
assert nicks, 'Nicks must not be empty.'
assert all(isNick, nicks), nicks
if msg and not prefix:
prefix = msg.prefix
return IrcMsg(prefix=prefix, command='MODE',
args=(channel, '+' + ('o'*len(nicks))) + tuple(nicks),
msg=msg)
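# Example of the wire form these MODE helpers produce (illustrative only, with
# strictRfc left at its default):
#   >>> str(ops('#chan', ['alice', 'bob'], prefix='op!user@host'))
#   ':op!user@host MODE #chan +oo alice :bob\r\n'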
def deop(channel, nick, prefix='', msg=None):
"""Returns a MODE to deop nick on channel."""
if conf.supybot.protocols.irc.strictRfc():
assert isChannel(channel), repr(channel)
assert isNick(nick), repr(nick)
if msg and not prefix:
prefix = msg.prefix
return IrcMsg(prefix=prefix, command='MODE',
args=(channel, '-o', nick), msg=msg)
def deops(channel, nicks, prefix='', msg=None):
"""Returns a MODE to deop each of nicks on channel."""
if conf.supybot.protocols.irc.strictRfc():
assert isChannel(channel), repr(channel)
assert nicks, 'Nicks must not be empty.'
assert all(isNick, nicks), nicks
if msg and not prefix:
prefix = msg.prefix
return IrcMsg(prefix=prefix, command='MODE', msg=msg,
args=(channel, '-' + ('o'*len(nicks))) + tuple(nicks))
def halfop(channel, nick, prefix='', msg=None):
"""Returns a MODE to halfop nick on channel."""
if conf.supybot.protocols.irc.strictRfc():
assert isChannel(channel), repr(channel)
assert isNick(nick), repr(nick)
if msg and not prefix:
prefix = msg.prefix
return IrcMsg(prefix=prefix, command='MODE',
args=(channel, '+h', nick), msg=msg)
def halfops(channel, nicks, prefix='', msg=None):
"""Returns a MODE to halfop each of nicks on channel."""
if conf.supybot.protocols.irc.strictRfc():
assert isChannel(channel), repr(channel)
assert nicks, 'Nicks must not be empty.'
assert all(isNick, nicks), nicks
if msg and not prefix:
prefix = msg.prefix
return IrcMsg(prefix=prefix, command='MODE', msg=msg,
args=(channel, '+' + ('h'*len(nicks))) + tuple(nicks))
def dehalfop(channel, nick, prefix='', msg=None):
"""Returns a MODE to dehalfop nick on channel."""
if conf.supybot.protocols.irc.strictRfc():
assert isChannel(channel), repr(channel)
assert isNick(nick), repr(nick)
if msg and not prefix:
prefix = msg.prefix
return IrcMsg(prefix=prefix, command='MODE',
args=(channel, '-h', nick), msg=msg)
def dehalfops(channel, nicks, prefix='', msg=None):
"""Returns a MODE to dehalfop each of nicks on channel."""
if conf.supybot.protocols.irc.strictRfc():
assert isChannel(channel), repr(channel)
assert nicks, 'Nicks must not be empty.'
assert all(isNick, nicks), nicks
if msg and not prefix:
prefix = msg.prefix
return IrcMsg(prefix=prefix, command='MODE', msg=msg,
args=(channel, '-' + ('h'*len(nicks))) + tuple(nicks))
def voice(channel, nick, prefix='', msg=None):
"""Returns a MODE to voice nick on channel."""
if conf.supybot.protocols.irc.strictRfc():
assert isChannel(channel), repr(channel)
assert isNick(nick), repr(nick)
if msg and not prefix:
prefix = msg.prefix
return IrcMsg(prefix=prefix, command='MODE',
args=(channel, '+v', nick), msg=msg)
def voices(channel, nicks, prefix='', msg=None):
"""Returns a MODE to voice each of nicks on channel."""
if conf.supybot.protocols.irc.strictRfc():
assert isChannel(channel), repr(channel)
assert nicks, 'Nicks must not be empty.'
assert all(isNick, nicks)
if msg and not prefix:
prefix = msg.prefix
return IrcMsg(prefix=prefix, command='MODE', msg=msg,
args=(channel, '+' + ('v'*len(nicks))) + tuple(nicks))
def devoice(channel, nick, prefix='', msg=None):
"""Returns a MODE to devoice nick on channel."""
if conf.supybot.protocols.irc.strictRfc():
assert isChannel(channel), repr(channel)
assert isNick(nick), repr(nick)
if msg and not prefix:
prefix = msg.prefix
return IrcMsg(prefix=prefix, command='MODE',
args=(channel, '-v', nick), msg=msg)
def devoices(channel, nicks, prefix='', msg=None):
"""Returns a MODE to devoice each of nicks on channel."""
if conf.supybot.protocols.irc.strictRfc():
assert isChannel(channel), repr(channel)
assert nicks, 'Nicks must not be empty.'
assert all(isNick, nicks), nicks
if msg and not prefix:
prefix = msg.prefix
return IrcMsg(prefix=prefix, command='MODE', msg=msg,
args=(channel, '-' + ('v'*len(nicks))) + tuple(nicks))
def ban(channel, hostmask, exception='', prefix='', msg=None):
"""Returns a MODE to ban nick on channel."""
if conf.supybot.protocols.irc.strictRfc():
assert isChannel(channel), repr(channel)
assert isUserHostmask(hostmask), repr(hostmask)
modes = [('+b', hostmask)]
if exception:
modes.append(('+e', exception))
if msg and not prefix:
prefix = msg.prefix
return IrcMsg(prefix=prefix, command='MODE',
args=[channel] + ircutils.joinModes(modes), msg=msg)
def bans(channel, hostmasks, exceptions=(), prefix='', msg=None):
"""Returns a MODE to ban each of nicks on channel."""
if conf.supybot.protocols.irc.strictRfc():
assert isChannel(channel), repr(channel)
assert all(isUserHostmask, hostmasks), hostmasks
modes = [('+b', s) for s in hostmasks] + [('+e', s) for s in exceptions]
if msg and not prefix:
prefix = msg.prefix
return IrcMsg(prefix=prefix, command='MODE',
args=[channel] + ircutils.joinModes(modes), msg=msg)
def unban(channel, hostmask, prefix='', msg=None):
"""Returns a MODE to unban nick on channel."""
if conf.supybot.protocols.irc.strictRfc():
assert isChannel(channel), repr(channel)
assert isUserHostmask(hostmask), repr(hostmask)
if msg and not prefix:
prefix = msg.prefix
return IrcMsg(prefix=prefix, command='MODE',
args=(channel, '-b', hostmask), msg=msg)
def unbans(channel, hostmasks, prefix='', msg=None):
"""Returns a MODE to unban each of nicks on channel."""
if conf.supybot.protocols.irc.strictRfc():
assert isChannel(channel), repr(channel)
assert all(isUserHostmask, hostmasks), hostmasks
modes = [('-b', s) for s in hostmasks]
if msg and not prefix:
prefix = msg.prefix
return IrcMsg(prefix=prefix, command='MODE',
args=[channel] + ircutils.joinModes(modes), msg=msg)
def kick(channel, nick, s='', prefix='', msg=None):
"""Returns a KICK to kick nick from channel with the message msg."""
if conf.supybot.protocols.irc.strictRfc():
assert isChannel(channel), repr(channel)
assert isNick(nick), repr(nick)
if msg and not prefix:
prefix = msg.prefix
if minisix.PY2 and isinstance(s, unicode):
s = s.encode('utf8')
assert isinstance(s, str)
if s:
return IrcMsg(prefix=prefix, command='KICK',
args=(channel, nick, s), msg=msg)
else:
return IrcMsg(prefix=prefix, command='KICK',
args=(channel, nick), msg=msg)
def kicks(channels, nicks, s='', prefix='', msg=None):
"""Returns a KICK to kick each of nicks from channel with the message msg.
"""
if isinstance(channels, str): # Backward compatibility
channels = [channels]
if conf.supybot.protocols.irc.strictRfc():
assert areChannels(channels), repr(channels)
assert areNicks(nicks), repr(nicks)
if msg and not prefix:
prefix = msg.prefix
if minisix.PY2 and isinstance(s, unicode):
s = s.encode('utf8')
assert isinstance(s, str)
if s:
for channel in channels:
return IrcMsg(prefix=prefix, command='KICK',
args=(channel, ','.join(nicks), s), msg=msg)
else:
for channel in channels:
return IrcMsg(prefix=prefix, command='KICK',
args=(channel, ','.join(nicks)), msg=msg)
def privmsg(recipient, s, prefix='', msg=None):
"""Returns a PRIVMSG to recipient with the message msg."""
if conf.supybot.protocols.irc.strictRfc():
assert (areReceivers(recipient)), repr(recipient)
assert s, 's must not be empty.'
if minisix.PY2 and isinstance(s, unicode):
s = s.encode('utf8')
assert isinstance(s, str)
if msg and not prefix:
prefix = msg.prefix
return IrcMsg(prefix=prefix, command='PRIVMSG',
args=(recipient, s), msg=msg)
def dcc(recipient, kind, *args, **kwargs):
# Stupid Python won't allow (recipient, kind, *args, prefix=''), so we have
# to use the **kwargs form. Blech.
assert isNick(recipient), 'Can\'t DCC a channel.'
kind = kind.upper()
assert kind in ('SEND', 'CHAT', 'RESUME', 'ACCEPT'), 'Invalid DCC command.'
args = (kind,) + args
return IrcMsg(prefix=kwargs.get('prefix', ''), command='PRIVMSG',
args=(recipient, ' '.join(args)))
def action(recipient, s, prefix='', msg=None):
"""Returns a PRIVMSG ACTION to recipient with the message msg."""
if conf.supybot.protocols.irc.strictRfc():
assert (isChannel(recipient) or isNick(recipient)), repr(recipient)
if msg and not prefix:
prefix = msg.prefix
return IrcMsg(prefix=prefix, command='PRIVMSG',
args=(recipient, '\x01ACTION %s\x01' % s), msg=msg)
def notice(recipient, s, prefix='', msg=None):
"""Returns a NOTICE to recipient with the message msg."""
if conf.supybot.protocols.irc.strictRfc():
assert areReceivers(recipient), repr(recipient)
assert s, 'msg must not be empty.'
if minisix.PY2 and isinstance(s, unicode):
s = s.encode('utf8')
assert isinstance(s, str)
if msg and not prefix:
prefix = msg.prefix
return IrcMsg(prefix=prefix, command='NOTICE', args=(recipient, s), msg=msg)
def join(channel, key=None, prefix='', msg=None):
"""Returns a JOIN to a channel"""
if conf.supybot.protocols.irc.strictRfc():
assert areChannels(channel), repr(channel)
if msg and not prefix:
prefix = msg.prefix
if key is None:
return IrcMsg(prefix=prefix, command='JOIN', args=(channel,), msg=msg)
else:
if conf.supybot.protocols.irc.strictRfc():
chars = '\x00\r\n\f\t\v '
assert not any([(ord(x) >= 128 or x in chars) for x in key])
return IrcMsg(prefix=prefix, command='JOIN',
args=(channel, key), msg=msg)
def joins(channels, keys=None, prefix='', msg=None):
"""Returns a JOIN to each of channels."""
if conf.supybot.protocols.irc.strictRfc():
assert all(isChannel, channels), channels
if msg and not prefix:
prefix = msg.prefix
if keys is None:
keys = []
assert len(keys) <= len(channels), 'Got more keys than channels.'
if not keys:
return IrcMsg(prefix=prefix,
command='JOIN',
args=(','.join(channels),), msg=msg)
else:
if conf.supybot.protocols.irc.strictRfc():
chars = '\x00\r\n\f\t\v '
for key in keys:
assert not any([(ord(x) >= 128 or x in chars) for x in key])
return IrcMsg(prefix=prefix,
command='JOIN',
args=(','.join(channels), ','.join(keys)), msg=msg)
def part(channel, s='', prefix='', msg=None):
"""Returns a PART from channel with the message msg."""
if conf.supybot.protocols.irc.strictRfc():
assert isChannel(channel), repr(channel)
if msg and not prefix:
prefix = msg.prefix
if minisix.PY2 and isinstance(s, unicode):
s = s.encode('utf8')
assert isinstance(s, str)
if s:
return IrcMsg(prefix=prefix, command='PART',
args=(channel, s), msg=msg)
else:
return IrcMsg(prefix=prefix, command='PART',
args=(channel,), msg=msg)
def parts(channels, s='', prefix='', msg=None):
"""Returns a PART from each of channels with the message msg."""
if conf.supybot.protocols.irc.strictRfc():
assert all(isChannel, channels), channels
if msg and not prefix:
prefix = msg.prefix
if minisix.PY2 and isinstance(s, unicode):
s = s.encode('utf8')
assert isinstance(s, str)
if s:
return IrcMsg(prefix=prefix, command='PART',
args=(','.join(channels), s), msg=msg)
else:
return IrcMsg(prefix=prefix, command='PART',
args=(','.join(channels),), msg=msg)
def quit(s='', prefix='', msg=None):
"""Returns a QUIT with the message msg."""
if msg and not prefix:
prefix = msg.prefix
if s:
return IrcMsg(prefix=prefix, command='QUIT', args=(s,), msg=msg)
else:
return IrcMsg(prefix=prefix, command='QUIT', msg=msg)
def topic(channel, topic=None, prefix='', msg=None):
"""Returns a TOPIC for channel with the topic topic."""
if conf.supybot.protocols.irc.strictRfc():
assert isChannel(channel), repr(channel)
if msg and not prefix:
prefix = msg.prefix
if topic is None:
return IrcMsg(prefix=prefix, command='TOPIC',
args=(channel,), msg=msg)
else:
if minisix.PY2 and isinstance(topic, unicode):
topic = topic.encode('utf8')
assert isinstance(topic, str)
return IrcMsg(prefix=prefix, command='TOPIC',
args=(channel, topic), msg=msg)
def nick(nick, prefix='', msg=None):
"""Returns a NICK with nick nick."""
if conf.supybot.protocols.irc.strictRfc():
assert isNick(nick), repr(nick)
if msg and not prefix:
prefix = msg.prefix
return IrcMsg(prefix=prefix, command='NICK', args=(nick,), msg=msg)
def user(ident, user, prefix='', msg=None):
"""Returns a USER with ident ident and user user."""
if conf.supybot.protocols.irc.strictRfc():
assert '\x00' not in ident and \
'\r' not in ident and \
'\n' not in ident and \
' ' not in ident and \
'@' not in ident
if msg and not prefix:
prefix = msg.prefix
return IrcMsg(prefix=prefix, command='USER',
args=(ident, '0', '*', user), msg=msg)
def who(hostmaskOrChannel, prefix='', msg=None, args=()):
"""Returns a WHO for the hostmask or channel hostmaskOrChannel."""
if conf.supybot.protocols.irc.strictRfc():
assert isChannel(hostmaskOrChannel) or \
isUserHostmask(hostmaskOrChannel), repr(hostmaskOrChannel)
if msg and not prefix:
prefix = msg.prefix
return IrcMsg(prefix=prefix, command='WHO',
args=(hostmaskOrChannel,) + args, msg=msg)
def _whois(COMMAND, nick, mask='', prefix='', msg=None):
"""Returns a WHOIS for nick."""
if conf.supybot.protocols.irc.strictRfc():
assert areNicks(nick), repr(nick)
if msg and not prefix:
prefix = msg.prefix
args = (nick,)
if mask:
args = (nick, mask)
return IrcMsg(prefix=prefix, command=COMMAND, args=args, msg=msg)
whois = functools.partial(_whois, 'WHOIS')
whowas = functools.partial(_whois, 'WHOWAS')
def names(channel=None, prefix='', msg=None):
if conf.supybot.protocols.irc.strictRfc():
assert areChannels(channel)
if msg and not prefix:
prefix = msg.prefix
if channel is not None:
return IrcMsg(prefix=prefix, command='NAMES', args=(channel,), msg=msg)
else:
return IrcMsg(prefix=prefix, command='NAMES', msg=msg)
def mode(channel, args=(), prefix='', msg=None):
if msg and not prefix:
prefix = msg.prefix
if isinstance(args, minisix.string_types):
args = (args,)
else:
args = tuple(map(str, args))
return IrcMsg(prefix=prefix, command='MODE', args=(channel,)+args, msg=msg)
def modes(channel, args=(), prefix='', msg=None):
"""Returns a MODE message for the channel for all the (mode, targetOrNone) 2-tuples in 'args'."""
if conf.supybot.protocols.irc.strictRfc():
assert isChannel(channel), repr(channel)
modes = args
if msg and not prefix:
prefix = msg.prefix
return IrcMsg(prefix=prefix, command='MODE',
args=[channel] + ircutils.joinModes(modes), msg=msg)
def limit(channel, limit, prefix='', msg=None):
return mode(channel, ['+l', limit], prefix=prefix, msg=msg)
def unlimit(channel, limit, prefix='', msg=None):
return mode(channel, ['-l', limit], prefix=prefix, msg=msg)
def invite(nick, channel, prefix='', msg=None):
"""Returns an INVITE for nick."""
if conf.supybot.protocols.irc.strictRfc():
assert isNick(nick), repr(nick)
if msg and not prefix:
prefix = msg.prefix
return IrcMsg(prefix=prefix, command='INVITE',
args=(nick, channel), msg=msg)
def password(password, prefix='', msg=None):
"""Returns a PASS command for accessing a server."""
if conf.supybot.protocols.irc.strictRfc():
assert password, 'password must not be empty.'
if msg and not prefix:
prefix = msg.prefix
return IrcMsg(prefix=prefix, command='PASS', args=(password,), msg=msg)
def ison(nick, prefix='', msg=None):
if conf.supybot.protocols.irc.strictRfc():
assert isNick(nick), repr(nick)
if msg and not prefix:
prefix = msg.prefix
return IrcMsg(prefix=prefix, command='ISON', args=(nick,), msg=msg)
def monitor(subcommand, nicks=None, prefix='', msg=None):
if conf.supybot.protocols.irc.strictRfc():
for nick in nicks:
assert isNick(nick), repr(nick)
assert subcommand in '+-CLS'
if subcommand in 'CLS':
assert nicks is None
if msg and not prefix:
prefix = msg.prefix
if not isinstance(nicks, str):
nicks = ','.join(nicks)
return IrcMsg(prefix=prefix, command='MONITOR', args=(subcommand, nicks),
msg=msg)
def error(s, msg=None):
return IrcMsg(command='ERROR', args=(s,), msg=msg)
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
| 38.924945
| 101
| 0.588527
|
1284cce51c8fcb93142c64b7650b7c61c4f0e9cc
| 16,738
|
py
|
Python
|
repgen/data/value.py
|
DanielTOsborne/repgen5
|
a13e0005dc2a471bb9c112b53ab5e2e0d2596f72
|
[
"MIT"
] | null | null | null |
repgen/data/value.py
|
DanielTOsborne/repgen5
|
a13e0005dc2a471bb9c112b53ab5e2e0d2596f72
|
[
"MIT"
] | 1
|
2021-12-17T16:45:56.000Z
|
2022-02-02T20:40:57.000Z
|
repgen/data/value.py
|
DanielTOsborne/repgen5
|
a13e0005dc2a471bb9c112b53ab5e2e0d2596f72
|
[
"MIT"
] | 1
|
2021-03-31T21:38:55.000Z
|
2021-03-31T21:38:55.000Z
|
import pytz,datetime,sys
import operator
from inspect import isfunction
# types
string_types = ("".__class__,u"".__class__)
number_types = (int,float,complex)
class Value:
shared = {
"picture" : "NNZ",
"misstr" : "-M-",
"undef" : "-?-",
# shared and updated between calls
"host" : None, # ip address/hostname or file name
"port" : None,
"dbtype" : None, # file or spkjson
"tz" : pytz.utc,
"start": None,
"end": None,
"interval": None,
"value": None, # this value is only used for generating time series
}
def __init__( self, *args, **kwargs ):
self.index = None
self.type="SCALAR"
self.value = None
self.values = []
self.picture="%s"
# go through the keyword args,
# set them as static variables for the next call
# update the shared keywords
for key in kwargs:
value = kwargs[key]
if key.lower()=="tz" and isinstance(value, string_types):
value = pytz.timezone(value)
if (key.lower() == "start" or key.lower() == "end" or key.lower() == "time") and isinstance(value,(Value)):
value = value.value # internally we want the actual datetime
if key.lower() == "time":
Value.shared["start"] = value
Value.shared["end"] = value
Value.shared[key.lower()] = value
# load the keywords for this instance
for key in Value.shared:
self.__dict__[key] = Value.shared[key]
if len( args ) == 1:
self.value = args[0]
return
elif len(args)> 0: raise Exception ("Only 1 non named value is allowed")
self.type = "TIMESERIES"
self.values = [ ] # will be a tuple of (time stamp, value, quality)
if self.dbtype is None:
raise Exception("you must enter a scalar quantity if you aren't specifying a data source")
elif self.dbtype.upper() == "FILE":
pass
elif self.dbtype.upper() == "COPY":
pass
elif self.dbtype.upper() == "GENTS":
current_t = self.start
end_t = self.end
while current_t <= end_t:
if isinstance(self.value, number_types):
self.values.append( ( current_t.astimezone(self.tz),self.value,0 ) )
elif isinstance(self.value, Value ):
self.value = self.value.value
self.values.append( ( current_t.astimezone(self.tz),self.value,0 ) )
elif isfunction(self.value):
self.values.append( ( current_t.astimezone(self.tz),self.value(),0 ) )
current_t = current_t + self.interval
elif self.dbtype.upper() == "SPKJSON":
try:
import json
except:
try:
import simplejson as json
except:
print >>sys.stderr, "To run this program you either need to update to a newer python, or install the simplejson module."
import http.client as httplib, urllib.parse as urllib
fmt = "%d-%b-%Y %H%M"
tz = self.tz
units= self.dbunits
ts_name = ".".join( (self.dbloc, self.dbpar, self.dbptyp, self.dbint, self.dbdur, self.dbver) )
sys.stderr.write("Getting %s from %s to %s in tz %s, with units %s\n" % (ts_name,self.start.strftime(fmt),self.end.strftime(fmt),str(tz),units))
query = "/fcgi-bin/get_ts.py?"
params = urllib.urlencode( {
"site": ts_name,
"units": units,
"start_time": self.start.strftime(fmt),
"end_time": self.end.strftime(fmt),
"tz": str(self.tz)
})
try:
conn = httplib.HTTPConnection( self.host + ":" + str(self.port))
conn.request("GET", query+params )
r1 = conn.getresponse()
data =r1.read()
data_dict = json.loads(data)
# get the depth
prev_t = 0
#print repr(data_dict)
for d in data_dict["data"]:
_t = float(d[0])/1000.0 # spkjson returns times in javascript time, milliseconds since epoch, convert to unix time of seconds since epoch
# this seems to work to have the data in the right time
# will need to keep an eye on it though
_dt = datetime.datetime.fromtimestamp(_t,pytz.utc)
#print _dt
#_dt = _dt.astimezone(self.tz)
_dt = _dt.replace(tzinfo=self.tz)
#print _dt
if d[1] is not None:
_v = float(d[1]) # does not currently implement text operations
else:
_v = None
_q = int(d[2])
self.values.append( ( _dt,_v,_q ) )
if self.start == self.end:
self.type = "SCALAR"
self.value = self.values[0][1]
except Exception as err:
print( repr(err) + " : " + str(err) )
elif self.dbtype.upper() == "DSS":
raise Exception("DSS retrieval is not currently implemented")
# math functions
def __add__( self, other ):
return self.domath(operator.add,other)
def __sub__( self, other ):
return self.domath( operator.sub, other )
def __mul__( self, other ):
return self.domath( operator.mul, other)
def __truediv__(self,other):
return self.domath( operator.truediv, other)
def domath(self,op,other):
typ = Value.shared["dbtype"]
tmp = Value(dbtype="copy")
Value.shared["dbtype"]=typ
print( "Doing Op %s on %s with other %s" % (repr(op),repr(self),repr(other) ) )
if isinstance( other, number_types ) and self.type=="TIMESERIES":
for v in self.values:
if (v is not None) and (other is not None):
tmp.values.append( (v[0],op(v[1], other),v[2]) )
else:
tmp.values.append( ( v[0], None, v[2] ) )
elif isinstance( other, (int,float,complex,datetime.timedelta) ) and self.type=="SCALAR":
if (self.value is not None) and (other is not None):
tmp.value = op(self.value,other)
else:
tmp.value = None
tmp.type="SCALAR"
elif isinstance( other, Value ):
if self.type == "SCALAR" and other.type == "SCALAR":
if (self.value is not None) and (other.value is not None):
tmp.value = op(self.value,other.value)
else:
tmp.value = None
tmp.type = "SCALAR"
elif self.type =="TIMESERIES" and other.type == "SCALAR":
for v in self.values:
if (v[1] is not None) and (other.value is not None):
tmp.values.append( (v[0], op(v[1],other.value), v[2] ) )
else:
tmp.values.append( (v[0], None, v[2] ) )
elif self.type =="SCALAR" and other.type == "TIMESERIES":
for v in other.values:
if (v[1] is not None) and (self.value is not None):
tmp.values.append( (v[0], op(v[1],self.value), v[2] ) )
else:
tmp.values.append( (v[0], None, v[2] ) )
elif self.type=="TIMESERIES" and other.type == "TIMESERIES":
# loop through both arrays
# for now just implement intersection
for v_left in self.values:
for v_right in other.values:
if v_left[0] == v_right[0]: # times match
if (v_left[1] is not None) and (v_right[1] is not None):
tmp.values.append( (v_left[0],op( v_left[1], v_right[1] ), v_left[2] ) )
else:
tmp.values.append( (v_left[0], None, v_left[2] ) )
else:
return NotImplemented
else:
return NotImplemented
return tmp
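# Rough illustration of the operator overloads above (not part of the original
# repgen source; note that domath() also prints a debug line when called):
#   (Value(10) / Value(4)).value   # -> 2.5
#   (Value(3) + 2).value           # -> 5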
def __str__(self):
if self.type=="SCALAR":
return self.format(self.value)
else:
return "Unable to process at this time"
def __repr__(self):
return "<Value,type=%s,value=%s,len values=%d, picture=%s>" % (self.type,str(self.value),len(self.values),self.picture)
def format(self,value):
#print repr(value)
if isinstance(value, number_types):
return self.picture % value
elif isinstance(value, datetime.datetime) :
if "%K" in self.picture:
tmp = self.picture.replace("%K","%H")
tmpdt = value.replace(hour=value.hour)
if value.hour == 0 and value.minute==0:
tmp = tmp.replace("%H","24")
tmpdt = tmpdt - datetime.timedelta(hours=1) # get into the previous day
return tmpdt.strftime(tmp)
# special case, 2400 hours needs to be displayed
return value.strftime(self.picture)
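# Behaviour sketch for format() above (illustrative, not from the original
# source). A "%K" picture renders midnight as hour 24 of the previous day:
#   v = Value(0); v.picture = "%d-%b-%Y %K%M"
#   v.format(datetime.datetime(2021, 1, 2, 0, 0))   # -> '01-Jan-2021 2400'
#   v.picture = "%6.2f"; v.format(3.14159)          # -> '  3.14'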
# will need implementations of add, radd, mult, div, etc for use in math operations.
def pop(self):
if self.type == "SCALAR":
return self.format(self.value)
elif self.type == "TIMESERIES":
if self.index is None:
self.index = 0
self.index = self.index+1
try:
#print repr(self.values[self.index-1])
return self.format(self.values[self.index-1][1])
except Exception as err:
print(repr(err) + " : " + str(err), file=sys.stderr)
return self.misstr
def datatimes(self):
"""
Returns a new Value where the values are replaced by the datetimes
"""
typ = Value.shared["dbtype"]
tmp = Value(dbtype="copy")
Value.shared["dbtype"]=typ
for v in self.values:
tmp.values.append( (v[0],v[0],v[2]) )
return tmp
def qualities(self):
"""
Returns a new Value where the values are replaced by the qualities
"""
typ = Value.shared["dbtype"]
tmp = Value(dbtype="copy")
Value.shared["dbtype"]=typ
for v in self.values:
tmp.values.append( (v[0],v[2],v[2]) )
return tmp
def set_time( self, **kwarg ):
if self.type == "SCALAR" and isinstance( self.value, datetime.datetime ):
self.value = self.value.replace( **kwarg )
else:
raise Exception("Not implemented for the requested change")
def last(self):
if self.type =="TIMESERIES":
typ = Value.shared["dbtype"]
tmp = Value(dbtype="copy")
Value.shared["dbtype"]=typ
tmp.value = None
tmp.type ="SCALAR"
try:
tmp.value = self.values[ len(self.values)-1 ] [1]
except Exception as err:
print >>sys.stderr, "Issue with getting last value -> %s : %s" % (repr(err),str(err))
return tmp
else:
raise Exception("operation only valid on a time series")
def __getitem__( self, arg ):
dt = arg
if isinstance(arg,Value):
dt = arg.value
if self.type == "TIMESERIES":
typ = Value.shared["dbtype"]
tmp = Value(dbtype="copy")
Value.shared["dbtype"]=typ
tmp.value = None
tmp.type ="SCALAR"
haveval =False
for v in self.values:
if v[0] == dt:
tmp.value = v[1]
haveval = True
break
if haveval == True:
return tmp
else:
raise KeyError("no value at %s" % str(dt) )
else:
raise Exception("date index only valid on a timeseries")
"""
The following are static methods as they can be applied to multiple time series/values at a time
all functions should process a keyword 'treat' which determines how the function will respond
to missing values
valid values for treat will be the following:
a number - if a value is missing (None) use this number in its place
a tuple of numbers - if a value is missing, substitute these replacement values in the order of the arguments
"IGNORE" - operate as if that row/value wasn't there
"MISS" - if any given value is missing, the result is missing (This is the default)
Generally args should be either a list of SCALAR values (actual number types are okay)
or a single time series.
"""
@staticmethod
def apply( function, *args, **kwargs ):
"""
apply an arbitrary user function to the provided data.
the inputs to the function must be the same number of, and in the same order as, the values passed into args
the function is called using function(*args)
the function must return a number or None
the function can be any callable object: function,lambda, class with a __call__ method, etc
"""
returns = 1 #
try:
returns = int(kwargs["returns"])
except:
pass
values = []
typ = Value.shared["dbtype"]
for i in range(0,returns):
tmp = Value(dbtype="copy")
tmp.values = []
tmp.value = None
values.append( tmp )
Value.shared["dbtype"]=typ
times = Value.gettimes(*args,**kwargs)
if len(times)==0:
tmp.type = "SCALAR"
# get the numbers of everything
#tmp.value = function( *args )
ret = function( *args )
if isinstance( ret, (list,tuple) ):
for i in range(len(ret)):
values[i].value = ret[i]
else:
values[0].value = ret
elif len(times) > 0:
for t in times:
vars = []
for arg in args:
if isinstance( arg, (int,float,complex) ):
vars.append(arg)
elif isinstance( arg, Value ) and arg.type=="SCALAR":
vars.append( arg.value) # need to handle missing value (.value is None)
elif isinstance( arg, Value ) and arg.type=="TIMESERIES":
try:
v = arg[t].value
vars.append(v)
except KeyError as err:
vars.append(None) # here we handle the missing value logic
res = None
try:
res = function( *vars )
except Exception as err:
print >> sys.stderr, "Failed to compute a values %s : %s" % (repr(err),repr(str))
if isinstance( res, (list,tuple)):
for i in range(len(res)):
values[i].values.append( (t,res[i],0) )
else:
values[0].values.append( ( t,res,0) )
return values
@staticmethod
def sum( *args, **kwarg ):
"""
this is an exception to the one timeseries rule.
we assume the user wants whatever values are passed to be summed up into
one value
"""
tmp = Value.mktmp()
tmp.value = 0
tmp.type="SCALAR"
treat=Value.gettreat(**kwarg)
for arg in args:
if isinstance( arg, number_types ):
tmp.value += arg
if arg.type =="SCALAR":
if arg.value is not None:
tmp.value += arg.value
else:
if isinstance( treat, number_types):
tmp.value += treat
elif treat=="MISS":
tmp.value = None
return tmp
# else, move to the next value
elif arg.type == "TIMESERIES":
for row in arg.values:
v = row[1]
if v is not None:
tmp.value += v
else:
if isinstance( treat, number_types):
tmp.value += treat
elif treat=="MISS":
tmp.value = None
return tmp
return tmp
@staticmethod
def average( *args, **kwarg ):
tmp = Value.mktmp()
tmp.value = 0
tmp.type="SCALAR"
treat = Value.gettreat(**kwarg)
numvals = 0
if len(args) > 1:
for arg in args:
if arg.type=="TIMESERIES":
raise Exception("Time series not allowed with mulitple values")
if len(args) == 1:
if args[0].type == "SCALAR":
tmp.value = args[0].value
else: # time series
for row in args[0].values:
v = row[1]
if v is not None:
tmp.value += v
numvals += 1
elif treat == "MISS":
tmp.value = None
return tmp
tmp.value = tmp.value/float(numvals)
else:
for arg in args:
if isinstance( arg, number_types ):
tmp.value += arg
numvals += 1
else:
if arg.value is not None:
tmp.value += arg.value
numvals += 1
elif treat=="MISS":
tmp.value = None
return tmp
tmp.value = tmp.value/float(numvals)
return tmp
@staticmethod
def count(*args ):
"""
This function ignores the only 1 timeseries rule and just counts the number of non-missing
values in all the variables passed in.
It also doesn't take any keywords
"""
tmp = Value.mktmp()
tmp.value = 0
tmp.type = "SCALAR"
for arg in args:
if isinstance(arg, number_types):
tmp.value+=1
elif isinstance(arg, Value) and arg.type =="SCALAR" and arg.value is not None:
tmp.value+=1
elif isinstance(arg, Value) and arg.type =="TIMESERIES":
for row in arg.values:
if row[1] is not None:
tmp.value+=1
return tmp
@staticmethod
def accum(arg,**kwarg ):
"""
This function requires a single time series and nothing else
treat
number = use the number
ignore = current value is missing, but otherwise keep accumulating
miss = stop accumulating after the first missing input
"""
tmp = Value.mktmp()
tmp.type="TIMESERIES"
tmp.values = []
treat = Value.gettreat(**kwarg)
accum = 0
previous = 0
for row in arg.values:
dt,v,q = row
cur = None
if v is not None and not ((previous is None) and (treat=="MISS")) :
accum += v
cur = accum
elif v is None and ((previous is None) and (treat=="MISS")):
cur = None
elif isinstance(treat, number_types):
accum += treat  # per the docstring above, a numeric 'treat' stands in for the missing value
cur = accum
else:
cur = None
previous=v
tmp.values.append( (dt,cur,q) )
return tmp
@staticmethod
def gettimes( *args,**kwargs ):
# build a new list that has the intersection or union (user specified; just implement intersection for now)
# scalar values will just get copied in time, we do need to maintain the order of the input args.
timesets = []
times = []
for arg in args:
if isinstance( arg, Value) and arg.type == "TIMESERIES":
timesets.append( set( [x[0] for x in arg.values ]) )
if len(timesets) > 0:
if len(timesets)==1:
times = list(timesets[0])
else:
times =list( timesets[0].intersection( *timesets[1:] ) ) # here we should check for intersection or union
times.sort() # make sure everything is in time ascending order
return times
@staticmethod
def mktmp():
typ = Value.shared["dbtype"]
tmp = Value(dbtype="copy")
Value.shared["dbtype"]=typ
return tmp
@staticmethod
def gettreat(**kwarg):
treat = "MISS"
for key in kwarg:
if key.lower() == "treat":
treat= kwarg[key]
if isinstance(treat, string_types):
treat=treat.upper()
return treat
| 29.059028
| 147
| 0.634962
|
c41d9633d90f8254b48c7ff0a74b20ddd4339bce
| 7,302
|
py
|
Python
|
sentence_fluency/train.py
|
gdyp/bert-awesome
|
f0836aadaa23e95b82449570a39768bfe24bdadd
|
[
"Apache-2.0"
] | null | null | null |
sentence_fluency/train.py
|
gdyp/bert-awesome
|
f0836aadaa23e95b82449570a39768bfe24bdadd
|
[
"Apache-2.0"
] | null | null | null |
sentence_fluency/train.py
|
gdyp/bert-awesome
|
f0836aadaa23e95b82449570a39768bfe24bdadd
|
[
"Apache-2.0"
] | null | null | null |
#! -*- coding: utf-8 -*-
import os
import logging
from tqdm import tqdm
import random
import numpy as np
import argparse
import torch
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, TensorDataset, RandomSampler, DistributedSampler
from pytorch_pretrained_bert.modeling import BertForSequenceClassification
from pytorch_pretrained_bert.optimization import BertAdam, warmup_linear
from utils.args import Args
from utils.utils import get_accuracy
from sentence_fluency.processing import Processing, convert_example_to_features
SOURCE = '/home/gump/Software/pycharm-2018.1.6/projects/bert-for-classificaion/sentence_fluency/data/'
args_dict = Args.args
args_dict['data_dir'] = SOURCE
args_dict['train_batch_size'] = 64
args_dict['output_dir'] = SOURCE + 'model/'
args_dict['learning_rate'] = 5e-5
args_dict['num_train_epochs'] = 7
parser = argparse.ArgumentParser()
for key, value in args_dict.items():
parser.add_argument('-'+key, default=value)
args = parser.parse_args()
logging.basicConfig(format='%(asctime)s-%(levelname)s-%(name)s-%(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO)
logger = logging.getLogger(__name__)
args.train_batch_size = args.train_batch_size // args.gradient_accumulation_steps
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
processor = Processing(data_dir=args.data_dir)
# load data
logging.info('create train features')
num_train_optimization_steps = None
train_examples = processor.get_train_examples()
train_features = convert_example_to_features(train_examples)
# train_features_data = features_translation(train_features)
all_input_ids = torch.LongTensor([f.input_ids for f in train_features])
all_input_mask = torch.LongTensor([f.input_mask for f in train_features])
all_segment_ids = torch.LongTensor([f.segment_ids for f in train_features])
all_label_ids = torch.LongTensor([f.label_id for f in train_features])
# num_train_optimization_steps = int(len(train_features)/args.train_batch_size/args.gradient_accumulation_steps)*args.epochs
if args.do_train:
num_train_optimization_steps = int(
len(train_examples) / args.train_batch_size / args.gradient_accumulation_steps) * args.num_train_epochs
logging.info('create batch data')
train_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)
if args.local_rank == -1:
train_sampler = RandomSampler(train_data)
else:
train_sampler = DistributedSampler(train_data)
train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size)
val_examples = processor.get_dev_examples()
val_features = convert_example_to_features(val_examples)
val_input_ids = torch.LongTensor([f.input_ids for f in val_features])
val_input_mask = torch.LongTensor([f.input_mask for f in val_features])
val_segment_ids = torch.LongTensor([f.segment_ids for f in val_features])
val_label_ids = torch.LongTensor([f.label_id for f in val_features])
val_data = TensorDataset(val_input_ids, val_input_mask, val_segment_ids, val_label_ids)
val_dataloader = DataLoader(val_data, shuffle=True, batch_size=args.eval_batch_size)
# load model
logging.info('create model')
model = BertForSequenceClassification.from_pretrained(args.bert_model, cache_dir='data/cache', num_labels=2)
if args.fp16:
model.half()
if torch.cuda.is_available():
model.cuda()
# optimizer
parameters = list(model.named_parameters())
# parameters = [n for n in parameters if 'pooler' not in n[0]]
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in parameters if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
{'params': [p for n, p in parameters if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
if args.fp16:
try:
from apex.optimizers import FP16_Optimizer
from apex.optimizers import FusedAdam
except ImportError:
raise ImportError('please install apex')
optimizer = FusedAdam(optimizer_grouped_parameters,
lr=getattr(args, 'lr'),
bias_correction=False,
max_grad_norm=1.0)
if args.loss_scale == 0:
optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True)
else:
optimizer = FP16_Optimizer(optimizer, static_loss_scale=args.loss_scale)
else:
optimizer = BertAdam(optimizer_grouped_parameters,
lr=args.learning_rate,
warmup=args.warmup_proportion,
t_total=num_train_optimization_steps)
loss_fct = CrossEntropyLoss()
# train
global_step = 0
last_val_loss = 100
epochs = getattr(args, 'num_train_epochs')
for i in range(1, epochs + 1):
training_loss = 0
model.train()
for step, batch in enumerate(tqdm(train_dataloader, desc='train', total=len(train_dataloader))):
if torch.cuda.is_available():
batch = tuple(item.cuda() for item in batch)
input_ids, input_mask, segment_ids, label_ids = batch
logits = model(input_ids, segment_ids, input_mask)
loss = loss_fct(logits.view(-1, 2), label_ids.view(-1))
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
training_loss += loss.item()
if args.fp16:
optimizer.backward(loss)
else:
loss.backward()
if (step + 1) % args.gradient_accumulation_steps == 0:
if args.fp16:
lr_this_step = args.lr * warmup_linear(global_step / num_train_optimization_steps,
args.warmup_proportion)
for param_group in optimizer.param_groups:
param_group['lr'] = lr_this_step
optimizer.step()
optimizer.zero_grad()
global_step += 1
validation_loss = 0
correct_count = 0
model.eval()
for batch in val_dataloader:
if torch.cuda.is_available():
batch = (item.cuda() for item in batch)
input_ids, input_mask, segment_ids, label_ids = batch
with torch.no_grad():
val_logits = model(input_ids, segment_ids, input_mask)
val_loss = loss_fct(val_logits.view(-1, 2), label_ids.view(-1))
correct_count = get_accuracy(val_logits.view(-1, 2), label_ids.view(-1), correct_count)
validation_loss += val_loss.item()
training_loss = training_loss / len(train_dataloader)
validation_loss = validation_loss / len(val_data)
accuracy = correct_count / len(val_data)
logging.info('{}/{}, train loss: {}, validation loss: {}, val_accuracy: {}'.format(
i, epochs, training_loss, validation_loss, accuracy))
if validation_loss < last_val_loss:
model_to_save = model.module if hasattr(model, 'module') else model
output_model_file = os.path.join(args.output_dir, 'model_' + str(i) + '.bin')
torch.save(model_to_save.state_dict(), output_model_file)
output_config_file = os.path.join(args.output_dir, 'config_' + str(i) + '.json')
with open(output_config_file, 'w') as f:
f.write(model_to_save.config.to_json_string())
last_val_loss = validation_loss
| 39.684783
| 124
| 0.712271
|
a67296037cfced5a16dfe15a91a35b053913c317
| 2,270
|
py
|
Python
|
aligner/check-alignments.py
|
anoopsarkar/nlp-class-hw-archive
|
04c92930ae0be9c5cdf3c41234604af97335e943
|
[
"MIT"
] | null | null | null |
aligner/check-alignments.py
|
anoopsarkar/nlp-class-hw-archive
|
04c92930ae0be9c5cdf3c41234604af97335e943
|
[
"MIT"
] | null | null | null |
aligner/check-alignments.py
|
anoopsarkar/nlp-class-hw-archive
|
04c92930ae0be9c5cdf3c41234604af97335e943
|
[
"MIT"
] | 1
|
2021-01-27T01:20:00.000Z
|
2021-01-27T01:20:00.000Z
|
#!/usr/bin/env python
import optparse, sys, os, logging
optparser = optparse.OptionParser()
optparser.add_option("-d", "--datadir", dest="datadir", default="data", help="data directory (default=data)")
optparser.add_option("-p", "--prefix", dest="fileprefix", default="hansards", help="prefix of parallel data files (default=hansards)")
optparser.add_option("-e", "--english", dest="english", default="en", help="suffix of English (target language) filename (default=en)")
optparser.add_option("-f", "--french", dest="french", default="fr", help="suffix of French (source language) filename (default=fr)")
optparser.add_option("-l", "--logfile", dest="logfile", default=None, help="filename for logging output (default=None)")
optparser.add_option("-i", "--inputfile", dest="inputfile", default=None, help="input alignments file (default=sys.stdin)")
(opts, args) = optparser.parse_args()
f_data = open("%s.%s" % (os.path.join(opts.datadir, opts.fileprefix), opts.french), 'r')
e_data = open("%s.%s" % (os.path.join(opts.datadir, opts.fileprefix), opts.english), 'r')
if opts.logfile:
logging.basicConfig(filename=opts.logfile, filemode='w', level=logging.INFO)
inp = sys.stdin if opts.inputfile is None else open(opts.inputfile, 'r')
for (n, (f, e, a)) in enumerate(zip(f_data, e_data, inp)):
size_f = len(f.strip().split())
size_e = len(e.strip().split())
try:
alignment = set([tuple(map(int, x.split("-"))) for x in a.strip().split()])
for (i,j) in alignment:
if (i>=size_f or j>size_e):
logging.warning("WARNING (%s): Sentence %d, point (%d,%d) is not a valid link\n" % (sys.argv[0],n,i,j))
pass
except (Exception):
logging.error("ERROR (%s) line %d is not formatted correctly:\n %s" % (sys.argv[0],n,a))
logging.error("Lines can contain only tokens \"i-j\", where i and j are integer indexes into the French and English sentences, respectively.\n")
sys.exit(1)
sys.stdout.write(a)
warned = False
for a in inp:
if not warned:
logging.warning("WARNING (%s): alignment file is longer than bitext\n" % sys.argv[0])
warned = True
sys.stdout.write(a)
try:
if next(f_data):
logging.warning("WARNING (%s): bitext is longer than alignment\n" % sys.argv[0])
except StopIteration:
pass
| 47.291667
| 148
| 0.677533
|
a0aa6adb70668b43243a5a98551e3de271c5b451
| 12,087
|
py
|
Python
|
flexget/plugins/input/from_imdb.py
|
fotile96/Flexget
|
0f7f805a43e25e27fce195b91228f911bf4c6b1e
|
[
"MIT"
] | 1
|
2021-03-24T11:54:01.000Z
|
2021-03-24T11:54:01.000Z
|
flexget/plugins/input/from_imdb.py
|
fotile96/Flexget
|
0f7f805a43e25e27fce195b91228f911bf4c6b1e
|
[
"MIT"
] | null | null | null |
flexget/plugins/input/from_imdb.py
|
fotile96/Flexget
|
0f7f805a43e25e27fce195b91228f911bf4c6b1e
|
[
"MIT"
] | null | null | null |
from __future__ import unicode_literals, division, absolute_import
from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
from past.builtins import basestring
import collections
import logging
from jsonschema.compat import str_types
from flexget import plugin
from flexget.config_schema import one_or_more
from flexget.event import event
from flexget.entry import Entry
from flexget.utils.cached_input import cached
log = logging.getLogger('from_imdb')
class FromIMDB(object):
"""
This plugin enables generating entries based on an entity, an entity being a person, character or company.
It's based on IMDBpy, which is required (pip install imdbpy). The basic config requires just the IMDB ID of the
desired entity.
For example:
from_imdb: ch0001354
Schema description:
Other than ID, all other properties are meant to filter the full list that the entity generates.
id: string that relates to a supported entity type. For example: 'nm0000375'. Required.
job_types: a string or list with job types from job_types. Default is 'actor'.
content_types: A string or list with content types from content_types. Default is 'movie'.
max_entries: The maximum number of entries that can be returned. This value's purpose is basically flood protection
against unruly configurations that will return too many results. Default is 200.
Advanced config example:
dynamic_movie_queue:
from_imdb:
id: co0051941
job_types:
- actor
- director
content_types: tv series
accept_all: yes
movie_queue: add
"""
job_types = ['actor', 'actress', 'director', 'producer', 'writer', 'self', 'editor', 'miscellaneous',
'editorial department', 'cinematographer', 'visual effects', 'thanks', 'music department',
'in development', 'archive footage', 'soundtrack']
content_types = ['movie', 'tv series', 'tv mini series', 'video game', 'video movie', 'tv movie', 'episode']
content_type_conversion = {
'movie': 'movie',
'tv series': 'tv',
'tv mini series': 'tv',
'tv movie': 'tv',
'episode': 'tv',
'video movie': 'video',
'video game': 'video game'
}
character_content_type_conversion = {
'movie': 'feature',
'tv series': 'tv',
'tv mini series': 'tv',
'tv movie': 'tv',
'episode': 'tv',
'video movie': 'video',
'video game': 'video-game',
}
jobs_without_content_type = ['actor', 'actress', 'self', 'in development', 'archive footage']
imdb_pattern = one_or_more({'type': 'string',
'pattern': r'(nm|co|ch)\d{7}',
'error_pattern': 'Get the id from the url of the person/company you want to use,'
' e.g. http://imdb.com/text/<id here>/blah'}, unique_items=True)
schema = {
'oneOf': [
imdb_pattern,
{'type': 'object',
'properties': {
'id': imdb_pattern,
'job_types': one_or_more({'type': 'string', 'enum': job_types}, unique_items=True),
'content_types': one_or_more({'type': 'string', 'enum': content_types}, unique_items=True),
'max_entries': {'type': 'integer'},
'match_type': {'type': 'string', 'enum': ['strict', 'loose']}
},
'required': ['id'],
'additionalProperties': False
}
],
}
def prepare_config(self, config):
"""
Converts config to dict form and sets defaults if needed
"""
config = config
if isinstance(config, basestring):
config = {'id': [config]}
elif isinstance(config, list):
config = {'id': config}
if isinstance(config, dict) and not isinstance(config['id'], list):
config['id'] = [config['id']]
config.setdefault('content_types', [self.content_types[0]])
config.setdefault('job_types', [self.job_types[0]])
config.setdefault('max_entries', 200)
config.setdefault('match_type', 'strict')
if isinstance(config.get('content_types'), str_types):
log.debug('Converted content type from string to list.')
config['content_types'] = [config['content_types']]
if isinstance(config['job_types'], str_types):
log.debug('Converted job type from string to list.')
config['job_types'] = [config['job_types']]
# Special case in case user meant to add actress instead of actor (different job types in IMDB)
if 'actor' in config['job_types'] and 'actress' not in config['job_types']:
config['job_types'].append('actress')
return config
def get_items(self, config):
items = []
for id in config['id']:
try:
entity_type, entity_object = self.get_entity_type_and_object(id)
except Exception as e:
log.error(
'Could not resolve entity via ID: {}. '
'Either error in config or unsupported entity. Error:{}'.format(id, e))
continue
items += self.get_items_by_entity(entity_type, entity_object, config.get('content_types'),
config.get('job_types'), config.get('match_type'))
return set(items)
def get_entity_type_and_object(self, imdb_id):
"""
Return a tuple of entity type and entity object
:param imdb_id: string which contains IMDB id
:return: entity type, entity object (person, company, etc.)
"""
if imdb_id.startswith('nm'):
person = self.ia.get_person(imdb_id[2:])
log.info('Starting to retrieve items for person: %s' % person)
return 'Person', person
elif imdb_id.startswith('co'):
company = self.ia.get_company(imdb_id[2:])
log.info('Starting to retrieve items for company: %s' % company)
return 'Company', company
elif imdb_id.startswith('ch'):
character = self.ia.get_character(imdb_id[2:])
log.info('Starting to retrieve items for Character: %s' % character)
return 'Character', character
def get_items_by_entity(self, entity_type, entity_object, content_types, job_types, match_type):
"""
Gets entity object and return movie list using relevant method
"""
if entity_type == 'Company':
return self.items_by_company(entity_object)
if entity_type == 'Character':
return self.items_by_character(entity_object, content_types, match_type)
elif entity_type == 'Person':
return self.items_by_person(entity_object, job_types, content_types, match_type)
def flatten_list(self, _list):
"""
Gets a list of lists and returns a flat list
"""
for el in _list:
if isinstance(el, collections.Iterable) and not isinstance(el, basestring):
for sub in self.flatten_list(el):
yield sub
else:
yield el
def flat_list(self, non_flat_list, remove_none=False):
flat_list = self.flatten_list(non_flat_list)
if remove_none:
flat_list = [_f for _f in flat_list if _f]
return flat_list
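    # Illustrative behaviour of the two helpers above (the example values are
    # made up, not taken from IMDb data):
    #   list(self.flatten_list([['a', 'b'], 'c', [['d']]]))     # -> ['a', 'b', 'c', 'd']
    #   self.flat_list([['a'], None, ['b']], remove_none=True)  # -> ['a', 'b']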
def filtered_items(self, unfiltered_items, content_types, match_type):
items = []
unfiltered_items = set(unfiltered_items)
for item in sorted(unfiltered_items):
if match_type == 'strict':
log.debug('Match type is strict, verifying item type to requested content types')
self.ia.update(item)
if item['kind'] in content_types:
log.verbose('Adding item "{}" to list. Item kind is "{}"'.format(item, item['kind']))
items.append(item)
else:
                    log.verbose('Rejecting item "{}". Item kind is "{}"'.format(item, item['kind']))
else:
log.debug('Match type is loose, all items are being added')
items.append(item)
return items
def items_by_person(self, person, job_types, content_types, match_type):
"""
Return item list for a person object
"""
unfiltered_items = self.flat_list(
[self.items_by_job_type(person, job_type, content_types) for job_type in job_types],
remove_none=True)
return self.filtered_items(unfiltered_items, content_types, match_type)
def items_by_content_type(self, person, job_type, content_type):
return [_f for _f in (person.get(job_type + ' ' + self.content_type_conversion[content_type], [])) if _f]
def items_by_job_type(self, person, job_type, content_types):
items = person.get(job_type, []) if job_type in self.jobs_without_content_type else [
person.get(job_type + ' ' + 'documentary', []) and
person.get(job_type + ' ' + 'short', []) and
self.items_by_content_type(person, job_type, content_type)
if content_type == 'movie'
else
self.items_by_content_type(person, job_type, content_type)
for content_type in content_types
]
return [_f for _f in items if _f]
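    # Note added for clarity (not part of the original plugin): for non-movie
    # content types the comprehension above simply collects the person's credits
    # per content type. For 'movie', the chained `and` only falls through to
    # items_by_content_type() when the person has both documentary and short
    # credits for that job type; otherwise the empty list short-circuits and is
    # dropped by the final filter.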
def items_by_character(self, character, content_types, match_type):
"""
Return items list for a character object
:param character: character object
:param content_types: content types as defined in config
:return:
"""
unfiltered_items = self.flat_list(
[character.get(self.character_content_type_conversion[content_type])
for content_type in content_types], remove_none=True)
return self.filtered_items(unfiltered_items, content_types, match_type)
def items_by_company(self, company):
"""
Return items list for a company object
:param company: company object
:return: company items list
"""
return company.get('production companies')
@cached('from_imdb', persist='2 hours')
def on_task_input(self, task, config):
try:
from imdb import IMDb
self.ia = IMDb()
except ImportError:
log.error('IMDBPY is required for this plugin. Please install using "pip install imdbpy"')
return
entries = []
config = self.prepare_config(config)
items = self.get_items(config)
if not items:
log.error('Could not get IMDB item list, check your configuration.')
return
for item in items:
entry = Entry(title=item['title'],
imdb_id='tt' + self.ia.get_imdbID(item),
url='',
imdb_url=self.ia.get_imdbURL(item))
if entry.isvalid():
if entry not in entries:
entries.append(entry)
if entry and task.options.test:
log.info("Test mode. Entry includes:")
for key, value in list(entry.items()):
log.info(' {}: {}'.format(key.capitalize(), value))
else:
log.error('Invalid entry created? %s' % entry)
if len(entries) <= config.get('max_entries'):
return entries
else:
log.warning(
'Number of entries (%s) exceeds maximum allowed value %s. '
'Edit your filters or raise the maximum value by entering a higher "max_entries"' % (
len(entries), config.get('max_entries')))
return
@event('plugin.register')
def register_plugin():
plugin.register(FromIMDB, 'from_imdb', api_ver=2)
| 40.29
| 114
| 0.590717
|
2108522aa15900f28d1b7455bfd24ddc4545056f
| 66
|
py
|
Python
|
info/examples/example/source/hello.py
|
ADicksonLab/openmm_systems
|
ae6e0acb0d55f93de8b68f48b43d3df40311a2e3
|
[
"MIT"
] | 2
|
2020-04-30T19:58:50.000Z
|
2021-06-30T05:39:02.000Z
|
info/examples/example/source/hello.py
|
ADicksonLab/openmm_systems
|
ae6e0acb0d55f93de8b68f48b43d3df40311a2e3
|
[
"MIT"
] | null | null | null |
info/examples/example/source/hello.py
|
ADicksonLab/openmm_systems
|
ae6e0acb0d55f93de8b68f48b43d3df40311a2e3
|
[
"MIT"
] | null | null | null |
with open("../input/hello.txt", 'r') as rf:
print(rf.read())
| 16.5
| 43
| 0.560606
|
76d9585dfcd35e0034c29a5144bc566819e09611
| 4,372
|
py
|
Python
|
oaoo_oa_meeting_manage/models/meeting_summary.py
|
niulinlnc/odooOaModels
|
7204b44d734b221f89f0a6d1eccaeb1b79474d3b
|
[
"Apache-2.0"
] | 8
|
2020-03-23T06:58:56.000Z
|
2021-09-13T02:19:13.000Z
|
oaoo_oa_meeting_manage/models/meeting_summary.py
|
ty1539/odooOaModels
|
4afc2f7dc36a8d3bcb02a20ec75e67af9af2a154
|
[
"Apache-2.0"
] | null | null | null |
oaoo_oa_meeting_manage/models/meeting_summary.py
|
ty1539/odooOaModels
|
4afc2f7dc36a8d3bcb02a20ec75e67af9af2a154
|
[
"Apache-2.0"
] | 13
|
2019-10-13T08:46:59.000Z
|
2022-02-24T02:47:45.000Z
|
# -*- coding: utf-8 -*-
###################################################################################
# Copyright (C) 2019 SuXueFeng
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###################################################################################
import logging
from odoo import models, fields, api, exceptions
from odoo import SUPERUSER_ID
from odoo.exceptions import UserError
import uuid
logger = logging.getLogger(__name__)
class MeetingSummary(models.Model):
_name = 'oa.meeting.meeting.summary'
_inherit = ['mail.thread', 'ir.needaction_mixin']
_rec_name = 'meeting_title'
_description = u'会议纪要'
meeting_title = fields.Many2one('oa.meeting.meeting.application', string=u'所属会议', required=True)
# model_name = fields.Char(u'模型', store=1, index=1)
# res_id = fields.Integer(u'记录ID', store=1)
user_id = fields.Many2one('res.users', string=u'记录人', readonly=True, default=lambda self: self.env.user)
company_id = fields.Many2one('res.company', string=u'所属公司', default=lambda self: self.env.user.company_id)
release_time = fields.Datetime(string=u'发布时间', placeholder=u"当前系统时间")
meeting_summary = fields.Html(string=u'纪要内容')
summary_file = fields.Many2many('ir.attachment', string=u'上传附件')
meeting_members = fields.Many2many('res.users', string=u'抄送至')
check_state = fields.Boolean(default=False)
active = fields.Boolean('Active', default=True)
state = fields.Selection([('init', u'暂存'), ('release', u'已发布')], string=u'发布状态', readonly=True)
uuid = fields.Char('UUID', default=lambda s: uuid.uuid4(), copy=False, required=True)
super_id = fields.Integer(default=SUPERUSER_ID)
_sql_constraints = [
('uuid_uniq', 'unique (uuid)', u"UUID存在重复列,请重试"),
]
@api.multi
def unlink(self):
for line in self:
if line.state == 'release':
raise UserError(u'您不能删除已经发布的纪要文件')
else:
sql = """
UPDATE oa_meeting_meeting_application SET copy_state = 'unpublished' WHERE id = %s
""" % line.meeting_title.id
line.env.cr.execute(sql)
return super(MeetingSummary, self).unlink()
@api.multi
def change_state(self):
self.write({'state': 'release'})
self.write({'check_state': True})
self.send_summary()
@api.onchange('meeting_title')
def _onchange_meeting_title(self):
for line in self:
if line.meeting_title:
line.meeting_members = line.meeting_title.employee_ids
def send_summary(self):
mail_meeting_message = self.env.ref('oa_meeting.mail_meeting_message')
model_name = self.env['ir.model'].search([('model', '=', self._name)]).name
self.env['mail.message'].sudo().create({
'subject': self._name,
'model': self._name,
'res_id': self.id,
'record_name': model_name,
'body': u'<p>给您抄送了一份会议纪要</p>',
'partner_ids': [(6, 0, [user.partner_id.id for user in self.meeting_members])],
'channel_ids': [(6, 0, [mail_meeting_message.id])],
'message_type': 'notification',
'author_id': self.env.user.partner_id.id
})
@api.model
def create(self, vals):
vals['state'] = 'init'
result = super(MeetingSummary, self).create(vals)
return result
@api.constrains('meeting_title')
def constraint_member_ids(self):
        for line in self:  # flag the related meeting so that no further meeting summary can be created for it
if not line.meeting_title.approval_state:
line.meeting_title.copy_state = 'published'
else:
sql = """
UPDATE oa_meeting_meeting_application SET copy_state = 'published' WHERE id = %s
""" % line.meeting_title.id
line.env.cr.execute(sql)
| 41.245283
| 110
| 0.618253
|
3146d8b623ce45d9a412f257b34ef2659f902545
| 922
|
py
|
Python
|
isi_sdk_8_2_1/test/test_auth_log_level_level.py
|
mohitjain97/isilon_sdk_python
|
a371f438f542568edb8cda35e929e6b300b1177c
|
[
"Unlicense"
] | 24
|
2018-06-22T14:13:23.000Z
|
2022-03-23T01:21:26.000Z
|
isi_sdk_8_2_1/test/test_auth_log_level_level.py
|
mohitjain97/isilon_sdk_python
|
a371f438f542568edb8cda35e929e6b300b1177c
|
[
"Unlicense"
] | 46
|
2018-04-30T13:28:22.000Z
|
2022-03-21T21:11:07.000Z
|
isi_sdk_8_2_1/test/test_auth_log_level_level.py
|
mohitjain97/isilon_sdk_python
|
a371f438f542568edb8cda35e929e6b300b1177c
|
[
"Unlicense"
] | 29
|
2018-06-19T00:14:04.000Z
|
2022-02-08T17:51:19.000Z
|
# coding: utf-8
"""
Isilon SDK
Isilon SDK - Language bindings for the OneFS API # noqa: E501
OpenAPI spec version: 8
Contact: sdk@isilon.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import isi_sdk_8_2_1
from isi_sdk_8_2_1.models.auth_log_level_level import AuthLogLevelLevel # noqa: E501
from isi_sdk_8_2_1.rest import ApiException
class TestAuthLogLevelLevel(unittest.TestCase):
"""AuthLogLevelLevel unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testAuthLogLevelLevel(self):
"""Test AuthLogLevelLevel"""
# FIXME: construct object with mandatory attributes with example values
# model = isi_sdk_8_2_1.models.auth_log_level_level.AuthLogLevelLevel() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| 22.487805
| 93
| 0.711497
|
a3ac0cb3ef6078714c488a1db9e03657289507bc
| 3,254
|
py
|
Python
|
avscript/avs/regex.py
|
kenlowrie/avscript
|
7e3550adc46a01785ea89cdcd9ddaa5ac35bdbdd
|
[
"Apache-2.0"
] | 1
|
2021-09-30T19:29:30.000Z
|
2021-09-30T19:29:30.000Z
|
avscript/avs/regex.py
|
kenlowrie/avscript
|
7e3550adc46a01785ea89cdcd9ddaa5ac35bdbdd
|
[
"Apache-2.0"
] | 7
|
2018-07-16T22:52:55.000Z
|
2020-05-20T23:48:36.000Z
|
avscript/avs/regex.py
|
kenlowrie/avscript
|
7e3550adc46a01785ea89cdcd9ddaa5ac35bdbdd
|
[
"Apache-2.0"
] | 1
|
2018-05-24T22:58:44.000Z
|
2018-05-24T22:58:44.000Z
|
#!/usr/bin/env python
from re import compile, match, error
class Regex(object):
"""Wrapper class for regular expressions."""
def __init__(self, regex, flags=0):
"""Regex class constructor.
Compiles the regex string with any required flags for efficiency.
"""
self.regex = compile(regex, flags)
def is_match(self, str):
return match(self.regex, str)
class RegexSafe(Regex):
def __init__(self, regex, flags=0):
try:
super(RegexSafe, self).__init__(regex, flags)
self.is_valid = True
except error:
self.is_valid = False
def is_match(self, str):
if self.is_valid:
return super(RegexSafe, self).is_match(str)
return None
class RegexMD(Regex):
"""This class holds the regular expressions used when applying markdown
to inline formatting syntax."""
def __init__(self, regex, new_repl_str, flags=0):
"""Constructor for the RegexMD class.
Arguments:
regex -- the regex string used to detect markdown in the line
new_repl_str -- the string that will be used to insert the markdown into
the line. If this is None, then the handler for the regex markdown type
is responsible for constructing the replacement text.
flags -- flags to re.compile()
"""
super(RegexMD, self).__init__(regex, flags)
self.new_str = new_repl_str
class RegexMain(object):
"""This class holds the regular expressions used for the main parsing loop."""
def __init__(self, starts_new_div, uses_raw_line, allows_class_prefix, test_str, match_str, test_flags=0, match_flags=0):
"""Constructor for the RegexMain class.
Arguments:
starts_new_div -- signals whether this regex will stop the peekplaintext() from processing new lines
uses_raw_line -- signals whether this regex should be applied to the raw line or the marked_down line
allows_class_prefix -- signals whether this regex can be prefixed with a class override
test_str -- this is the regex string used to detect if the line is a match
match_str -- this is the regex string used when parsing the line into groups. If None, uses test_str
test_flags -- re flags to use when compiling test_str
match_flags -- re flags to use when compiling match_str
"""
self.test_str = Regex(test_str, test_flags)
self.match_str = None if not match_str else Regex(match_str, match_flags)
self.starts_new_div = starts_new_div
self.uses_raw_line = uses_raw_line
self.allows_class_prefix = allows_class_prefix
def test_regex(self):
"""Return the regex used to test if the current line matches a parse type."""
return self.test_str.regex
def match_regex(self):
"""Return the regex used to match a parse type and breakdown the elements.
        If there is no match regex defined, this method returns the regex for
testing if the line matches a specific parse type."""
return self.match_str.regex if self.match_str else self.test_str.regex
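# Minimal usage sketch (illustrative only; the pattern strings and variable
# names below are invented for this example and are not part of the module):
#
#   header = RegexMain(True, False, True, r'^#+\s', r'^(#+)\s+(.*)$')
#   line = '## Scene 2'
#   if header.test_regex().match(line):
#       level, title = header.match_regex().match(line).groups()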
if __name__ == '__main__':
print("Library module. Not directly callable.")
| 38.738095
| 125
| 0.675169
|
47da826bee5db616df50cf1aff4ad073337cc5a5
| 777
|
py
|
Python
|
xpipe/template/template.py
|
Scotchy/XPipe
|
723317ab7029725cc58c4fa74c41cf6b3e3151de
|
[
"MIT"
] | null | null | null |
xpipe/template/template.py
|
Scotchy/XPipe
|
723317ab7029725cc58c4fa74c41cf6b3e3151de
|
[
"MIT"
] | 8
|
2021-11-26T20:28:30.000Z
|
2022-03-26T23:50:45.000Z
|
xpipe/template/template.py
|
Scotchy/XPipe
|
723317ab7029725cc58c4fa74c41cf6b3e3151de
|
[
"MIT"
] | null | null | null |
import yaml
from .utils import is_param
from .parameter import Parameters
try:
from yaml import CLoader as Loader, CDumper as Dumper
except ImportError:
from yaml import Loader, Dumper
__all__ = ["Template"]
class Template():
def __init__(self, yaml_dict):
self.yaml_dict = yaml_dict
for key, sub_template in self.yaml_dict.items():
if is_param(sub_template):
setattr(self, key, Parameters(key, sub_template))
else:
setattr(self, key, Template(sub_template))
pass
@staticmethod
def from_yaml(template_file : str):
with open(template_file, "r") as stream:
yaml_template = yaml.load(stream, Loader=Loader)
return Template(yaml_template)
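# Minimal usage sketch (illustrative only; "config.yaml" and the shape of its
# contents are assumptions, not defined by this module):
#
#   template = Template.from_yaml("config.yaml")
#   # every top-level key of the YAML file becomes an attribute holding either
#   # a Parameters instance (when is_param() matches) or a nested Template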
| 26.793103
| 65
| 0.647362
|
2881070b76215da3f75f4057eb5c8d22274999df
| 923
|
py
|
Python
|
oop/Enum.py
|
Alpha-chen/PythonStudy
|
f067b29c9512f2fb074010f7e77778790e748474
|
[
"Apache-2.0"
] | 4
|
2019-01-06T09:45:11.000Z
|
2021-06-29T10:07:54.000Z
|
oop/Enum.py
|
Alpha-chen/PythonStudy
|
f067b29c9512f2fb074010f7e77778790e748474
|
[
"Apache-2.0"
] | null | null | null |
oop/Enum.py
|
Alpha-chen/PythonStudy
|
f067b29c9512f2fb074010f7e77778790e748474
|
[
"Apache-2.0"
] | 1
|
2021-02-25T02:43:15.000Z
|
2021-02-25T02:43:15.000Z
|
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
'Enumeration class'
__author__ = 'click'
__date__ = '2018/7/13 4:28 PM'
from enum import unique, Enum
@unique
class GenderEnum(Enum):
    MALE = 0
FAMALE = 1
class Student(object):
def __init__(self, name, gender):
self.__name = name
self.__gender = gender
@property
def getGender(self):
return self.__gender
@property
def getName(self):
return self.__name
    def __str__(self):
        return "Student %s %s" % (self.__name, self.__gender)
def __call__(self):
print("Student %s %s" % (self.__name, self.__gender))
if __name__ == '__main__':
s = Student('小明', GenderEnum.FAMALE)
# print(s)
print(s.__str__())
    if s.getGender == GenderEnum.FAMALE:
        print("Student %s gender is %s" % (s.getName, GenderEnum.FAMALE))
    elif s.getGender == GenderEnum.MALE:
        print("Student %s gender is %s" % (s.getName, GenderEnum.MALE))
| 20.065217
| 61
| 0.617551
|
ad3f66711df730ff28411b14162cc55ec9d6009d
| 2,514
|
py
|
Python
|
vqeparser/geom_parser.py
|
robagl9695/vqedriver
|
899aab46aff78a288207fd82602e11fd8dfa0bfa
|
[
"Apache-2.0"
] | null | null | null |
vqeparser/geom_parser.py
|
robagl9695/vqedriver
|
899aab46aff78a288207fd82602e11fd8dfa0bfa
|
[
"Apache-2.0"
] | null | null | null |
vqeparser/geom_parser.py
|
robagl9695/vqedriver
|
899aab46aff78a288207fd82602e11fd8dfa0bfa
|
[
"Apache-2.0"
] | 1
|
2021-10-01T23:22:51.000Z
|
2021-10-01T23:22:51.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Jan 16 21:36:20 2021
@author: rgonzalez
"""
from qiskit.chemistry.drivers import UnitsType
import os
import sys
def read_xyzfile(filename):
prefix = os.getcwd()
xyzcoords = open(prefix + '/' + filename, 'r')
if xyzcoords.mode == 'r':
contents = xyzcoords.readlines()
xyzcoords.close()
for i in reversed(range(2, len(contents), 1)):
if contents[i] == '\n':
contents.pop(i)
try:
numatoms = int(contents[0])
    except (ValueError, IndexError):
        raise TypeError('Invalid XYZ file format')
if len(contents) != numatoms+2:
raise TypeError('Invalid XYZ file format')
coords = ''
for i in range(2, len(contents)-1, 1):
contents[i] = ' '.join(contents[i].split())
coords += contents[i].strip('\n') + '; '
contents[-1] = ' '.join(contents[-1].split())
coords += contents[-1].strip('\n')
coords = coords.replace('\t', ' ')
return coords
def _geom(blocks):
geom_block = [block for block in blocks if block.startswith('%geometry')]
if len(geom_block) == 0:
print('GeometryInputError: Geometry specification missing')
sys.exit()
elif len(geom_block) > 1:
print('GeometryInputError: More than geometry specified')
sys.exit()
geom_block = geom_block[0]
geom_opts = geom_block.split(' ')
geom_xyzfile = [opt for opt in geom_opts if opt.startswith('xyzfile')][0]
xyzfile = geom_xyzfile.split('=')[1]
coords = read_xyzfile(xyzfile)
geom_charge = [opt for opt in geom_opts if opt.startswith('charge')]
if len(geom_charge) != 0:
charge = int(geom_charge[0].split('=')[1])
else:
charge = 0
geom_spin = [opt for opt in geom_opts if opt.startswith('spin')]
if len(geom_spin) != 0:
spin = int(geom_spin[0].split('=')[1])
else:
spin = 0
geom_units = [opt for opt in geom_opts if opt.startswith('units')]
if len(geom_units) != 0:
if geom_units[0].split('=')[1] == 'angstrom':
units = UnitsType.ANGSTROM
elif geom_units[0].split('=')[1] == 'bohr':
units = UnitsType.BOHR
else:
print('GeometryInputError: Invalid Units on Input, please select between "angstrom" and "bohr" (Default = angstrom)')
sys.exit()
else:
units = UnitsType.ANGSTROM
return coords, charge, spin, units
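# Illustrative input for _geom(), inferred from the option handling above (the
# file name is made up and the exact block syntax of the full input format may
# differ):
#
#   blocks = ['%geometry xyzfile=h2o.xyz charge=0 spin=0 units=angstrom']
#   coords, charge, spin, units = _geom(blocks)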
| 28.247191
| 129
| 0.579952
|
26b163490343cb225ee975122ac9c83ba1fc56da
| 661
|
py
|
Python
|
manage.py
|
colombia-immap/unicef-school-mapping-back
|
76e93f0b177756ea29e45c2c3564e4894d93f004
|
[
"BSD-3-Clause"
] | null | null | null |
manage.py
|
colombia-immap/unicef-school-mapping-back
|
76e93f0b177756ea29e45c2c3564e4894d93f004
|
[
"BSD-3-Clause"
] | null | null | null |
manage.py
|
colombia-immap/unicef-school-mapping-back
|
76e93f0b177756ea29e45c2c3564e4894d93f004
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'mapa.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| 27.541667
| 73
| 0.676248
|
1b884d3efcc2c12f71e7935112420e5267858ed8
| 75
|
py
|
Python
|
gridworld/agents/buildings/__init__.py
|
NREL/PowerGridworld
|
2f72ac5bb663092ca806c6fff9c7cf70f94fd775
|
[
"BSD-3-Clause"
] | 24
|
2021-11-12T03:42:38.000Z
|
2022-02-27T17:22:30.000Z
|
gridworld/agents/buildings/__init__.py
|
NREL/PowerGridworld
|
2f72ac5bb663092ca806c6fff9c7cf70f94fd775
|
[
"BSD-3-Clause"
] | 4
|
2021-11-11T03:27:58.000Z
|
2021-11-15T23:12:05.000Z
|
gridworld/agents/buildings/__init__.py
|
NREL/PowerGridworld
|
2f72ac5bb663092ca806c6fff9c7cf70f94fd775
|
[
"BSD-3-Clause"
] | 2
|
2022-02-09T09:15:41.000Z
|
2022-02-24T14:56:40.000Z
|
from .five_zone_rom_env import FiveZoneROMEnv, FiveZoneROMThermalEnergyEnv
| 37.5
| 74
| 0.906667
|
47b62f7782562683e946758d39a54a958eb73dd7
| 28,230
|
py
|
Python
|
ckan/lib/dictization/model_dictize.py
|
mkuder/ckan
|
592b65f2538ae88fbc3b0c3155cf9fcd77a0458c
|
[
"Apache-2.0"
] | 2
|
2015-07-17T19:09:52.000Z
|
2017-08-30T20:23:44.000Z
|
ckan/lib/dictization/model_dictize.py
|
mkuder/ckan
|
592b65f2538ae88fbc3b0c3155cf9fcd77a0458c
|
[
"Apache-2.0"
] | 12
|
2015-01-19T18:03:56.000Z
|
2016-04-11T16:40:33.000Z
|
ckan/lib/dictization/model_dictize.py
|
mkuder/ckan
|
592b65f2538ae88fbc3b0c3155cf9fcd77a0458c
|
[
"Apache-2.0"
] | 3
|
2015-03-31T06:19:42.000Z
|
2016-06-27T15:32:28.000Z
|
'''
These dictize functions generally take a domain object (such as Package) and
convert it to a dictionary, including related objects (e.g. for Package it
includes PackageTags, PackageExtras, PackageGroup etc).
The basic recipe is to call:
dictized = ckan.lib.dictization.table_dictize(domain_object)
which builds the dictionary by iterating over the table columns.
'''
import datetime
import urlparse
from pylons import config
from sqlalchemy.sql import select
import ckan.logic as logic
import ckan.plugins as plugins
import ckan.lib.helpers as h
import ckan.lib.dictization as d
import ckan.authz as authz
import ckan.lib.search as search
import ckan.lib.munge as munge
## package save
def group_list_dictize(obj_list, context,
sort_key=lambda x: x['display_name'], reverse=False,
with_package_counts=True,
include_groups=False,
include_tags=False,
include_extras=False):
group_dictize_context = dict(context.items()[:])
# Set options to avoid any SOLR queries for each group, which would
# slow things further.
group_dictize_options = {
'packages_field': 'dataset_count' if with_package_counts else None,
# don't allow packages_field='datasets' as it is too slow
'include_groups': include_groups,
'include_tags': include_tags,
'include_extras': include_extras,
'include_users': False, # too slow - don't allow
}
if with_package_counts and 'dataset_counts' not in group_dictize_context:
# 'dataset_counts' will already be in the context in the case that
# group_list_dictize recurses via group_dictize (groups in groups)
group_dictize_context['dataset_counts'] = get_group_dataset_counts()
if context.get('with_capacity'):
group_list = [group_dictize(group, group_dictize_context,
capacity=capacity, **group_dictize_options)
for group, capacity in obj_list]
else:
group_list = [group_dictize(group, group_dictize_context,
**group_dictize_options)
for group in obj_list]
return sorted(group_list, key=sort_key, reverse=reverse)
def resource_list_dictize(res_list, context):
active = context.get('active', True)
result_list = []
for res in res_list:
resource_dict = resource_dictize(res, context)
if active and res.state != 'active':
continue
result_list.append(resource_dict)
return sorted(result_list, key=lambda x: x["position"])
def extras_dict_dictize(extras_dict, context):
result_list = []
for name, extra in extras_dict.iteritems():
dictized = d.table_dictize(extra, context)
if not extra.state == 'active':
continue
value = dictized["value"]
result_list.append(dictized)
return sorted(result_list, key=lambda x: x["key"])
def extras_list_dictize(extras_list, context):
result_list = []
active = context.get('active', True)
for extra in extras_list:
dictized = d.table_dictize(extra, context)
if active and extra.state != 'active':
continue
value = dictized["value"]
result_list.append(dictized)
return sorted(result_list, key=lambda x: x["key"])
def resource_dictize(res, context):
model = context['model']
resource = d.table_dictize(res, context)
extras = resource.pop("extras", None)
if extras:
resource.update(extras)
# some urls do not have the protocol this adds http:// to these
url = resource['url']
## for_edit is only called at the times when the dataset is to be edited
## in the frontend. Without for_edit the whole qualified url is returned.
if resource.get('url_type') == 'upload' and not context.get('for_edit'):
cleaned_name = munge.munge_filename(url)
resource['url'] = h.url_for(controller='package',
action='resource_download',
id=resource['package_id'],
resource_id=res.id,
filename=cleaned_name,
qualified=True)
elif not urlparse.urlsplit(url).scheme and not context.get('for_edit'):
resource['url'] = u'http://' + url.lstrip('/')
return resource
def _execute(q, table, context):
'''
Takes an SqlAlchemy query (q) that is (at its base) a Select on an
object table (table), and it returns the object.
Analogous with _execute_with_revision, so takes the same params, even
though it doesn't need the table.
'''
model = context['model']
session = model.Session
return session.execute(q)
def _execute_with_revision(q, rev_table, context):
'''
Takes an SqlAlchemy query (q) that is (at its base) a Select on an object
revision table (rev_table), and you provide revision_id or revision_date in
the context and it will filter the object revision(s) to an earlier time.
Raises NotFound if context['revision_id'] is provided, but the revision
ID does not exist.
Returns [] if there are no results.
'''
model = context['model']
session = model.Session
revision_id = context.get('revision_id')
revision_date = context.get('revision_date')
if revision_id:
revision = session.query(context['model'].Revision).filter_by(
id=revision_id).first()
if not revision:
raise logic.NotFound
revision_date = revision.timestamp
q = q.where(rev_table.c.revision_timestamp <= revision_date)
q = q.where(rev_table.c.expired_timestamp > revision_date)
return session.execute(q)
def package_dictize(pkg, context):
'''
Given a Package object, returns an equivalent dictionary.
Normally this is the most recent version, but you can provide revision_id
or revision_date in the context and it will filter to an earlier time.
May raise NotFound if:
* the specified revision_id doesn't exist
* the specified revision_date was before the package was created
'''
model = context['model']
is_latest_revision = not(context.get('revision_id') or
context.get('revision_date'))
execute = _execute if is_latest_revision else _execute_with_revision
#package
if is_latest_revision:
if isinstance(pkg, model.PackageRevision):
pkg = model.Package.get(pkg.id)
result = pkg
else:
package_rev = model.package_revision_table
q = select([package_rev]).where(package_rev.c.id == pkg.id)
result = execute(q, package_rev, context).first()
if not result:
raise logic.NotFound
result_dict = d.table_dictize(result, context)
#strip whitespace from title
if result_dict.get('title'):
result_dict['title'] = result_dict['title'].strip()
#resources
if is_latest_revision:
res = model.resource_table
else:
res = model.resource_revision_table
q = select([res]).where(res.c.package_id == pkg.id)
result = execute(q, res, context)
result_dict["resources"] = resource_list_dictize(result, context)
result_dict['num_resources'] = len(result_dict.get('resources', []))
#tags
tag = model.tag_table
if is_latest_revision:
pkg_tag = model.package_tag_table
else:
pkg_tag = model.package_tag_revision_table
q = select([tag, pkg_tag.c.state],
from_obj=pkg_tag.join(tag, tag.c.id == pkg_tag.c.tag_id)
).where(pkg_tag.c.package_id == pkg.id)
result = execute(q, pkg_tag, context)
result_dict["tags"] = d.obj_list_dictize(result, context,
lambda x: x["name"])
result_dict['num_tags'] = len(result_dict.get('tags', []))
# Add display_names to tags. At first a tag's display_name is just the
# same as its name, but the display_name might get changed later (e.g.
# translated into another language by the multilingual extension).
for tag in result_dict['tags']:
assert not 'display_name' in tag
tag['display_name'] = tag['name']
#extras
if is_latest_revision:
extra = model.package_extra_table
else:
extra = model.extra_revision_table
q = select([extra]).where(extra.c.package_id == pkg.id)
result = execute(q, extra, context)
result_dict["extras"] = extras_list_dictize(result, context)
#groups
if is_latest_revision:
member = model.member_table
else:
member = model.member_revision_table
group = model.group_table
q = select([group, member.c.capacity],
from_obj=member.join(group, group.c.id == member.c.group_id)
).where(member.c.table_id == pkg.id)\
.where(member.c.state == 'active') \
.where(group.c.is_organization == False)
result = execute(q, member, context)
context['with_capacity'] = False
## no package counts as cannot fetch from search index at the same
## time as indexing to it.
## tags, extras and sub-groups are not included for speed
result_dict["groups"] = group_list_dictize(result, context,
with_package_counts=False)
#owning organization
if is_latest_revision:
group = model.group_table
else:
group = model.group_revision_table
q = select([group]
).where(group.c.id == pkg.owner_org) \
.where(group.c.state == 'active')
result = execute(q, group, context)
organizations = d.obj_list_dictize(result, context)
if organizations:
result_dict["organization"] = organizations[0]
else:
result_dict["organization"] = None
#relations
if is_latest_revision:
rel = model.package_relationship_table
else:
rel = model.package_relationship_revision_table
q = select([rel]).where(rel.c.subject_package_id == pkg.id)
result = execute(q, rel, context)
result_dict["relationships_as_subject"] = \
d.obj_list_dictize(result, context)
q = select([rel]).where(rel.c.object_package_id == pkg.id)
result = execute(q, rel, context)
result_dict["relationships_as_object"] = \
d.obj_list_dictize(result, context)
# Extra properties from the domain object
# We need an actual Package object for this, not a PackageRevision
if isinstance(pkg, model.PackageRevision):
pkg = model.Package.get(pkg.id)
# isopen
result_dict['isopen'] = pkg.isopen if isinstance(pkg.isopen, bool) \
else pkg.isopen()
# type
# if null assign the default value to make searching easier
result_dict['type'] = pkg.type or u'dataset'
# license
if pkg.license and pkg.license.url:
result_dict['license_url'] = pkg.license.url
result_dict['license_title'] = pkg.license.title.split('::')[-1]
elif pkg.license:
result_dict['license_title'] = pkg.license.title
else:
result_dict['license_title'] = pkg.license_id
# creation and modification date
result_dict['metadata_modified'] = pkg.metadata_modified.isoformat()
result_dict['metadata_created'] = pkg.metadata_created.isoformat() \
if pkg.metadata_created else None
return result_dict
def _get_members(context, group, member_type):
model = context['model']
Entity = getattr(model, member_type[:-1].capitalize())
q = model.Session.query(Entity, model.Member.capacity).\
join(model.Member, model.Member.table_id == Entity.id).\
filter(model.Member.group_id == group.id).\
filter(model.Member.state == 'active').\
filter(model.Member.table_name == member_type[:-1])
if member_type == 'packages':
q = q.filter(Entity.private==False)
if 'limits' in context and member_type in context['limits']:
return q[:context['limits'][member_type]]
return q.all()
def get_group_dataset_counts():
'''For all public groups, return their dataset counts, as a SOLR facet'''
query = search.PackageSearchQuery()
q = {'q': '+capacity:public',
'fl': 'groups', 'facet.field': ['groups', 'owner_org'],
'facet.limit': -1, 'rows': 1}
query.run(q)
return query.facets
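# Illustrative shape of the facets returned above, inferred from how
# group_dictize() consumes them below (not taken from SOLR documentation):
#   {'groups': {'<group name>': <count>, ...},
#    'owner_org': {'<organization id>': <count>, ...}}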
def group_dictize(group, context,
include_groups=True,
include_tags=True,
include_users=True,
include_extras=True,
packages_field='datasets',
**kw):
'''
    Turns a Group object and its related objects into a dictionary. Related
    objects like tags are included unless you specify otherwise in the params.
:param packages_field: determines the format of the `packages` field - can
be `datasets` or None.
'''
assert packages_field in ('datasets', 'dataset_count', None)
if packages_field == 'dataset_count':
dataset_counts = context.get('dataset_counts', None)
result_dict = d.table_dictize(group, context)
result_dict.update(kw)
result_dict['display_name'] = group.title or group.name
if include_extras:
result_dict['extras'] = extras_dict_dictize(
group._extras, context)
context['with_capacity'] = True
if packages_field:
def get_packages_for_this_group(group_, just_the_count=False):
# Ask SOLR for the list of packages for this org/group
q = {
'facet': 'false',
'rows': 0,
}
if group_.is_organization:
q['fq'] = 'owner_org:"{0}"'.format(group_.id)
else:
q['fq'] = 'groups:"{0}"'.format(group_.name)
# Allow members of organizations to see private datasets.
if group_.is_organization:
is_group_member = (context.get('user') and
authz.has_user_permission_for_group_or_org(
group_.id, context.get('user'), 'read'))
if is_group_member:
context['ignore_capacity_check'] = True
if not just_the_count:
# Is there a packages limit in the context?
try:
packages_limit = context['limits']['packages']
except KeyError:
q['rows'] = 1000 # Only the first 1000 datasets are returned
else:
q['rows'] = packages_limit
search_context = dict((k, v) for (k, v) in context.items()
if k != 'schema')
search_results = logic.get_action('package_search')(search_context,
q)
return search_results['count'], search_results['results']
if packages_field == 'datasets':
package_count, packages = get_packages_for_this_group(group)
result_dict['packages'] = packages
else:
if dataset_counts is None:
package_count, packages = get_packages_for_this_group(
group, just_the_count=True)
else:
# Use the pre-calculated package_counts passed in.
facets = dataset_counts
if group.is_organization:
package_count = facets['owner_org'].get(group.id, 0)
else:
package_count = facets['groups'].get(group.name, 0)
result_dict['package_count'] = package_count
if include_tags:
# group tags are not creatable via the API yet, but that was(/is) a
# future intention (see kindly's commit 5c8df894 on 2011/12/23)
result_dict['tags'] = tag_list_dictize(
_get_members(context, group, 'tags'),
context)
if include_groups:
# these sub-groups won't have tags or extras for speed
result_dict['groups'] = group_list_dictize(
_get_members(context, group, 'groups'),
context, include_groups=True)
if include_users:
result_dict['users'] = user_list_dictize(
_get_members(context, group, 'users'),
context)
context['with_capacity'] = False
if context.get('for_view'):
if result_dict['is_organization']:
plugin = plugins.IOrganizationController
else:
plugin = plugins.IGroupController
for item in plugins.PluginImplementations(plugin):
result_dict = item.before_view(result_dict)
image_url = result_dict.get('image_url')
result_dict['image_display_url'] = image_url
if image_url and not image_url.startswith('http'):
#munge here should not have an effect only doing it incase
#of potential vulnerability of dodgy api input
image_url = munge.munge_filename_legacy(image_url)
result_dict['image_display_url'] = h.url_for_static(
'uploads/group/%s' % result_dict.get('image_url'),
qualified=True
)
return result_dict
def tag_list_dictize(tag_list, context):
result_list = []
for tag in tag_list:
if context.get('with_capacity'):
tag, capacity = tag
dictized = d.table_dictize(tag, context, capacity=capacity)
else:
dictized = d.table_dictize(tag, context)
# Add display_names to tag dicts. At first a tag's display_name is just
# the same as its name, but the display_name might get changed later
# (e.g. translated into another language by the multilingual
# extension).
assert not dictized.has_key('display_name')
dictized['display_name'] = dictized['name']
if context.get('for_view'):
for item in plugins.PluginImplementations(
plugins.ITagController):
dictized = item.before_view(dictized)
result_list.append(dictized)
return result_list
def tag_dictize(tag, context, include_datasets=True):
tag_dict = d.table_dictize(tag, context)
if include_datasets:
query = search.PackageSearchQuery()
tag_query = u'+capacity:public '
vocab_id = tag_dict.get('vocabulary_id')
if vocab_id:
model = context['model']
vocab = model.Vocabulary.get(vocab_id)
tag_query += u'+vocab_{0}:"{1}"'.format(vocab.name, tag.name)
else:
tag_query += u'+tags:"{0}"'.format(tag.name)
q = {'q': tag_query, 'fl': 'data_dict', 'wt': 'json', 'rows': 1000}
package_dicts = [h.json.loads(result['data_dict'])
for result in query.run(q)['results']]
# Add display_names to tags. At first a tag's display_name is just the
# same as its name, but the display_name might get changed later (e.g.
# translated into another language by the multilingual extension).
assert 'display_name' not in tag_dict
tag_dict['display_name'] = tag_dict['name']
if context.get('for_view'):
for item in plugins.PluginImplementations(plugins.ITagController):
tag_dict = item.before_view(tag_dict)
if include_datasets:
tag_dict['packages'] = []
for package_dict in package_dicts:
for item in plugins.PluginImplementations(plugins.IPackageController):
package_dict = item.before_view(package_dict)
tag_dict['packages'].append(package_dict)
else:
if include_datasets:
tag_dict['packages'] = package_dicts
return tag_dict
def user_list_dictize(obj_list, context,
sort_key=lambda x:x['name'], reverse=False):
result_list = []
for obj in obj_list:
user_dict = user_dictize(obj, context)
user_dict.pop('reset_key', None)
user_dict.pop('apikey', None)
user_dict.pop('email', None)
result_list.append(user_dict)
return sorted(result_list, key=sort_key, reverse=reverse)
def member_dictize(member, context):
return d.table_dictize(member, context)
def user_dictize(user, context, include_password_hash=False):
if context.get('with_capacity'):
user, capacity = user
result_dict = d.table_dictize(user, context, capacity=capacity)
else:
result_dict = d.table_dictize(user, context)
password_hash = result_dict.pop('password')
del result_dict['reset_key']
result_dict['display_name'] = user.display_name
result_dict['email_hash'] = user.email_hash
result_dict['number_of_edits'] = user.number_of_edits()
result_dict['number_created_packages'] = user.number_created_packages(
include_private_and_draft=context.get(
'count_private_and_draft_datasets', False))
requester = context.get('user')
reset_key = result_dict.pop('reset_key', None)
apikey = result_dict.pop('apikey', None)
email = result_dict.pop('email', None)
if context.get('keep_email', False):
result_dict['email'] = email
if context.get('keep_apikey', False):
result_dict['apikey'] = apikey
if requester == user.name:
result_dict['apikey'] = apikey
result_dict['email'] = email
if authz.is_sysadmin(requester):
result_dict['apikey'] = apikey
result_dict['email'] = email
if include_password_hash:
result_dict['password_hash'] = password_hash
model = context['model']
session = model.Session
return result_dict
def task_status_dictize(task_status, context):
return d.table_dictize(task_status, context)
## conversion to api
def group_to_api(group, context):
api_version = context.get('api_version')
assert api_version, 'No api_version supplied in context'
dictized = group_dictize(group, context)
dictized["extras"] = dict((extra["key"], extra["value"])
for extra in dictized["extras"])
if api_version == 1:
dictized["packages"] = sorted([pkg["name"] for pkg in dictized["packages"]])
else:
dictized["packages"] = sorted([pkg["id"] for pkg in dictized["packages"]])
return dictized
def tag_to_api(tag, context):
api_version = context.get('api_version')
assert api_version, 'No api_version supplied in context'
dictized = tag_dictize(tag, context)
if api_version == 1:
return sorted([package["name"] for package in dictized["packages"]])
else:
return sorted([package["id"] for package in dictized["packages"]])
def resource_dict_to_api(res_dict, package_id, context):
res_dict.pop("revision_id")
res_dict.pop("state")
res_dict["package_id"] = package_id
def package_to_api(pkg, context):
api_version = context.get('api_version')
assert api_version, 'No api_version supplied in context'
dictized = package_dictize(pkg, context)
dictized["tags"] = [tag["name"] for tag in dictized["tags"] \
if not tag.get('vocabulary_id')]
dictized["extras"] = dict((extra["key"], extra["value"])
for extra in dictized["extras"])
dictized['license'] = pkg.license.title if pkg.license else None
dictized['ratings_average'] = pkg.get_average_rating()
dictized['ratings_count'] = len(pkg.ratings)
dictized['notes_rendered'] = h.render_markdown(pkg.notes)
site_url = config.get('ckan.site_url', None)
if site_url:
dictized['ckan_url'] = '%s/dataset/%s' % (site_url, pkg.name)
for resource in dictized["resources"]:
resource_dict_to_api(resource, pkg.id, context)
def make_api_1(package_id):
return pkg.get(package_id).name
def make_api_2(package_id):
return package_id
if api_version == 1:
api_fn = make_api_1
dictized["groups"] = [group["name"] for group in dictized["groups"]]
# FIXME why is this just for version 1?
if pkg.resources:
dictized['download_url'] = pkg.resources[0].url
else:
api_fn = make_api_2
dictized["groups"] = [group["id"] for group in dictized["groups"]]
subjects = dictized.pop("relationships_as_subject")
objects = dictized.pop("relationships_as_object")
relationships = []
for rel in objects:
model = context['model']
swap_types = model.PackageRelationship.forward_to_reverse_type
type = swap_types(rel['type'])
relationships.append({'subject': api_fn(rel['object_package_id']),
'type': type,
'object': api_fn(rel['subject_package_id']),
'comment': rel["comment"]})
for rel in subjects:
relationships.append({'subject': api_fn(rel['subject_package_id']),
'type': rel['type'],
'object': api_fn(rel['object_package_id']),
'comment': rel["comment"]})
dictized['relationships'] = relationships
return dictized
def vocabulary_dictize(vocabulary, context, include_datasets=False):
vocabulary_dict = d.table_dictize(vocabulary, context)
assert not vocabulary_dict.has_key('tags')
vocabulary_dict['tags'] = [tag_dictize(tag, context, include_datasets)
for tag in vocabulary.tags]
return vocabulary_dict
def vocabulary_list_dictize(vocabulary_list, context):
return [vocabulary_dictize(vocabulary, context)
for vocabulary in vocabulary_list]
def activity_dictize(activity, context):
activity_dict = d.table_dictize(activity, context)
return activity_dict
def activity_list_dictize(activity_list, context):
return [activity_dictize(activity, context) for activity in activity_list]
def activity_detail_dictize(activity_detail, context):
return d.table_dictize(activity_detail, context)
def activity_detail_list_dictize(activity_detail_list, context):
return [activity_detail_dictize(activity_detail, context)
for activity_detail in activity_detail_list]
def package_to_api1(pkg, context):
    # DEPRECATED set api_version in context and use package_to_api()
context['api_version'] = 1
return package_to_api(pkg, context)
def package_to_api2(pkg, context):
    # DEPRECATED set api_version in context and use package_to_api()
context['api_version'] = 2
return package_to_api(pkg, context)
def group_to_api1(group, context):
    # DEPRECATED set api_version in context and use group_to_api()
context['api_version'] = 1
return group_to_api(group, context)
def group_to_api2(group, context):
    # DEPRECATED set api_version in context and use group_to_api()
context['api_version'] = 2
return group_to_api(group, context)
def tag_to_api1(tag, context):
    # DEPRECATED set api_version in context and use tag_to_api()
context['api_version'] = 1
return tag_to_api(tag, context)
def tag_to_api2(tag, context):
    # DEPRECATED set api_version in context and use tag_to_api()
context['api_version'] = 2
return tag_to_api(tag, context)
def user_following_user_dictize(follower, context):
return d.table_dictize(follower, context)
def user_following_dataset_dictize(follower, context):
return d.table_dictize(follower, context)
def user_following_group_dictize(follower, context):
return d.table_dictize(follower, context)
def resource_view_dictize(resource_view, context):
dictized = d.table_dictize(resource_view, context)
dictized.pop('order')
config = dictized.pop('config', {})
dictized.update(config)
resource = context['model'].Resource.get(resource_view.resource_id)
package_id = resource.package_id
dictized['package_id'] = package_id
return dictized
def resource_view_list_dictize(resource_views, context):
resource_view_dicts = []
for view in resource_views:
resource_view_dicts.append(resource_view_dictize(view, context))
return resource_view_dicts
| 36.853786
| 86
| 0.643464
|
22de9c14c08544243c93c7f160045a674120d1f7
| 1,838
|
py
|
Python
|
chatbot_nlu/tokenizers/mitie_tokenizer.py
|
charlesXu86/Chatbot_NLU
|
29bc3ead75ddca74b9c41662fb1a77501cc09cf9
|
[
"MIT"
] | 6
|
2020-03-11T03:14:41.000Z
|
2021-12-02T02:30:01.000Z
|
chatbot_nlu/tokenizers/mitie_tokenizer.py
|
charlesXu86/Chatbot_CN_Component
|
29bc3ead75ddca74b9c41662fb1a77501cc09cf9
|
[
"MIT"
] | 1
|
2020-06-24T23:36:16.000Z
|
2020-06-25T03:49:49.000Z
|
chatbot_nlu/tokenizers/mitie_tokenizer.py
|
charlesXu86/Chatbot_CN_Component
|
29bc3ead75ddca74b9c41662fb1a77501cc09cf9
|
[
"MIT"
] | 9
|
2020-02-11T06:57:39.000Z
|
2020-12-01T09:53:34.000Z
|
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from builtins import str
import re
from typing import Any
from typing import Dict
from typing import List
from typing import Text
from typing import Tuple
from chatbot_nlu.config import RasaNLUModelConfig
from chatbot_nlu.tokenizers import Token
from chatbot_nlu.tokenizers import Tokenizer
from chatbot_nlu.components import Component
from chatbot_nlu.training_data import Message
from chatbot_nlu.training_data import TrainingData
class MitieTokenizer(Tokenizer, Component):
name = "tokenizer_mitie"
provides = ["tokens"]
@classmethod
def required_packages(cls):
# type: () -> List[Text]
return ["mitie"]
def train(self, training_data, config, **kwargs):
# type: (TrainingData, RasaNLUModelConfig, **Any) -> None
for example in training_data.training_examples:
example.set("tokens", self.tokenize(example.text))
def process(self, message, **kwargs):
# type: (Message, **Any) -> None
message.set("tokens", self.tokenize(message.text))
def _token_from_offset(self, text, offset, encoded_sentence):
return Token(text.decode('utf-8'),
self._byte_to_char_offset(encoded_sentence, offset))
def tokenize(self, text):
# type: (Text) -> List[Token]
import mitie
encoded_sentence = text.encode('utf-8')
tokenized = mitie.tokenize_with_offsets(encoded_sentence)
tokens = [self._token_from_offset(token, offset, encoded_sentence)
for token, offset in tokenized]
return tokens
@staticmethod
def _byte_to_char_offset(text, byte_offset):
return len(text[:byte_offset].decode('utf-8'))
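# Illustrative example (not part of the original module): byte offsets differ
# from character offsets for multi-byte UTF-8 text. For u"día", "í" occupies
# two bytes, so
#   MitieTokenizer._byte_to_char_offset(u"día".encode('utf-8'), 3)  # -> 2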
| 30.633333
| 74
| 0.710555
|
668098528a6af9956770d9a014374e25185ebde7
| 3,433
|
py
|
Python
|
pysparkbasics/L02_DataFrame/UDFExample.py
|
pengfei99/PySparkCommonFunc
|
8238949f52a8e0d2c30c42d9f4002941f43db466
|
[
"MIT"
] | null | null | null |
pysparkbasics/L02_DataFrame/UDFExample.py
|
pengfei99/PySparkCommonFunc
|
8238949f52a8e0d2c30c42d9f4002941f43db466
|
[
"MIT"
] | null | null | null |
pysparkbasics/L02_DataFrame/UDFExample.py
|
pengfei99/PySparkCommonFunc
|
8238949f52a8e0d2c30c42d9f4002941f43db466
|
[
"MIT"
] | null | null | null |
""" UDF is called user define function
UDFs are very useful when you want to transform your data frame and no pre-defined
Spark SQL function is available.
To define a spark udf, you have three options:
1. use pyspark.sql.functions.udf, this works for select, withColumn.
udf(lambda_function, return_type). The default return_type is String. If you omit
    return_type, the value returned by the lambda function will be converted to String.
2. use @udf(returnType=<>) annotation, this works for select, withColumn.
3. use spark.udf.register, this works for sql.
But remember two important things about UDFs:
- UDFs are not optimized at all, so you can quickly run into performance issues.
- UDFs need to treat null values explicitly.
"""
from pyspark.sql import SparkSession, DataFrame
from pyspark.sql.functions import udf
from pyspark.sql.types import IntegerType, StringType
def name_count(name: str) -> int:
return len(name)
# The pyspark.sql.functions.udf function takes a python function, and it returns
# org.apache.spark.sql.expressions.UserDefinedFunction class object. In our case
# it's Name_Count_UDF. And this object can used inside select or withColumn.
Name_Count_UDF = udf(lambda x: name_count(x), IntegerType())
Null_Safe_Name_Count_UDF = udf(lambda x: name_count(x) if not (x is None) else None, IntegerType())
# We can also use @udf to define a spark udf.
@udf(returnType=StringType())
def add_hello(name: str) -> str:
return "{} {}".format("hello", name)
""" Exp1,
In this example, we show how to use udf inside a select
"""
def exp1(df: DataFrame):
df1 = df.select("name", Name_Count_UDF("name").alias("length"), add_hello("name").alias("msg"))
print("Exp1 udf in select")
df1.printSchema()
df1.show()
""" Exp2,
In this example, we show how to use udf inside a withColumn
"""
def exp2(df: DataFrame):
df1 = df.withColumn("length", Name_Count_UDF("name")).withColumn("msg", add_hello("name"))
print("Exp2 udf in withColumn")
df1.printSchema()
df1.show()
""" Exp3
In this example, we show how to register and use udf inside sql
"""
def exp3(spark: SparkSession, df: DataFrame):
# register the function for sql
spark.udf.register("Count_Name_UDF", name_count, IntegerType())
df.createOrReplaceTempView("name_table")
    df1 = spark.sql("select name, Count_Name_UDF(name) as length from name_table")
print("Exp3 udf in sql statement: ")
df1.show()
def exp4(spark: SparkSession):
data1 = [("haha ",),
("toto",),
("titi",),
(None,)]
df1 = spark.createDataFrame(data1, schema=['name'])
print("Source data frame: ")
df1.printSchema()
df1.show()
# try to replace Null_Safe_Name_Count_UDF by Name_Count_UDF, and see what happens
#
try:
df1.select("name", Null_Safe_Name_Count_UDF("name")).show()
except Exception as e:
print("udf failed error msg: {}".format(e))
def exp5():
pass
def main():
spark = SparkSession.builder.master("local[2]").appName("UdfExample").getOrCreate()
data = [("haha ",),
("toto",),
("titi",)]
df = spark.createDataFrame(data, schema=['name'])
print("Source data frame: ")
df.printSchema()
df.show()
# exp1
# exp1(df)
# exp2
# exp2(df)
# exp3
# exp3(spark, df)
# exp4
exp4(spark)
if __name__ == "__main__":
main()
| 27.910569
| 99
| 0.676959
|
489193c5c51809216b9061ad6b5c622cdf83d717
| 94
|
py
|
Python
|
src/exoplanet/theano_ops/interp/__init__.py
|
christinahedges/exoplanet
|
55d2252c71191044613fabb9c8bd3062aca3bc1b
|
[
"MIT"
] | 2
|
2021-10-01T12:46:09.000Z
|
2022-03-24T10:25:20.000Z
|
src/exoplanet/theano_ops/interp/__init__.py
|
christinahedges/exoplanet
|
55d2252c71191044613fabb9c8bd3062aca3bc1b
|
[
"MIT"
] | null | null | null |
src/exoplanet/theano_ops/interp/__init__.py
|
christinahedges/exoplanet
|
55d2252c71191044613fabb9c8bd3062aca3bc1b
|
[
"MIT"
] | 1
|
2021-12-04T22:27:14.000Z
|
2021-12-04T22:27:14.000Z
|
# -*- coding: utf-8 -*-
__all__ = ["RegularGridOp"]
from .regular_grid import RegularGridOp
| 15.666667
| 39
| 0.691489
|
03feb2596ab7b039453aedad52a6838553df36bc
| 65
|
py
|
Python
|
run.py
|
abrenaut/geofinder
|
5f3b22f97e81b251af8efac539de0ff1a8c41686
|
[
"Apache-2.0"
] | 1
|
2017-10-23T04:34:27.000Z
|
2017-10-23T04:34:27.000Z
|
run.py
|
abrenaut/geofinder
|
5f3b22f97e81b251af8efac539de0ff1a8c41686
|
[
"Apache-2.0"
] | null | null | null |
run.py
|
abrenaut/geofinder
|
5f3b22f97e81b251af8efac539de0ff1a8c41686
|
[
"Apache-2.0"
] | null | null | null |
from app import socketio, app
socketio.run(app, host='0.0.0.0')
| 16.25
| 33
| 0.707692
|
2090624521e5fa3a5899522fef1f444c0c14817b
| 5,296
|
py
|
Python
|
egs2/jkac/tts1/local/prep_segments.py
|
texpomru13/espnet
|
7ef005e832e2fb033f356c16f54e0f08762fb4b0
|
[
"Apache-2.0"
] | 5,053
|
2017-12-13T06:21:41.000Z
|
2022-03-31T13:38:29.000Z
|
egs2/jkac/tts1/local/prep_segments.py
|
texpomru13/espnet
|
7ef005e832e2fb033f356c16f54e0f08762fb4b0
|
[
"Apache-2.0"
] | 3,666
|
2017-12-14T05:58:50.000Z
|
2022-03-31T22:11:49.000Z
|
egs2/jkac/tts1/local/prep_segments.py
|
texpomru13/espnet
|
7ef005e832e2fb033f356c16f54e0f08762fb4b0
|
[
"Apache-2.0"
] | 1,709
|
2017-12-13T01:02:42.000Z
|
2022-03-31T11:57:45.000Z
|
#!/usr/bin/env python3
# Copyright 2021 Nagoya University (Yusuke Yasuda)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
import argparse
from collections import namedtuple
import os
import re
import sys
import yaml
class JKACPath(namedtuple("JKACPath", ["label_path", "wav_path", "category", "title"])):
def recording_id(self):
return "{}_{}".format(self.category, self.title)
def wav_scp_str(self, sample_rate=None):
if sample_rate is not None:
return "{} sox {} -t wav -r {} - |".format(
self.recording_id(), self.wav_path, sample_rate
)
else:
return "{} {}".format(self.recording_id(), self.wav_path)
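    # Illustrative output (category, title and paths are made up): with
    # sample_rate="24000" this produces a piped wav.scp entry such as
    #   "novel_book1 sox /corpus/wav/novel/book1.wav -t wav -r 24000 - |"
    # and without a sample rate simply "novel_book1 /corpus/wav/novel/book1.wav".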
class JKACLabel(
namedtuple(
"JKACLabel",
[
"path",
"chapter_id",
"paragraph_id",
"style_id",
"sentence_id",
"sentence",
"time_begin",
"time_end",
],
)
):
def utt_id(self):
return "{}_{}_{}_{}_{}_{}".format(
self.path.category,
self.path.title,
self.chapter_id,
self.paragraph_id,
self.style_id,
self.sentence_id,
)
def segment_file_str(self):
return "{} {} {:.3f} {:.3f}".format(
self.utt_id(), self.path.recording_id(), self.time_begin, self.time_end
)
def kanji_sentence(self):
return re.sub(r"\[(.+?)\|(.+?)\]", r"\1", self.sentence).replace(" ", " ")
def furigana_sentence(self):
return re.sub(r"\[(.+?)\|(.+?)\]", r"\2", self.sentence).replace(" ", " ")
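    # Illustrative example (the sentence is made up): for the labelled text
    # "[今日|きょう]は晴れ", kanji_sentence() keeps the kanji reading
    # ("今日は晴れ") while furigana_sentence() keeps the furigana reading
    # ("きょうは晴れ").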
def text_file_str(self):
return "{} {}".format(self.utt_id(), self.kanji_sentence())
def utt2spk_str(self, speaker_id):
return "{} {}".format(self.utt_id(), speaker_id)
def get_parser():
parser = argparse.ArgumentParser(
description="Prepare segments from text files in yaml format",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument("input_dir", type=str, help="path to J-KAC corpus")
parser.add_argument("wav_scp_path", type=str, help="path to output 'wav.scp' file")
parser.add_argument("utt2spk_path", type=str, help="path to output 'utt2spk' file")
parser.add_argument("text_path", type=str, help="path to output 'text' file")
parser.add_argument(
"segments_path", type=str, help="path to output 'segments' file"
)
parser.add_argument("sample_rate", type=str, help="sampling rate")
return parser
def list_labels(root_path):
txt_dir_path = os.path.join(root_path, "txt")
wav_dir_path = os.path.join(root_path, "wav")
categories = os.listdir(txt_dir_path)
for category in categories:
category_txt_path = os.path.join(txt_dir_path, category)
category_wav_path = os.path.join(wav_dir_path, category)
for label_filename in os.listdir(category_txt_path):
if label_filename.endswith(".yaml"):
title = label_filename.replace(".yaml", "")
label_path = os.path.join(category_txt_path, label_filename)
wav_path = os.path.join(category_wav_path, title + ".wav")
yield JKACPath(
label_path=label_path,
wav_path=wav_path,
category=category,
title=title,
)
def read_label(path):
with open(path.label_path, "r") as f:
label_dict = yaml.load(f, Loader=yaml.Loader)
return parse_label(label_dict, path)
def parse_label(book_dict, path):
for chapter_id in book_dict.keys():
chapter = book_dict[chapter_id]
for paragraph_id in chapter.keys():
paragraph = chapter[paragraph_id]
for style_id in paragraph.keys():
style = paragraph[style_id]
for sentence_id, sentence in enumerate(style):
yield JKACLabel(
path=path,
chapter_id=chapter_id,
paragraph_id=paragraph_id,
style_id=style_id,
sentence_id=sentence_id + 1,
sentence=sentence["sent"],
time_begin=sentence["time"][0],
time_end=sentence["time"][1],
)
if __name__ == "__main__":
args = get_parser().parse_args(sys.argv[1:])
sample_rate = None if args.sample_rate == "48000" else args.sample_rate
with open(args.wav_scp_path, "w") as wav_scp_f, open(
args.utt2spk_path, "w"
) as utt2spk_f, open(args.text_path, "w") as text_f, open(
args.segments_path, "w"
) as segments_f:
paths = list(list_labels(args.input_dir))
paths.sort(key=lambda p: p.recording_id())
for path in paths:
wav_scp_f.write(path.wav_scp_str(sample_rate=sample_rate) + "\n")
labels = list(read_label(path))
labels.sort(key=lambda l: l.utt_id())
for label in labels:
text_f.write(label.text_file_str() + "\n")
segments_f.write(label.segment_file_str() + "\n")
utt2spk_f.write(label.utt2spk_str(speaker_id="JKAC") + "\n")
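The nested label layout that parse_label() walks (chapter -> paragraph -> style -> list of sentences, each with "sent" and "time") is easiest to see on a toy file; the YAML below is a hypothetical illustration, not real J-KAC data.
import yaml
example_yaml = """
1:
  1:
    narrative:
      - sent: "[今日|きょう]は 晴れ"
        time: [0.00, 1.25]
      - sent: "[明日|あした]も 晴れ"
        time: [1.40, 2.80]
"""
book_dict = yaml.safe_load(example_yaml)
for chapter_id, chapter in book_dict.items():
    for paragraph_id, paragraph in chapter.items():
        for style_id, sentences in paragraph.items():
            for sentence_id, sentence in enumerate(sentences, start=1):
                # Same fields JKACLabel is built from above.
                print(chapter_id, paragraph_id, style_id, sentence_id,
                      sentence["sent"], sentence["time"])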
| 35.072848
| 88
| 0.578927
|
3e0654b107ed3ec9d8569fd9a846447f3a34cc1f
| 7,913
|
py
|
Python
|
wechatpy/pay/api/withhold.py
|
myuanz/wechatpy
|
f131e1808ffd51f61881188fc080c36523063c2b
|
[
"MIT"
] | 2
|
2020-04-12T17:28:01.000Z
|
2020-04-12T17:28:08.000Z
|
wechatpy/pay/api/withhold.py
|
myuanz/wechatpy
|
f131e1808ffd51f61881188fc080c36523063c2b
|
[
"MIT"
] | null | null | null |
wechatpy/pay/api/withhold.py
|
myuanz/wechatpy
|
f131e1808ffd51f61881188fc080c36523063c2b
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import time
import random
from datetime import datetime
from optionaldict import optionaldict
from wechatpy.utils import timezone
from wechatpy.pay.utils import get_external_ip, calculate_signature
from wechatpy.pay.base import BaseWeChatPayAPI
class WeChatWithhold(BaseWeChatPayAPI):
def apply_signing(
self,
plan_id,
contract_code,
contract_display_account,
notify_url,
version="1.0",
clientip=None,
deviceid=None,
mobile=None,
email=None,
qq=None,
request_serial=None,
openid=None,
creid=None,
outerid=None,
):
"""
申请签约 api
https://pay.weixin.qq.com/wiki/doc/api/pap.php?chapter=18_1&index=1
:param plan_id: 模板id 协议模板id,设置路径见开发步骤。
:param contract_code: 签约协议号 商户侧的签约协议号,由商户生成
:param contract_display_account: 用户账户展示名称 签约用户的名称,用于页面展示,页面样例可见案例与规范
:param notify_url: 回调通知url 用于接收签约成功消息的回调通知地址,以http或https开头。
:param version: 版本号 固定值1.0
:param request_serial: 可选 请求序列号 商户请求签约时的序列号,商户侧须唯一。序列号主要用于排序,不作为查询条件
:param clientip: 可选 客户端 IP 点分IP格式(客户端IP)
:param deviceid: 可选 设备ID android填imei的一次md5; ios填idfa的一次md5
:param mobile: 可选 手机号 用户手机号
:param email: 可选 邮箱地址 用户邮箱地址
:param qq: 可选 QQ号 用户QQ号
:param openid: 可选 微信open ID 用户微信open ID
:param creid: 可选 身份证号 用户身份证号
:param outerid: 可选 商户侧用户标识 用户在商户侧的标识
:return: 返回的结果数据字典
"""
timestamp = int(time.time())
if request_serial is None:
request_serial = int(time.time() * 1000)
data = {
"appid": self.appid,
"mch_id": self.mch_id,
"sub_mch_id": self.sub_mch_id,
"plan_id": plan_id,
"contract_code": contract_code,
"request_serial": request_serial,
"contract_display_account": contract_display_account,
"notify_url": notify_url,
"version": version,
"timestamp": timestamp,
"clientip": clientip,
"deviceid": deviceid,
"mobile": mobile,
"email": email,
"qq": qq,
"openid": openid,
"creid": creid,
"outerid": outerid,
}
data = optionaldict(data)
sign = calculate_signature(data, self._client.api_key)
data["sign"] = sign
return {
"base_url": "{}papay/entrustweb".format(self._client.API_BASE_URL),
"data": data,
}
def query_signing(
self, contract_id=None, plan_id=None, contract_code=None, openid=None, version="1.0",
):
"""
查询签约关系 api
:param contract_id: 可选 委托代扣协议id 委托代扣签约成功后由微信返回的委托代扣协议id,选择contract_id查询,则此参数必填
:param plan_id: 可选 模板id 商户在微信商户平台配置的代扣模板id,选择plan_id+contract_code查询,则此参数必填
:param contract_code: 可选 签约协议号 商户请求签约时传入的签约协议号,商户侧须唯一。选择plan_id+contract_code查询,则此参数必填
:param openid: 可选 openid 用户标识,必须保证与传入appid对应
:param version: 版本号 固定值1.0
:return: 返回的结果信息
"""
if not contract_id and not (plan_id and contract_code) and not (plan_id and openid):
raise ValueError("contract_id and (plan_id, contract_code) and (plan_id, openid) must be a choice.")
data = {
"appid": self.appid,
"mch_id": self.mch_id,
"contract_id": contract_id,
"plan_id": plan_id,
"contract_code": contract_code,
"openid": openid,
"version": version,
"nonce_str": None,
}
return self._post("papay/querycontract", data=data)
def apply_deduct(
self,
body,
total_fee,
contract_id,
notify_url,
out_trade_no=None,
detail=None,
attach=None,
fee_type="CNY",
goods_tag=None,
clientip=None,
deviceid=None,
mobile=None,
email=None,
qq=None,
openid=None,
creid=None,
outerid=None,
):
"""
申请扣款 api
:param body: 商品描述 商品或支付单简要描述
:param out_trade_no: 可选 商户订单号 商户系统内部的订单号,32个字符内、可包含字母, 其他说明见商户订单号
:param total_fee: 总金额 订单总金额,单位为分,只能为整数,详见支付金额
:param contract_id: 委托代扣协议id 签约成功后,微信返回的委托代扣协议id
:param notify_url: 回调通知url 接受扣款结果异步回调通知的url
:param detail: 可选 商品详情 商品名称明细列表
:param attach: 可选 附加数据 附加数据,在查询API和支付通知中原样返回,该字段主要用于商户携带订单的自定义数据
:param fee_type: 可选 货币类型 符合ISO 4217标准的三位字母代码,默认人民币:CNY
:param goods_tag: 可选 商品标记 商品标记,代金券或立减优惠功能的参数,说明详见代金券或立减优惠
:param clientip: 可选 客户端 IP 点分IP格式(客户端IP)
:param deviceid: 可选 设备ID android填imei的一次md5; ios填idfa的一次md5
:param mobile: 可选 手机号 用户手机号
:param email: 可选 邮箱地址 用户邮箱地址
:param qq: 可选 QQ号 用户QQ号
:param openid: 可选 微信open ID 用户微信open ID
:param creid: 可选 身份证号 用户身份证号
:param outerid: 可选 商户侧用户标识 用户在商户侧的标识
:return: 返回的结果信息
"""
trade_type = "PAP" # 交易类型 交易类型PAP-微信委托代扣支付
timestamp = int(time.time()) # 10位时间戳
spbill_create_ip = get_external_ip() # 终端IP 调用微信支付API的机器IP
if not out_trade_no:
now = datetime.fromtimestamp(time.time(), tz=timezone("Asia/Shanghai"))
out_trade_no = "{0}{1}{2}".format(self.mch_id, now.strftime("%Y%m%d%H%M%S"), random.randint(1000, 10000))
data = {
"appid": self.appid,
"mch_id": self.mch_id,
"body": body,
"out_trade_no": out_trade_no,
"total_fee": total_fee,
"trade_type": trade_type,
"contract_id": contract_id,
"notify_url": notify_url,
"detail": detail,
"attach": attach,
"fee_type": fee_type,
"goods_tag": goods_tag,
"clientip": clientip,
"deviceid": deviceid,
"mobile": mobile,
"email": email,
"qq": qq,
"openid": openid,
"creid": creid,
"outerid": outerid,
"timestamp": timestamp,
"spbill_create_ip": spbill_create_ip,
}
return self._post("pay/pappayapply", data=data)
def query_order(self, transaction_id=None, out_trade_no=None):
"""
查询订单 api
:param transaction_id: 二选一 微信订单号 微信的订单号,优先使用
:param out_trade_no: 二选一 商户订单号 商户系统内部的订单号,当没提供transaction_id时需要传这个。
:return: 返回的结果信息
"""
if not transaction_id and not out_trade_no:
raise ValueError("transaction_id and out_trade_no must be a choice.")
data = {
"appid": self.appid,
"mch_id": self.mch_id,
"transaction_id": transaction_id,
"out_trade_no": out_trade_no,
}
return self._post("pay/paporderquery", data=data)
def apply_cancel_signing(
self, contract_id=None, plan_id=None, contract_code=None, contract_termination_remark=None, version="1.0",
):
"""
申请解约
https://pay.weixin.qq.com/wiki/doc/api/pap.php?chapter=18_4&index=6
:param contract_id: 合同ID
:param plan_id: 模板ID
:param contract_code: 合同号
:param contract_termination_remark: 解约原因
:param version: 版本号
:return:
"""
if not (contract_id or (plan_id and contract_code)):
raise ValueError("contract_id and (plan_id, contract_code) must be a choice.")
data = {
"appid": self.appid,
"mch_id": self.mch_id,
"plan_id": plan_id,
"contract_code": contract_code,
"contract_id": contract_id,
"contract_termination_remark": contract_termination_remark,
"version": version,
"nonce_str": None,
}
return self._post("papay/deletecontract", data=data)
| 34.107759
| 117
| 0.588272
|
cff9f8d3067f20cbb6d7b6e2832fe1c9284f4ef1
| 468
|
py
|
Python
|
python/p14.py
|
tonyfg/project_euler
|
3a9e6352a98faaa506056b42160c91bffe93838c
|
[
"WTFPL"
] | null | null | null |
python/p14.py
|
tonyfg/project_euler
|
3a9e6352a98faaa506056b42160c91bffe93838c
|
[
"WTFPL"
] | null | null | null |
python/p14.py
|
tonyfg/project_euler
|
3a9e6352a98faaa506056b42160c91bffe93838c
|
[
"WTFPL"
] | null | null | null |
#Q: Which Collatz conjecture starting number, under one million, produces the longest chain?
#A: 837799
def collatz(n):
n_iterations = 1
while n > 1:
        if n % 2:  # note: n is odd
n = 3*n +1
else:
n = n//2
n_iterations += 1
return n_iterations
n = -1
max_seq = 0
for i in range(1, 1000000):
tmp = collatz(i)
if tmp > max_seq:
n = i
max_seq = tmp
print(str(i)+': '+ str(max_seq))
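A memoised variant (sketch): caching chain lengths in a dict lets shared tails be walked only once, giving the same answer (837799) as the brute-force loop above, much faster.
cache = {1: 1}
def collatz_len(n):
    path = []
    while n not in cache:
        path.append(n)
        n = 3 * n + 1 if n % 2 else n // 2
    length = cache[n]
    for m in reversed(path):
        length += 1
        cache[m] = length
    return length
best = max(range(1, 1000000), key=collatz_len)
print(best, cache[best])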
| 21.272727
| 92
| 0.544872
|
6d118889f48d14fde7ba0fe3afe31838fd433f01
| 2,303
|
py
|
Python
|
tools/Vitis-AI-Library/graph_runner/test/run_graph.py
|
hito0512/Vitis-AI
|
996459fb96cb077ed2f7e789d515893b1cccbc95
|
[
"Apache-2.0"
] | 848
|
2019-12-03T00:16:17.000Z
|
2022-03-31T22:53:17.000Z
|
tools/Vitis-AI-Library/graph_runner/test/run_graph.py
|
wangyifan778/Vitis-AI
|
f61061eef7550d98bf02a171604c9a9f283a7c47
|
[
"Apache-2.0"
] | 656
|
2019-12-03T00:48:46.000Z
|
2022-03-31T18:41:54.000Z
|
tools/Vitis-AI-Library/graph_runner/test/run_graph.py
|
wangyifan778/Vitis-AI
|
f61061eef7550d98bf02a171604c9a9f283a7c47
|
[
"Apache-2.0"
] | 506
|
2019-12-03T00:46:26.000Z
|
2022-03-30T10:34:56.000Z
|
#
# Copyright 2019 Xilinx Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import os
import xir
import simple_graph
import process_json
error_counter = []
def main():
json_file_name = sys.argv[1]
process_json.process(run, json_file_name, json_file_name)
sys.exit(1 if error_counter else 0)
def run(name, v):
if v['meta'].get('skip', False) is True:
return
xmodel_file_name = os.path.join(
v['meta']['path'], v['meta']['xmodel'])
root = os.environ.get(
'MODEL_ZOO_ROOT', '/scratch/models/xilinx_model_zoo_u50_1.3.0_amd64')
path = os.path.join(root, "." + v["meta"]["path"])
xmodel = v["meta"]["xmodel"]
g = xir.Graph.deserialize(os.path.join(path, xmodel))
batch = 1
for (tensor_name, tensor_ref_file) in v["meta"]["init_tensors"].items():
simple_graph.set_reference_input(g, tensor_name, tensor_ref_file)
batch = len(tensor_ref_file)
simple_graph.add_check_point(g, v["meta"]["dump_tensors_ref"], batch)
simple_graph.normal_setting_for_graph(g)
print("RUNNING %s : %s" % (name, xmodel_file_name))
errors = simple_graph.run_graph(g)
for k in errors:
for (b, actual_md5sum, expected_md5sum) in errors[k]:
b = b % len(v['meta']['dump_tensors_ref'][k])
v['meta']['dump_tensors_ref'][k][b]['md5sum_graph'] = actual_md5sum
v['meta']['pass'] = True if not errors else False
result = "SUCCESS" if v['meta']['pass'] is True else "FAIL"
print("DONE(%s) %s : %s" % (result, name, xmodel_file_name))
if not v['meta']['pass'] is True:
error_counter.append(name)
v['meta']['reason'] = v['meta'].get(
            'reason', 'regression test failed.')
v['meta']['skip'] = True
if __name__ == '__main__':
main()
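For reference, run() above implies roughly the following per-model entry layout in the JSON file; the values here are made up, only the key names are taken from the code.
example_entry = {
    "resnet50": {                      # model name passed to run() as `name`
        "meta": {
            "skip": False,
            "path": "/workspace/resnet50",
            "xmodel": "resnet50.xmodel",
            "init_tensors": {"data": ["input_batch0.bin", "input_batch1.bin"]},
            "dump_tensors_ref": {"fc1000": [{"md5sum": "<expected md5>"},
                                            {"md5sum": "<expected md5>"}]},
        }
    }
}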
| 34.893939
| 79
| 0.661311
|
5de75e33c25902f3619bc1e382ba15dbdb2ce4df
| 866
|
py
|
Python
|
openprocurement/tender/esco/tests/document.py
|
openprocurement/openprocurement.tender.esco
|
ea9732cb22b82997a0d8c0bf413d59e988d0600d
|
[
"Apache-2.0"
] | 1
|
2020-07-14T05:19:43.000Z
|
2020-07-14T05:19:43.000Z
|
openprocurement/tender/esco/tests/document.py
|
openprocurement/openprocurement.tender.esco
|
ea9732cb22b82997a0d8c0bf413d59e988d0600d
|
[
"Apache-2.0"
] | 39
|
2017-05-12T13:08:46.000Z
|
2018-08-13T08:26:04.000Z
|
openprocurement/tender/esco/tests/document.py
|
ProzorroUKR/openprocurement.tender.esco
|
9e6a3a00d9ad43a81f3b3415bc023a14088980a2
|
[
"Apache-2.0"
] | 17
|
2017-05-11T11:32:28.000Z
|
2018-06-15T14:39:18.000Z
|
# -*- coding: utf-8 -*-
import unittest
from openprocurement.tender.belowthreshold.tests.document import (
TenderDocumentResourceTestMixin,
TenderDocumentWithDSResourceTestMixin
)
from openprocurement.tender.esco.tests.base import BaseESCOContentWebTest
class TenderDocumentResourceTest(BaseESCOContentWebTest, TenderDocumentResourceTestMixin):
docservice = False
initial_auth = ('Basic', ('broker', ''))
class TenderDocumentWithDSResourceTest(TenderDocumentResourceTest,
TenderDocumentWithDSResourceTestMixin):
docservice = True
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(TenderDocumentResourceTest))
suite.addTest(unittest.makeSuite(TenderDocumentWithDSResourceTest))
return suite
if __name__ == '__main__':
unittest.main(defaultTest='suite')
| 27.935484
| 90
| 0.756351
|
d231740bc7b662efda97c02d2083625e002baec4
| 1,519
|
py
|
Python
|
examples/plot_square_h2_poincare_half_plane.py
|
PabloJ-1/geomstats
|
b53f62b745b21972b80bd7222df9af2549b66d64
|
[
"MIT"
] | null | null | null |
examples/plot_square_h2_poincare_half_plane.py
|
PabloJ-1/geomstats
|
b53f62b745b21972b80bd7222df9af2549b66d64
|
[
"MIT"
] | null | null | null |
examples/plot_square_h2_poincare_half_plane.py
|
PabloJ-1/geomstats
|
b53f62b745b21972b80bd7222df9af2549b66d64
|
[
"MIT"
] | null | null | null |
"""Plot a square on H2 with Poincare half-plane visualization."""
import logging
import os
import matplotlib.pyplot as plt
import numpy as np
import geomstats.visualization as visualization
from geomstats.geometry.hyperbolic import Hyperbolic
H2 = Hyperbolic(dimension=2)
METRIC = H2.metric
SQUARE_SIZE = 50
def main():
top = SQUARE_SIZE / 2.0
bot = - SQUARE_SIZE / 2.0
left = - SQUARE_SIZE / 2.0
right = SQUARE_SIZE / 2.0
corners_int = [(bot, left), (bot, right), (top, right), (top, left)]
corners_ext = H2.intrinsic_to_extrinsic_coords(corners_int)
n_steps = 20
ax = plt.gca()
for i, src in enumerate(corners_ext):
dst_id = (i + 1) % len(corners_ext)
dst = corners_ext[dst_id]
tangent_vec = METRIC.log(point=dst, base_point=src)
geodesic = METRIC.geodesic(initial_point=src,
initial_tangent_vec=tangent_vec)
t = np.linspace(0, 1, n_steps)
edge_points = geodesic(t)
visualization.plot(
edge_points,
ax=ax,
space='H2_poincare_half_plane',
marker='.',
color='black')
plt.show()
if __name__ == "__main__":
if os.environ['GEOMSTATS_BACKEND'] == 'tensorflow':
logging.info('Examples with visualizations are only implemented '
'with numpy backend.\n'
'To change backend, write: '
'export GEOMSTATS_BACKEND = \'numpy\'.')
else:
main()
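A plot-free sketch reusing the module-level H2, METRIC and np names above: the midpoint of one square edge, computed with the same log/geodesic calls (this matches the geomstats API used in this example; newer releases differ).
corners = H2.intrinsic_to_extrinsic_coords([(0.0, 0.0), (25.0, 0.0)])
src, dst = corners[0], corners[1]
tangent_vec = METRIC.log(point=dst, base_point=src)
midpoint = METRIC.geodesic(initial_point=src,
                           initial_tangent_vec=tangent_vec)(np.array([0.5]))
print(midpoint)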
| 28.12963
| 73
| 0.606978
|
9ac537a4e311a6ff3cced086eadcce54a2fc144f
| 4,290
|
py
|
Python
|
weblogo/seq_io/plain_io.py
|
ghuls/weblogo
|
7eab5d1b8a8ec38786fa426af84bd77950835524
|
[
"MIT"
] | 108
|
2015-08-21T10:39:22.000Z
|
2022-03-04T22:10:49.000Z
|
weblogo/seq_io/plain_io.py
|
ghuls/weblogo
|
7eab5d1b8a8ec38786fa426af84bd77950835524
|
[
"MIT"
] | 60
|
2015-07-21T22:55:52.000Z
|
2022-03-24T21:20:00.000Z
|
weblogo/seq_io/plain_io.py
|
ghuls/weblogo
|
7eab5d1b8a8ec38786fa426af84bd77950835524
|
[
"MIT"
] | 40
|
2015-08-04T00:18:23.000Z
|
2021-12-30T13:41:54.000Z
|
#!/usr/bin/env python
# Copyright (c) 2005 Gavin E. Crooks <gec@threeplusone.com>
#
# This software is distributed under the MIT Open Source License.
# <http://www.opensource.org/licenses/mit-license.html>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
"""Read and write raw, unformatted sequence data. The whole file is read
in as a sequence. Whitespace is removed.
--- Example Plain/Raw/Text File ---
--------------------------LENSTSPYDYGENESD-------FSDSPPCPQDF
--------------------------LENLEDLF-WELDRLD------NYNDTSLVENH-
--------------------------MSNITDPQMWDFDDLN-------FTGMPPADEDY
-----------------------------------YTSDN---------YSGSGDYDSNK
-SL-------NFDRTFLPALYSLLFLLGLLGNGAVAAVLLSQRTALSSTDTFLLHLAVAD
--LC-PATMASFKAVFVPVAYSLIFLLGVIGNVLVLVILERHRQTRSSTETFLFHLAVAD
-SPC-MLETETLNKYVVIIAYALVFLLSLLGNSLVMLVILYSRVGRSVTDVYLLNLALAD
-EPC-RDENVHFNRIFLPTIYFIIFLTGIVGNGLVILVMGYQKKLRSMTDKYRLHLSVAD
"""
from ..seq import Alphabet, Seq, SeqList
from ..utils import remove_whitespace
example = """
--------------------------LENSTSPYDYGENESD-------FSDSPPCPQDF
--------------------------LENLEDLF-WELDRLD------NYNDTSLVENH-
--------------------------MSNITDPQMWDFDDLN-------FTGMPPADEDY
-----------------------------------YTSDN---------YSGSGDYDSNK
-SL-------NFDRTFLPALYSLLFLLGLLGNGAVAAVLLSQRTALSSTDTFLLHLAVAD
--LC-PATMASFKAVFVPVAYSLIFLLGVIGNVLVLVILERHRQTRSSTETFLFHLAVAD
-SPC-MLETETLNKYVVIIAYALVFLLSLLGNSLVMLVILYSRVGRSVTDVYLLNLALAD
-EPC-RDENVHFNRIFLPTIYFIIFLTGIV
"""
names = ("plain", "raw")
extensions = ()
def read(fin, alphabet=None):
"""Read a file of raw sequence data.
Args:
fin -- A stream or file to read
alphabet -- The expected alphabet of the data, if given
Returns:
SeqList -- A list of sequences
Raises:
ValueError -- If the file is unparsable
"""
seqs = [s for s in iterseq(fin, alphabet)]
return SeqList(seqs)
def iterseq(fin, alphabet=None):
"""Read the sequence data and yield one (and only one) sequence.
Args:
fin -- A stream or file to read
alphabet -- The expected alphabet of the data, if given
Yields:
Seq -- One alphabetic sequence at a time.
Raises:
ValueError -- If the file is unparsable
"""
alphabet = Alphabet(alphabet)
lines = []
for linenum, line in enumerate(fin):
if line.isspace():
continue # Blank line
line = line.strip()
if line[0] == ">": # probably a fasta file. Fail.
raise ValueError("Parse Error on input line: %d " % (linenum))
line = remove_whitespace(line)
if not alphabet.alphabetic(line):
raise ValueError(
"Character on line: %d not in alphabet: %s : %s"
% (linenum, alphabet, line)
)
lines.append(line)
yield Seq("".join(lines), alphabet)
def write(afile, seqs):
"""Write raw sequence data, one line per sequence.
arguments:
afile -- A writable stream.
seqs -- A list of Seq's
"""
for s in seqs:
writeseq(afile, s)
def writeseq(afile, seq):
"""Write a single sequence in raw format.
arguments:
afile -- A writable stream.
seq -- A Seq instance
"""
print(seq, file=afile)
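A round-trip sketch using the module's own example string and in-memory streams (assumes the weblogo package is importable):
import io
from weblogo.seq_io import plain_io
seqs = plain_io.read(io.StringIO(plain_io.example))
print(len(seqs), len(seqs[0]))   # the whole file becomes a single sequence
out = io.StringIO()
plain_io.write(out, seqs)
print(out.getvalue()[:60])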
| 33.515625
| 80
| 0.654079
|
c8e460af4f20184579e9b18b91012925fb5ff898
| 7,494
|
py
|
Python
|
bot.py
|
MrPickless/Discord-Bot
|
0bbbf234e6689b6cc1d02690903242f5c6c50022
|
[
"MIT"
] | null | null | null |
bot.py
|
MrPickless/Discord-Bot
|
0bbbf234e6689b6cc1d02690903242f5c6c50022
|
[
"MIT"
] | null | null | null |
bot.py
|
MrPickless/Discord-Bot
|
0bbbf234e6689b6cc1d02690903242f5c6c50022
|
[
"MIT"
] | 1
|
2018-09-23T01:29:16.000Z
|
2018-09-23T01:29:16.000Z
|
import discord
from discord.ext import commands
import aiohttp
import re
from datetime import timedelta
import traceback
import os
from random import choice, randint
from config import token
owner = ["222526329109741568", "252084047264743428"]
version = "Ver. 0.1.29.3"
bot = commands.Bot(command_prefix='p!', description=" I'm that demonic border collie from that television show.")
@bot.event
async def on_ready():
print('Mr. Pickles Discord Bot')
print('Version:')
print(version)
print('Logged in as')
print(bot.user.name)
print('With ID:')
print(bot.user.id)
print('Number of Guilds:')
print((len(bot.servers)))
print('------')
print('Invite me to your server:')
print(discord.utils.oauth_url(bot.user.id))
await bot.change_presence(game=discord.Game(name='with my vaccum cleaner~ | p!help'))
@bot.command(pass_context=True, hidden=True)
async def setgame(ctx, *, game):
if ctx.message.author.id not in owner:
print('Someone not set as bot owner attempted to setgame')
return
game = game.strip()
if game != "":
try:
await bot.change_presence(game=discord.Game(name=game))
except:
embed=discord.Embed(title="Failed", description="Couldn't change game.. Check console.", color=0xfb0006)
await bot.say(embed=embed)
else:
embed=discord.Embed(title="Success!", description="Game changed.", color=0xfb0006)
await bot.say(embed=embed)
print('game changed by dev.')
else:
await bot.send_cmd_help(ctx)
@bot.command(pass_context=True, hidden=True)
async def setname(ctx, *, name):
if ctx.message.author.id not in owner:
print('Someone not set as bot owner attempted to setname')
return
name = name.strip()
if name != "":
try:
await bot.edit_profile(username=name)
except:
embed=discord.Embed(title="Failed", description="Couldn't change name. Check console.", color=0xfb0006)
await bot.say(embed=embed)
else:
embed=discord.Embed(title="Success!", description="Name changed.", color=0xfb0006)
await bot.say(embed=embed)
print('Name changed by dev.')
else:
await bot.send_cmd_help(ctx)
@bot.event
async def on_command_error(error, ctx):
channel = ctx.message.channel
if isinstance(error, commands.MissingRequiredArgument):
await send_cmd_help(ctx)
elif isinstance(error, commands.BadArgument):
await send_cmd_help(ctx)
elif isinstance(error, commands.CommandInvokeError):
print("<:mrpickles:480552232165572608> Exception in command '{}', {}".format(ctx.command.qualified_name, error.original))
traceback.print_tb(error.original.__traceback__)
embed=discord.Embed(title="Error", description="It seems like something went wrong. Check console/report to my developers.", color=0xfb0006)
await bot.say(embed=embed)
@bot.command(pass_context=True, no_pm=True)
async def avatar(ctx, member: discord.Member):
"""User Avatar"""
await bot.reply("{}".format(member.avatar_url))
@bot.command(pass_context=True, no_pm=True)
async def guildicon(ctx):
"""Guild Icon"""
await bot.reply("{}".format(ctx.message.server.icon_url))
@bot.command(pass_context=True)
async def guildid(ctx):
"""Guild ID"""
await bot.reply("`{}`".format(ctx.message.server.id))
@bot.command(pass_context=True, hidden=True)
async def setavatar(ctx, url):
if ctx.message.author.id not in owner:
return
async with aiohttp.ClientSession() as session:
async with session.get(url) as r:
data = await r.read()
await bot.edit_profile(avatar=data)
embed=discord.Embed(title="Success!", description="Avatar changed.", color=0xfb0006)
await bot.say(embed=embed)
@bot.command()
async def invite():
"""Bot Invite"""
embed = discord.Embed(title="\U0001f44d")
embed2=discord.Embed(title="Mr. Pickles Invite", url=(discord.utils.oauth_url(bot.user.id)), description="Click the link if you want me to join your server.", color=0xfb0006)
await bot.say(embed=embed)
await bot.whisper(embed=embed2)
@bot.command()
async def guildcount():
"""Bot Guild Count"""
embed=discord.Embed(title=(len(bot.servers)), color=0xfb0006)
embed.set_author(name="Guild Count")
await bot.say(embed=embed)
@bot.event
async def send_cmd_help(ctx):
if ctx.invoked_subcommand:
pages = bot.formatter.format_help_for(ctx, ctx.invoked_subcommand)
for page in pages:
em = discord.Embed(description=page.strip("```").replace('<', '[').replace('>', ']'),
color=discord.Color.blue())
await bot.send_message(ctx.message.channel, embed=em)
else:
pages = bot.formatter.format_help_for(ctx, ctx.command)
for page in pages:
em = discord.Embed(description=page.strip("```").replace('<', '[').replace('>', ']'),
color=discord.Color.blue())
await bot.send_message(ctx.message.channel, embed=em)
@bot.command(pass_context=True)
async def ping(ctx):
embed = discord.Embed(title="Pong! :ping_pong:")
await bot.say(embed=embed)
@bot.command()
async def info():
"""Information about this bot!"""
embed=discord.Embed(title="Mr. Pickles Discord Bot", color=0xfb0006)
embed.add_field(name=":information_source: Version", value=(version), inline=True)
embed.add_field(name=":busts_in_silhouette: Developers", value="**MZFX18#0069 & JoshTheGamer632#0017**", inline=True)
embed.add_field(name="<:github:425761614441218048> GitHub", value="https://github.com/Mr-Pickles-bot/Mr.-Pickles-Discord-Bot", inline=True)
embed.add_field(name="<:kingwumpus:425762228667416625> Need Support?", value="https://discord.gg/jqDH5wZ", inline=True)
embed.add_field(name=":link: Invite the bot", value:"https://bot.discord.io/mrpickles", inline=True)
embed.set_footer(text="That was info about my vaccum cleaner.")
await bot.say(embed=embed)
@bot.command(pass_context=True, hidden=True)
async def shutdown(ctx):
if ctx.message.author.id not in owner:
await bot.say("Naughty you...")
print('someone attempted shutdown')
return
embed=discord.Embed(title="Back to my lair I go...", color=0xfb0006)
await bot.say(embed=embed)
await bot.logout()
@bot.command(hidden=True)
async def vaccum():
await bot.say("*naughty things to Grandpa's vaccum*")
print('vaccum used')
@bot.command(hidden=True)
async def secret():
await bot.say("Stop scanning for commands. FFS.")
print('command secret used')
@bot.command(hidden=True)
async def authors():
await bot.say("<@222526329109741568> and <@252084047264743428> are my creators. ")
@bot.command(hidden=True)
async def lemmefuck():
await bot.say("Cooters with Wings")
@bot.command()
async def vaccumgif():
await bot.say("https://lh4.googleusercontent.com/-3PqHxlkQcXg/VE7XszJKUpI/AAAAAAAAAMg/3vOnHgjKMLQ/w500-h307/tumblr_ndsgyr7Hs51tmpaf1o1_500.gif")
@bot.command()
async def settoken():
await bot.say("swear to fuck man.")
print('settoken used by someone')
@bot.command(hidden=True)
async def ban():
await bot.say('Sometimes, it is best to just not try these commands, especially when they aren\'t implemented yet.')
bot.run(token) # Where 'TOKEN' is your bot token
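The config module imported at the top only has to provide a token string; a hypothetical config.py would be:
# config.py (placeholder value)
token = 'YOUR-DISCORD-BOT-TOKEN'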
| 37.47
| 178
| 0.678409
|
93aebc583d9845c9c74bcb30384461e61a5f5136
| 14,907
|
py
|
Python
|
examples/advanced_operations/add_dynamic_page_feed.py
|
jiulongw/google-ads-python
|
6f5256eb1eeb5a9a95c8cdb9b97988d3a676282e
|
[
"Apache-2.0"
] | null | null | null |
examples/advanced_operations/add_dynamic_page_feed.py
|
jiulongw/google-ads-python
|
6f5256eb1eeb5a9a95c8cdb9b97988d3a676282e
|
[
"Apache-2.0"
] | null | null | null |
examples/advanced_operations/add_dynamic_page_feed.py
|
jiulongw/google-ads-python
|
6f5256eb1eeb5a9a95c8cdb9b97988d3a676282e
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example adds a page feed with URLs for a Dynamic Search Ads Campaign.
The page feed specifies precisely which URLs to use with the campaign. To use
a Dynamic Search Ads Campaign run add_dynamic_search_ads_campaign.py. To get
campaigns run basic_operations/get_campaigns.py.
"""
import argparse
import sys
import uuid
from datetime import datetime, timedelta
from google.api_core import protobuf_helpers
from google.ads.google_ads.client import GoogleAdsClient
from google.ads.google_ads.errors import GoogleAdsException
# Class to keep track of page feed details.
class FeedDetails(object):
def __init__(self, resource_name, url_attribute_id, label_attribute_id):
self.resource_name = resource_name
self.url_attribute_id = url_attribute_id
self.label_attribute_id = label_attribute_id
def main(client, customer_id, campaign_id, ad_group_id):
"""The main method that creates all necessary entities for the example.
Args:
client: an initialized GoogleAdsClient instance.
customer_id: a client customer ID str.
campaign_id: a campaign ID str.
ad_group_id: an ad group ID str.
"""
dsa_page_url_label = 'discounts'
try:
# Get the page feed resource name. This code example creates a new feed,
# but you can fetch and re-use an existing feed.
feed_resource_name = create_feed(client, customer_id)
# We need to look up the attribute name and ID for the feed we just
# created so that we can give them back to the API for construction of
# feed mappings in the next function.
feed_details = get_feed_details(client, customer_id, feed_resource_name)
create_feed_mapping(client, customer_id, feed_details)
create_feed_items(client, customer_id, feed_details, dsa_page_url_label)
# Associate the page feed with the campaign.
update_campaign_dsa_setting(client, customer_id, campaign_id,
feed_details)
ad_group_service = client.get_service('AdGroupService', version='v2')
ad_group_resource_name = ad_group_service.ad_group_path(customer_id,
ad_group_id)
# Optional: Target web pages matching the feed's label in the ad group.
add_dsa_targeting(client, customer_id, ad_group_resource_name,
dsa_page_url_label)
except GoogleAdsException as ex:
print('Request with ID "{}" failed with status "{}" and includes the '
'following errors:'.format(ex.request_id, ex.error.code().name))
for error in ex.failure.errors:
print('\tError with message "{}".'.format(error.message))
if error.location:
for field_path_element in error.location.field_path_elements:
print('\t\tOn field: {}'.format(
field_path_element.field_name))
sys.exit(1)
def create_feed(client, customer_id):
"""Creates a page feed with URLs
Args:
client: an initialized GoogleAdsClient instance.
customer_id: a client customer ID str.
Returns:
A FeedDetails instance with information about the newly created feed.
"""
# Retrieve a new feed operation object.
feed_operation = client.get_type('FeedOperation', version='v2')
# Create a new feed.
feed = feed_operation.create
feed.name.value = 'DSA Feed #{}'.format(uuid.uuid4())
feed.origin = client.get_type('FeedOriginEnum', version='v2').USER
feed_attribute_type_enum = client.get_type('FeedAttributeTypeEnum',
version='v2')
# Create the feed's attributes.
feed_attribute_url = feed.attributes.add()
feed_attribute_url.type = feed_attribute_type_enum.URL_LIST
feed_attribute_url.name.value = 'Page URL'
feed_attribute_label = feed.attributes.add()
feed_attribute_label.type = feed_attribute_type_enum.STRING_LIST
feed_attribute_label.name.value = 'Label'
# Retrieve the feed service.
feed_service = client.get_service('FeedService', version='v2')
# Send the feed operation and add the feed.
response = feed_service.mutate_feeds(customer_id, [feed_operation])
return response.results[0].resource_name
def get_feed_details(client, customer_id, resource_name):
"""Makes a search request to retrieve the attributes of a single feed.
Args:
client: an initialized GoogleAdsClient instance.
customer_id: a client customer ID str.
resource_name: the str resource_name of a feed.
Returns:
A FeedDetails instance with information about the feed that was
retrieved in the search request.
"""
query = '''
SELECT
feed.attributes
FROM
feed
WHERE
feed.resource_name = "{}"
LIMIT 1
'''.format(resource_name)
ga_service = client.get_service('GoogleAdsService', version='v2')
response = ga_service.search(customer_id, query=query)
    # Maps specific fields in each row in the response to a dict. This would
    # overwrite the same fields in the dict for each row, but we know only
    # one row will be returned.
for row in response:
attribute_lookup = {
attribute.name.value:
attribute.id.value for attribute in row.feed.attributes}
return FeedDetails(resource_name, attribute_lookup['Page URL'],
attribute_lookup['Label'])
def create_feed_mapping(client, customer_id, feed_details):
"""Creates feed mapping using the given feed details
Args:
client: an initialized GoogleAdsClient instance.
customer_id: a client customer ID str.
feed_details: a FeedDetails instance with feed attribute information
"""
# Retrieve a new feed mapping operation object.
feed_mapping_operation = client.get_type('FeedMappingOperation',
version='v2')
# Create a new feed mapping.
feed_mapping = feed_mapping_operation.create
feed_mapping.criterion_type = client.get_type(
'FeedMappingCriterionTypeEnum', version='v2').DSA_PAGE_FEED
feed_mapping.feed.value = feed_details.resource_name
dsa_page_feed_field_enum = client.get_type('DsaPageFeedCriterionFieldEnum',
version='v2')
url_field_mapping = feed_mapping.attribute_field_mappings.add()
url_field_mapping.feed_attribute_id.value = feed_details.url_attribute_id
url_field_mapping.dsa_page_feed_field = dsa_page_feed_field_enum.PAGE_URL
label_field_mapping = feed_mapping.attribute_field_mappings.add()
label_field_mapping.feed_attribute_id.value = (
feed_details.label_attribute_id)
label_field_mapping.dsa_page_feed_field = dsa_page_feed_field_enum.LABEL
# Retrieve the feed mapping service.
feed_mapping_service = client.get_service('FeedMappingService',
version='v2')
# Submit the feed mapping operation and add the feed mapping.
response = feed_mapping_service.mutate_feed_mappings(
customer_id, [feed_mapping_operation])
resource_name = response.results[0].resource_name
# Display the results.
print('Feed mapping created with resource_name: # {}'.format(resource_name))
def create_feed_items(client, customer_id, feed_details, label):
"""Creates feed items with the given feed_details and label
Args:
client: an initialized GoogleAdsClient instance.
customer_id: a client customer ID str.
feed_details: a FeedDetails instance with feed attribute information
label: a Dynamic Search Ad URL label str.
"""
# See https://support.google.com/adwords/answer/7166527 for page feed URL
# recommendations and rules.
urls = ["http://www.example.com/discounts/rental-cars",
"http://www.example.com/discounts/hotel-deals",
"http://www.example.com/discounts/flight-deals"]
def map_feed_urls(url):
feed_item_operation = client.get_type('FeedItemOperation', version='v2')
feed_item = feed_item_operation.create
feed_item.feed.value = feed_details.resource_name
url_attribute_value = feed_item.attribute_values.add()
url_attribute_value.feed_attribute_id.value = (
feed_details.url_attribute_id)
url_string_val = url_attribute_value.string_values.add()
url_string_val.value = url
label_attribute_value = feed_item.attribute_values.add()
label_attribute_value.feed_attribute_id.value = (
feed_details.label_attribute_id)
label_string_val = label_attribute_value.string_values.add()
label_string_val.value = label
return feed_item_operation
# Create a new feed item operation for each of the URLs in the url list.
feed_item_operations = list(map(map_feed_urls, urls))
# Retrieve the feed item service.
feed_item_service = client.get_service('FeedItemService', version='v2')
# Submit the feed item operations and add the feed items.
response = feed_item_service.mutate_feed_items(customer_id,
feed_item_operations)
# Display the results.
for feed_item in response.results:
print('Created feed item with resource_name: # {}'.format(
feed_item.resource_name))
def update_campaign_dsa_setting(client, customer_id, campaign_id, feed_details):
"""Updates the given campaign with the given feed details
Args:
client: an initialized GoogleAdsClient instance.
customer_id: a client customer ID str.
campaign_id: a campaign ID str;
feed_details: a FeedDetails instance with feed attribute information.
"""
query = '''
SELECT
campaign.id,
campaign.name,
campaign.dynamic_search_ads_setting.domain_name
FROM
campaign
WHERE
campaign.id = {}
LIMIT 1
'''.format(campaign_id)
ga_service = client.get_service('GoogleAdsService', version='v2')
results = ga_service.search(customer_id, query=query)
for row in results:
campaign = row.campaign
if not campaign:
raise ValueError('Campaign with id #{} not found'.format(campaign_id))
if not campaign.dynamic_search_ads_setting.domain_name:
raise ValueError(
'Campaign id #{} is not set up for Dynamic Search Ads.'.format(
campaign_id))
# Retrieve a new campaign operation
campaign_operation = client.get_type('CampaignOperation', version='v2')
# Copy the retrieved campaign onto the new campaign operation.
campaign_operation.update.CopyFrom(campaign)
updated_campaign = campaign_operation.update
feed = updated_campaign.dynamic_search_ads_setting.feeds.add()
# Use a page feed to specify precisely which URLs to use with your Dynamic
# Search ads.
feed.value = feed_details.resource_name
field_mask = protobuf_helpers.field_mask(campaign, updated_campaign)
campaign_operation.update_mask.CopyFrom(field_mask)
# Retrieve the campaign service.
campaign_service = client.get_service('CampaignService', version='v2')
# Submit the campaign operation and update the campaign.
response = campaign_service.mutate_campaigns(customer_id,
[campaign_operation])
resource_name = response.results[0].resource_name
# Display the results.
print('Updated campaign #{}'.format(resource_name))
def add_dsa_targeting(client, customer_id, ad_group_resource_name, label):
"""Adds Dynamic Search Ad targeting criteria to the given ad group
Args:
client: an initialized GoogleAdsClient instance.
customer_id: a client customer ID str.
ad_group_resource_name: a resource_name str for an Ad Group.
label: a Dynamic Search Ad URL label str.
"""
# Retrieve a new ad group criterion operation object.
ad_group_criterion_operation = client.get_type(
'AdGroupCriterionOperation', version='v2')
# Create a new ad group criterion.
ad_group_criterion = ad_group_criterion_operation.create
ad_group_criterion.ad_group.value = ad_group_resource_name
# Set the custom bid for this criterion.
ad_group_criterion.cpc_bid_micros.value = 1500000
ad_group_criterion.webpage.criterion_name.value = 'Test criterion'
# Add a condition for label=specified_label_name
webpage_criterion_info = ad_group_criterion.webpage.conditions.add()
webpage_criterion_info.argument.value = label
webpage_criterion_info.operand = client.get_type(
'WebpageConditionOperandEnum', version='v2').CUSTOM_LABEL
# Retrieve the ad group criterion service.
ad_group_criterion_service = client.get_service('AdGroupCriterionService',
version='v2')
response = ad_group_criterion_service.mutate_ad_group_criteria(
customer_id, [ad_group_criterion_operation])
resource_name = response.results[0].resource_name
# Display the results.
print('Created ad group criterion with resource_name: # {}'.format(
resource_name))
if __name__ == '__main__':
# GoogleAdsClient will read the google-ads.yaml configuration file in the
# home directory if none is specified.
google_ads_client = GoogleAdsClient.load_from_storage()
parser = argparse.ArgumentParser(
description= ('Adds a page feed with URLs for a Dynamic Search Ads '
'Campaign.'))
# The following argument(s) should be provided to run the example.
parser.add_argument('-c', '--customer_id', type=str,
required=True, help='The Google Ads customer ID.')
parser.add_argument('-i', '--campaign_id', type=str,
required=True, help='The campaign ID.')
parser.add_argument('-a', '--ad_group_id', type=str,
required=True, help='The ad group ID.')
args = parser.parse_args()
main(google_ads_client, args.customer_id, args.campaign_id,
args.ad_group_id)
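A programmatic invocation sketch reusing the imports and main() defined above; the IDs are placeholders only, and a real run would issue mutate calls against that account.
google_ads_client = GoogleAdsClient.load_from_storage()  # reads ~/google-ads.yaml
main(google_ads_client,
     customer_id='1234567890',      # placeholder customer ID
     campaign_id='987654321',       # placeholder DSA campaign ID
     ad_group_id='111222333')       # placeholder ad group ID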
| 41.179558
| 80
| 0.692762
|
6bb4bb04757f9e3d94072131489d6bc24a50acfa
| 1,192
|
py
|
Python
|
test_ufile/test_bucket.py
|
luxiaobai/ufile-sdk-python
|
be1d88a328ff8a815a9a47a3ae5a0035f4d9e2e7
|
[
"MIT"
] | null | null | null |
test_ufile/test_bucket.py
|
luxiaobai/ufile-sdk-python
|
be1d88a328ff8a815a9a47a3ae5a0035f4d9e2e7
|
[
"MIT"
] | null | null | null |
test_ufile/test_bucket.py
|
luxiaobai/ufile-sdk-python
|
be1d88a328ff8a815a9a47a3ae5a0035f4d9e2e7
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
test bucket
"""
from ufile import bucketmanager
public_key = '<your public key>'  # your UCloud account API public key (high privilege; use it for bucket operations only)
private_key = '<your private key>'  # your UCloud account API private key (high privilege; use it for bucket operations only)
public_bucket = '<your public bucket name>'  # name of the public bucket
private_bucket = '<your private bucket name>'  # name of the private bucket
region = '<your region>'  # region the bucket lives in
# create public bucket
bucketmanager = bucketmanager.BucketManager(public_key, private_key)
ret, resp = bucketmanager.createbucket(public_bucket, region,'public')
print(ret)
# create private bucket
ret, resp = bucketmanager.createbucket(private_bucket, region,'private')
print(ret)
# delete public bucket
ret, resp = bucketmanager.deletebucket(public_bucket)
print(ret)
# delete private bucket
ret, resp = bucketmanager.deletebucket(private_bucket)
print(ret)
# describe public bucket
ret, resp = bucketmanager.describebucket(public_bucket)
print(ret)
# describe private bucket
ret, resp = bucketmanager.describebucket(private_bucket)
print(ret)
# get a list of files from a bucket
ret, resp = bucketmanager.getfilelist(public_bucket)
print(ret)
| 32.216216
| 94
| 0.755872
|
a4b36276781ccc48e732d21909f965ae26715f88
| 96
|
py
|
Python
|
siptrackdlib/storage/stsqlite/__init__.py
|
sii/siptrackd
|
f124f750c5c826156c31ae8699e90ff95a964a02
|
[
"Apache-2.0"
] | null | null | null |
siptrackdlib/storage/stsqlite/__init__.py
|
sii/siptrackd
|
f124f750c5c826156c31ae8699e90ff95a964a02
|
[
"Apache-2.0"
] | 14
|
2016-03-18T13:28:16.000Z
|
2019-06-02T21:11:29.000Z
|
siptrackdlib/storage/stsqlite/__init__.py
|
sii/siptrackd
|
f124f750c5c826156c31ae8699e90ff95a964a02
|
[
"Apache-2.0"
] | 7
|
2016-03-18T13:04:54.000Z
|
2021-06-22T10:39:04.000Z
|
"""An sqlite based storage backend."""
from siptrackdlib.storage.stsqlite.base import Storage
| 19.2
| 54
| 0.78125
|
744f70ba95d0cf8a4165146159b7719e937c172b
| 12,484
|
py
|
Python
|
reversialphazero/batch_norm/neural_network.py
|
FritzFlorian/bachelor-thesis-code
|
609bc82ab6ac9879eed82f9a2968ae8ee2ef1ebb
|
[
"MIT"
] | null | null | null |
reversialphazero/batch_norm/neural_network.py
|
FritzFlorian/bachelor-thesis-code
|
609bc82ab6ac9879eed82f9a2968ae8ee2ef1ebb
|
[
"MIT"
] | null | null | null |
reversialphazero/batch_norm/neural_network.py
|
FritzFlorian/bachelor-thesis-code
|
609bc82ab6ac9879eed82f9a2968ae8ee2ef1ebb
|
[
"MIT"
] | null | null | null |
import tensorflow as tf
import hometrainer.neural_network as neural_network
import reversialphazero.batch_norm.input_output_conversion as input_output_conversion
BOARD_SIZE = 12
BOARD_HEIGHT = BOARD_SIZE
BOARD_WIDTH = BOARD_SIZE
# Number of different possible states/contents of a
# single field on the board.
N_RAW_VALUES = 4
FLOAT = tf.float32
L2_LOSS_WEIGHT = 0.0015
# Changes
# Everything like 'more_maps'
# Probs are normalized in input/output conversion
# Add batch-norm + ELU activation + HeInit
INITIALIZER = tf.contrib.layers.variance_scaling_initializer(mode="FAN_AVG") # He-Init
class SimpleNeuralNetwork(neural_network.NeuralNetwork):
def input_conversion_function(self):
return input_output_conversion.input
def output_conversion_function(self):
return input_output_conversion.output
def __init__(self):
super().__init__()
# More Filters to allow the network to learn more details
self.n_conv_filetrs = 64
def construct_network(self, sess, graph):
self._construct_inputs()
with tf.variable_scope('Convolutional-Layers'):
conv1 = self._construct_conv_layer(self.one_hot_x, self.n_conv_filetrs, 'cov1', activation=tf.nn.elu)
self.conv1_kernel = [v for v in tf.trainable_variables()
if v.name == "Convolutional-Layers/cov1/conv2d/kernel:0"][0]
            # Added skip connections similar to 'dense network' connections.
            # We feed the 'raw' input into every processing step. This adds only a little
            # overhead, but spares the network from spending kernels on passing through possible moves.
            # Hopefully this also largely eliminates errors on disallowed moves.
self.one_hot_x_float = tf.cast(self.one_hot_x, FLOAT)
res1 = self._construct_residual_block(conv1, self.n_conv_filetrs, 'res1', self.one_hot_x_float)
res2 = self._construct_residual_block(res1, self.n_conv_filetrs, 'res2', self.one_hot_x_float)
res3 = self._construct_residual_block(res2, self.n_conv_filetrs, 'res3', self.one_hot_x_float)
res4 = self._construct_residual_block(res3, self.n_conv_filetrs, 'res4', self.one_hot_x_float)
res5 = self._construct_residual_block(res4, self.n_conv_filetrs, 'res5', self.one_hot_x_float)
res6 = self._construct_residual_block(res5, self.n_conv_filetrs, 'res6', self.one_hot_x_float)
res7 = self._construct_residual_block(res6, self.n_conv_filetrs, 'res7', self.one_hot_x_float)
res8 = self._construct_residual_block(res7, self.n_conv_filetrs, 'res8', self.one_hot_x_float)
with tf.variable_scope('Probability-Head'):
n_filters = 3
# Reduce the big amount of convolutional filters to a reasonable size.
prob_conv = self._construct_conv_layer(res8, n_filters, 'prob_conv', kernel=[1, 1], stride=1)
prob_conv_with_skip_input = tf.concat([self.one_hot_x_float, prob_conv], 3)
            # Flatten the output tensor to allow it as input to a fully connected layer.
flattered_prob_conv = tf.reshape(prob_conv_with_skip_input, [-1, (n_filters + N_RAW_VALUES) * BOARD_WIDTH * BOARD_HEIGHT])
# Add a fully connected hidden layer.
prob_hidden = self._construct_dense_layer(flattered_prob_conv, BOARD_WIDTH * BOARD_HEIGHT, 'prob_hidden',
activation=tf.nn.elu)
prob_hidden_dropout = tf.layers.dropout(prob_hidden, training=self.training)
# Add a fully connected output layer.
self.out_prob_logits = self._construct_dense_layer(prob_hidden_dropout, BOARD_WIDTH * BOARD_HEIGHT, 'prob_logits')
# The final output is a probability distribution and we use the softmax loss.
# So we need to apply softmax to the output.
self.out_prob = tf.nn.softmax(self.out_prob_logits)
with tf.variable_scope('Value-Head'):
n_filters = 3
# Reduce the big amount of convolutional filters to a reasonable size.
value_conv = self._construct_conv_layer(res8, n_filters, 'value_conv', kernel=[1, 1], stride=1)
value_conv_with_skip_input = tf.concat([self.one_hot_x_float, value_conv], 3)
            # Flatten the output tensor to allow it as input to a fully connected layer.
flattered_value_conv = tf.reshape(value_conv_with_skip_input, [-1, (n_filters + N_RAW_VALUES) * BOARD_WIDTH * BOARD_HEIGHT])
# Add a fully connected hidden layer.
value_hidden = self._construct_dense_layer(flattered_value_conv, BOARD_WIDTH * BOARD_HEIGHT, 'value_hidden',
activation=tf.nn.elu)
value_hidden_dropout = tf.layers.dropout(value_hidden, training=self.training)
# Add a fully connected output layer.
value_scalar = self._construct_dense_layer(value_hidden_dropout, 1, 'value_output')
            # That will give us a value between -1 and 1, as we need.
self.out_value = tf.nn.tanh(value_scalar)
with tf.variable_scope('Final-Output'):
# Combine the output as this is needed to fulfill our internal raw data representation
self.out_combined = tf.concat([self.out_prob, self.out_value], axis=1)
with tf.variable_scope('Losses'):
# Value loss is measured in mean square error.
# Our values are in [-1, 1], so a MSE of 1 would mean that our network simply always outputs the
# mean of our values. Everything below 1 would be at least a little bit better than guessing.
self.value_loss = tf.losses.mean_squared_error(self.y_value, self.out_value)
# Probability loss is the loss of a probability distribution.
# We have a multilabel problem, where labels are mutually exclusive, but our labels are not
# one hot, but a target probability distribution.
# This suggests the softmax cross entropy as an error measure.
prob_losses = tf.nn.softmax_cross_entropy_with_logits(labels=self.y_prob, logits=self.out_prob_logits)
self.prob_loss = tf.reduce_mean(prob_losses)
# Lastly we add L2 regularization
reg_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
self.reg_loss = tf.add_n(reg_losses)
            # The sum of all three is our total loss
self.loss = tf.add_n([self.prob_loss, self.value_loss, self.reg_loss], name="loss")
with tf.variable_scope('Training'):
# Use a simpler optimizer to avoid issues because of it
optimizer = tf.train.MomentumOptimizer(0.002, 0.9)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
self.training_op = optimizer.minimize(self.loss)
with tf.variable_scope('Logging'):
self.saver = tf.train.Saver()
self.init = tf.global_variables_initializer()
# Log individual losses for debugging.
self.loss_summary = tf.summary.scalar('loss', self.loss)
self.value_loss_summary = tf.summary.scalar('value loss', self.value_loss)
self.prob_loss_summary = tf.summary.scalar('prob loss', self.prob_loss)
self.reg_loss_summary = tf.summary.scalar('reg loss', self.reg_loss)
self.conv1_kernel_summaries = []
for filter_number in range(self.n_conv_filetrs):
for image_number in range(N_RAW_VALUES):
image = tf.slice(self.conv1_kernel, [0, 0, image_number, filter_number], [3, 3, 1, 1])
transposed_image = tf.transpose(image, [3, 0, 1, 2])
image_summary = tf.summary.image('conv1-filter-{}-kernel-{}'.format(filter_number, image_number), transposed_image)
self.conv1_kernel_summaries.append(image_summary)
def _construct_inputs(self):
with tf.variable_scope("inputs"):
# Toggle Flag to enable/disable stuff during training
self.training = tf.placeholder_with_default(False, shape=(), name='training')
# Board will be one hot encoded.
self.one_hot_x = \
tf.placeholder(FLOAT, shape=(None, BOARD_HEIGHT, BOARD_WIDTH, N_RAW_VALUES), name='one_hot_x')
# Concat the expected outputs to one big array, as this is our raw input array
n_fields = BOARD_HEIGHT * BOARD_WIDTH
self.y_combined = tf.placeholder(FLOAT, shape=[None, n_fields + 1], name='y_combined')
# Outputs are the move probabilities for each field and a value estimation for player one.
# (Note: this is intended to only support two players)
self.y_prob = tf.slice(self.y_combined, [0, 0], [-1, n_fields])
self.y_value = tf.slice(self.y_combined, [0, n_fields], [-1, 1])
def _construct_conv_layer(self, input, n_filters, name, kernel=[3, 3], stride=1, normalization=True, activation=None):
"""Construct a convolutional layer with the given settings.
        Kernel, stride and an optional normalization layer can be configured."""
with tf.variable_scope(name):
conv = tf.layers.conv2d(
inputs=input,
filters=n_filters,
kernel_size=kernel,
strides=[stride, stride],
padding="same",
activation=activation,
kernel_initializer=INITIALIZER,
kernel_regularizer=tf.contrib.layers.l2_regularizer(L2_LOSS_WEIGHT))
if not normalization:
return conv
return tf.layers.batch_normalization(conv, training=self.training)
def _construct_residual_block(self, input, n_filters, name, dense_skip_input):
with tf.variable_scope(name):
            input_with_highway_skips = tf.concat([dense_skip_input, input], 3)
            conv1 = self._construct_conv_layer(input_with_highway_skips, n_filters, 'conv1')
conv1_relu = tf.nn.elu(conv1)
conv2 = self._construct_conv_layer(conv1_relu, n_filters, 'conv2')
skip = tf.add(input, conv2, 'skip_connection')
return tf.nn.elu(skip)
def _construct_dense_layer(self, input, n_nodes, name, activation=None):
return tf.layers.dense(input, n_nodes, name=name, activation=activation,
kernel_regularizer=tf.contrib.layers.l2_regularizer(L2_LOSS_WEIGHT),
kernel_initializer=INITIALIZER)
def log_training_progress(self, sess, tf_file_writer, input_arrays, target_arrays, training_batch):
# Get all the losses
prob_loss, value_loss, reg_loss, loss =\
sess.run([self.prob_loss, self.value_loss, self.reg_loss, self.loss],
feed_dict={self.one_hot_x: input_arrays, self.y_combined: target_arrays})
reg_log_summary_str = self.reg_loss_summary.eval(feed_dict={self.reg_loss: reg_loss})
value_log_summary_str = self.value_loss_summary.eval(feed_dict={self.value_loss: value_loss})
prob_log_summary_str = self.prob_loss_summary.eval(feed_dict={self.prob_loss: prob_loss})
log_summary_str = self.loss_summary.eval(feed_dict={self.loss: loss})
tf_file_writer.add_summary(log_summary_str, training_batch)
tf_file_writer.add_summary(reg_log_summary_str, training_batch)
tf_file_writer.add_summary(value_log_summary_str, training_batch)
tf_file_writer.add_summary(prob_log_summary_str, training_batch)
for image_summary in self.conv1_kernel_summaries:
tf_file_writer.add_summary(image_summary.eval())
return loss
def load_weights(self, sess, filename):
self.saver.restore(sess, filename)
def train_batch(self, sess, input_arrays, target_arrays):
sess.run(self.training_op, feed_dict={self.one_hot_x: input_arrays, self.y_combined: target_arrays,
self.training: True})
def save_weights(self, sess, filename):
self.saver.save(sess, filename)
def init_network(self):
self.init.run()
def execute_batch(self, sess, input_arrays):
return sess.run(self.out_combined, feed_dict={self.one_hot_x: input_arrays})
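For orientation, a shape sketch with hypothetical data, matching the one_hot_x and y_combined placeholders defined in _construct_inputs above:
import numpy as np
BOARD_HEIGHT = BOARD_WIDTH = 12   # mirrors BOARD_SIZE above
N_RAW_VALUES = 4
board = np.zeros((1, BOARD_HEIGHT, BOARD_WIDTH, N_RAW_VALUES), dtype=np.float32)
board[0, 5, 5, 0] = 1.0           # one-hot encoded field state
target = np.zeros((1, BOARD_HEIGHT * BOARD_WIDTH + 1), dtype=np.float32)
target[0, -1] = 1.0               # last entry is the value target in [-1, 1]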
| 52.453782
| 136
| 0.670138
|
27abc82848e0fe920a72f2faa1caa9d8e23c87f8
| 133,402
|
py
|
Python
|
astropy/wcs/wcs.py
|
emirkmo/astropy
|
d96cd45b25ae55117d1bcc9c40e83a82037fc815
|
[
"BSD-3-Clause"
] | null | null | null |
astropy/wcs/wcs.py
|
emirkmo/astropy
|
d96cd45b25ae55117d1bcc9c40e83a82037fc815
|
[
"BSD-3-Clause"
] | null | null | null |
astropy/wcs/wcs.py
|
emirkmo/astropy
|
d96cd45b25ae55117d1bcc9c40e83a82037fc815
|
[
"BSD-3-Clause"
] | null | null | null |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# Under the hood, there are 3 separate classes that perform different
# parts of the transformation:
#
# - `~astropy.wcs.Wcsprm`: Is a direct wrapper of the core WCS
# functionality in `wcslib`_. (This includes TPV and TPD
# polynomial distortion, but not SIP distortion).
#
# - `~astropy.wcs.Sip`: Handles polynomial distortion as defined in the
# `SIP`_ convention.
#
# - `~astropy.wcs.DistortionLookupTable`: Handles `distortion paper`_
# lookup tables.
#
# Additionally, the class `WCS` aggregates all of these transformations
# together in a pipeline:
#
# - Detector to image plane correction (by a pair of
# `~astropy.wcs.DistortionLookupTable` objects).
#
# - `SIP`_ distortion correction (by an underlying `~astropy.wcs.Sip`
# object)
#
# - `distortion paper`_ table-lookup correction (by a pair of
# `~astropy.wcs.DistortionLookupTable` objects).
#
# - `wcslib`_ WCS transformation (by a `~astropy.wcs.Wcsprm` object)
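#
# A minimal end-to-end sketch of that pipeline (the header values below are
# purely illustrative):
#
#     >>> from astropy.wcs import WCS
#     >>> w = WCS(naxis=2)
#     >>> w.wcs.crpix = [512.0, 512.0]
#     >>> w.wcs.cdelt = [-0.0002777, 0.0002777]
#     >>> w.wcs.crval = [83.633, 22.0145]
#     >>> w.wcs.ctype = ["RA---TAN", "DEC--TAN"]
#     >>> w.wcs_pix2world([[512.0, 512.0]], 1)   # pixel at CRPIX maps back to CRVAL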
# STDLIB
import copy
import uuid
import io
import itertools
import os
import re
import textwrap
import warnings
import builtins
# THIRD-PARTY
import numpy as np
# LOCAL
from astropy import log
from astropy.io import fits
from . import docstrings
from . import _wcs
from astropy import units as u
from astropy.utils.compat import possible_filename
from astropy.utils.exceptions import AstropyWarning, AstropyUserWarning, AstropyDeprecationWarning
from astropy.utils.decorators import deprecated_renamed_argument
# Mix-in class that provides the APE 14 API
from .wcsapi.fitswcs import FITSWCSAPIMixin, SlicedFITSWCS
__all__ = ['FITSFixedWarning', 'WCS', 'find_all_wcs',
'DistortionLookupTable', 'Sip', 'Tabprm', 'Wcsprm', 'Auxprm',
'Celprm', 'Prjprm', 'Wtbarr', 'WCSBase', 'validate', 'WcsError',
'SingularMatrixError', 'InconsistentAxisTypesError',
'InvalidTransformError', 'InvalidCoordinateError',
'InvalidPrjParametersError', 'NoSolutionError',
'InvalidSubimageSpecificationError', 'NoConvergence',
'NonseparableSubimageCoordinateSystemError',
'NoWcsKeywordsFoundError', 'InvalidTabularParametersError']
__doctest_skip__ = ['WCS.all_world2pix']
if _wcs is not None:
_parsed_version = _wcs.__version__.split('.')
if int(_parsed_version[0]) == 5 and int(_parsed_version[1]) < 8:
raise ImportError(
"astropy.wcs is built with wcslib {0}, but only versions 5.8 and "
"later on the 5.x series are known to work. The version of wcslib "
"that ships with astropy may be used.")
if not _wcs._sanity_check():
raise RuntimeError(
"astropy.wcs did not pass its sanity check for your build "
"on your platform.")
WCSBase = _wcs._Wcs
DistortionLookupTable = _wcs.DistortionLookupTable
Sip = _wcs.Sip
Wcsprm = _wcs.Wcsprm
Auxprm = _wcs.Auxprm
Celprm = _wcs.Celprm
Prjprm = _wcs.Prjprm
Tabprm = _wcs.Tabprm
Wtbarr = _wcs.Wtbarr
WcsError = _wcs.WcsError
SingularMatrixError = _wcs.SingularMatrixError
InconsistentAxisTypesError = _wcs.InconsistentAxisTypesError
InvalidTransformError = _wcs.InvalidTransformError
InvalidCoordinateError = _wcs.InvalidCoordinateError
NoSolutionError = _wcs.NoSolutionError
InvalidSubimageSpecificationError = _wcs.InvalidSubimageSpecificationError
NonseparableSubimageCoordinateSystemError = _wcs.NonseparableSubimageCoordinateSystemError
NoWcsKeywordsFoundError = _wcs.NoWcsKeywordsFoundError
InvalidTabularParametersError = _wcs.InvalidTabularParametersError
InvalidPrjParametersError = _wcs.InvalidPrjParametersError
# Copy all the constants from the C extension into this module's namespace
for key, val in _wcs.__dict__.items():
if key.startswith(('WCSSUB_', 'WCSHDR_', 'WCSHDO_', 'WCSCOMPARE_', 'PRJ_')):
locals()[key] = val
__all__.append(key)
# Set coordinate extraction callback for WCS -TAB:
def _load_tab_bintable(hdulist, extnam, extver, extlev, kind, ttype, row, ndim):
arr = hdulist[(extnam, extver)].data[ttype][row - 1]
if arr.ndim != ndim:
if kind == 'c' and ndim == 2:
arr = arr.reshape((arr.size, 1))
else:
raise ValueError("Bad TDIM")
return np.ascontiguousarray(arr, dtype=np.double)
_wcs.set_wtbarr_fitsio_callback(_load_tab_bintable)
else:
WCSBase = object
Wcsprm = object
DistortionLookupTable = object
Sip = object
Tabprm = object
Wtbarr = object
WcsError = None
SingularMatrixError = None
InconsistentAxisTypesError = None
InvalidTransformError = None
InvalidCoordinateError = None
NoSolutionError = None
InvalidSubimageSpecificationError = None
NonseparableSubimageCoordinateSystemError = None
NoWcsKeywordsFoundError = None
InvalidTabularParametersError = None
# Additional relax bit flags
WCSHDO_SIP = 0x80000
# Regular expression defining a SIP keyword. It matches keywords that start
# with A or B, optionally followed by P, followed by an underscore, then a
# number in the range 0-19, followed by an underscore and another number in
# the range 0-19. The keyword optionally ends with a capital letter.
SIP_KW = re.compile('''^[AB]P?_1?[0-9]_1?[0-9][A-Z]?$''')
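# For illustration, keywords this pattern accepts and rejects (a hedged
# sketch; the sample keywords are made up for the example):
#
#     assert SIP_KW.match('A_0_2') is not None
#     assert SIP_KW.match('BP_1_13') is not None
#     assert SIP_KW.match('A_ORDER') is None      # order keywords are kept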
def _parse_keysel(keysel):
keysel_flags = 0
if keysel is not None:
for element in keysel:
if element.lower() == 'image':
keysel_flags |= _wcs.WCSHDR_IMGHEAD
elif element.lower() == 'binary':
keysel_flags |= _wcs.WCSHDR_BIMGARR
elif element.lower() == 'pixel':
keysel_flags |= _wcs.WCSHDR_PIXLIST
else:
raise ValueError(
"keysel must be a list of 'image', 'binary' " +
"and/or 'pixel'")
else:
keysel_flags = -1
return keysel_flags
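# A small sketch of the flag translation above (hedged; the flag values
# themselves come from wcslib and are not restated here):
#
#     assert _parse_keysel(None) == -1
#     assert _parse_keysel(['image', 'binary']) == (
#         _wcs.WCSHDR_IMGHEAD | _wcs.WCSHDR_BIMGARR)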
class NoConvergence(Exception):
"""
An error class used to report non-convergence and/or divergence
of numerical methods. It is used to report errors in the
iterative solution used by
the :py:meth:`~astropy.wcs.WCS.all_world2pix`.
Attributes
----------
best_solution : `numpy.ndarray`
Best solution achieved by the numerical method.
accuracy : `numpy.ndarray`
Accuracy of the ``best_solution``.
niter : `int`
Number of iterations performed by the numerical method
to compute ``best_solution``.
divergent : None, `numpy.ndarray`
Indices of the points in ``best_solution`` array
for which the solution appears to be divergent. If the
solution does not diverge, ``divergent`` will be set to `None`.
slow_conv : None, `numpy.ndarray`
Indices of the solutions in ``best_solution`` array
for which the solution failed to converge within the
specified maximum number of iterations. If there are no
non-converging solutions (i.e., if the required accuracy
has been achieved for all input data points)
then ``slow_conv`` will be set to `None`.
"""
def __init__(self, *args, best_solution=None, accuracy=None, niter=None,
divergent=None, slow_conv=None, **kwargs):
super().__init__(*args)
self.best_solution = best_solution
self.accuracy = accuracy
self.niter = niter
self.divergent = divergent
self.slow_conv = slow_conv
if kwargs:
warnings.warn("Function received unexpected arguments ({}) these "
"are ignored but will raise an Exception in the "
"future.".format(list(kwargs)),
AstropyDeprecationWarning)
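# A hedged sketch of how this exception is typically consumed; ``wcs_obj``,
# ``ra`` and ``dec`` stand for an existing `WCS` instance and world
# coordinates and are not defined in this module:
#
#     try:
#         x, y = wcs_obj.all_world2pix(ra, dec, 1, maxiter=20, tolerance=1e-4)
#     except NoConvergence as e:
#         x, y = e.best_solution[:, 0], e.best_solution[:, 1]
#         print('stopped after', e.niter, 'iterations;',
#               'worst remaining error:', e.accuracy.max())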
class FITSFixedWarning(AstropyWarning):
"""
The warning raised when the contents of the FITS header have been
modified to be standards compliant.
"""
pass
class WCS(FITSWCSAPIMixin, WCSBase):
"""WCS objects perform standard WCS transformations, and correct for
`SIP`_ and `distortion paper`_ table-lookup transformations, based
on the WCS keywords and supplementary data read from a FITS file.
See also: https://docs.astropy.org/en/stable/wcs/
Parameters
----------
header : `~astropy.io.fits.Header`, `~astropy.io.fits.hdu.image.PrimaryHDU`, `~astropy.io.fits.hdu.image.ImageHDU`, str, dict-like, or None, optional
If *header* is not provided or None, the object will be
initialized to default values.
fobj : `~astropy.io.fits.HDUList`, optional
It is needed when header keywords point to a `distortion
paper`_ lookup table stored in a different extension.
key : str, optional
The name of a particular WCS transform to use. This may be
either ``' '`` or ``'A'``-``'Z'`` and corresponds to the
``\"a\"`` part of the ``CTYPEia`` cards. *key* may only be
provided if *header* is also provided.
minerr : float, optional
The minimum value a distortion correction must have in order
to be applied. If the value of ``CQERRja`` is smaller than
*minerr*, the corresponding distortion is not applied.
relax : bool or int, optional
Degree of permissiveness:
- `True` (default): Admit all recognized informal extensions
of the WCS standard.
- `False`: Recognize only FITS keywords defined by the
published WCS standard.
- `int`: a bit field selecting specific extensions to accept.
See :ref:`astropy:relaxread` for details.
naxis : int or sequence, optional
Extracts specific coordinate axes using
:meth:`~astropy.wcs.Wcsprm.sub`. If a header is provided, and
*naxis* is not ``None``, *naxis* will be passed to
:meth:`~astropy.wcs.Wcsprm.sub` in order to select specific
axes from the header. See :meth:`~astropy.wcs.Wcsprm.sub` for
more details about this parameter.
keysel : sequence of str, optional
A sequence of flags used to select the keyword types
considered by wcslib. When ``None``, only the standard image
header keywords are considered (and the underlying wcspih() C
function is called). To use binary table image array or pixel
list keywords, *keysel* must be set.
Each element in the list should be one of the following
strings:
- 'image': Image header keywords
- 'binary': Binary table image array keywords
- 'pixel': Pixel list keywords
Keywords such as ``EQUIna`` or ``RFRQna`` that are common to
binary table image arrays and pixel lists (including
``WCSNna`` and ``TWCSna``) are selected by both 'binary' and
'pixel'.
colsel : sequence of int, optional
A sequence of table column numbers used to restrict the WCS
transformations considered to only those pertaining to the
specified columns. If `None`, there is no restriction.
fix : bool, optional
When `True` (default), call `~astropy.wcs.Wcsprm.fix` on
the resulting object to fix any non-standard uses in the
header. `FITSFixedWarning` Warnings will be emitted if any
changes were made.
translate_units : str, optional
Specify which potentially unsafe translations of non-standard
unit strings to perform. By default, performs none. See
`WCS.fix` for more information about this parameter. Only
effective when ``fix`` is `True`.
Raises
------
MemoryError
Memory allocation failed.
ValueError
Invalid key.
KeyError
Key not found in FITS header.
ValueError
Lookup table distortion present in the header but *fobj* was
not provided.
Notes
-----
1. astropy.wcs supports arbitrary *n* dimensions for the core WCS
(the transformations handled by WCSLIB). However, the
`distortion paper`_ lookup table and `SIP`_ distortions must be
two dimensional. Therefore, if you try to create a WCS object
where the core WCS has a different number of dimensions than 2
and that object also contains a `distortion paper`_ lookup
table or `SIP`_ distortion, a `ValueError`
exception will be raised. To avoid this, consider using the
*naxis* kwarg to select two dimensions from the core WCS.
2. The number of coordinate axes in the transformation is not
determined directly from the ``NAXIS`` keyword but instead from
the highest of:
- ``NAXIS`` keyword
- ``WCSAXESa`` keyword
- The highest axis number in any parameterized WCS keyword.
The keyvalue, as well as the keyword, must be
syntactically valid otherwise it will not be considered.
If none of these keyword types is present, i.e. if the header
only contains auxiliary WCS keywords for a particular
coordinate representation, then no coordinate description is
constructed for it.
The number of axes, which is set as the ``naxis`` member, may
differ for different coordinate representations of the same
image.
3. When the header includes duplicate keywords, in most cases the
last encountered is used.
4. `~astropy.wcs.Wcsprm.set` is called immediately after
construction, so any invalid keywords or transformations will
be raised by the constructor, not when subsequently calling a
transformation method.
""" # noqa: E501
def __init__(self, header=None, fobj=None, key=' ', minerr=0.0,
relax=True, naxis=None, keysel=None, colsel=None,
fix=True, translate_units='', _do_set=True):
close_fds = []
# these parameters are stored to be used when unpickling a WCS object:
self._init_kwargs = {
'keysel': copy.copy(keysel),
'colsel': copy.copy(colsel),
}
if header is None:
if naxis is None:
naxis = 2
wcsprm = _wcs.Wcsprm(header=None, key=key,
relax=relax, naxis=naxis)
self.naxis = wcsprm.naxis
# Set some reasonable defaults.
det2im = (None, None)
cpdis = (None, None)
sip = None
else:
keysel_flags = _parse_keysel(keysel)
if isinstance(header, (str, bytes)):
try:
is_path = (possible_filename(header) and
os.path.exists(header))
except (OSError, ValueError):
is_path = False
if is_path:
if fobj is not None:
raise ValueError(
"Can not provide both a FITS filename to "
"argument 1 and a FITS file object to argument 2")
fobj = fits.open(header)
close_fds.append(fobj)
header = fobj[0].header
elif isinstance(header, fits.hdu.image._ImageBaseHDU):
header = header.header
elif not isinstance(header, fits.Header):
try:
# Accept any dict-like object
orig_header = header
header = fits.Header()
for dict_key in orig_header.keys():
header[dict_key] = orig_header[dict_key]
except TypeError:
raise TypeError(
"header must be a string, an astropy.io.fits.Header "
"object, or a dict-like object")
if isinstance(header, fits.Header):
header_string = header.tostring().rstrip()
else:
header_string = header
# Importantly, header is a *copy* of the passed-in header
# because we will be modifying it
if isinstance(header_string, str):
header_bytes = header_string.encode('ascii')
header_string = header_string
else:
header_bytes = header_string
header_string = header_string.decode('ascii')
if not (fobj is None or isinstance(fobj, fits.HDUList)):
raise AssertionError("'fobj' must be either None or an "
"astropy.io.fits.HDUList object.")
est_naxis = 2
try:
tmp_header = fits.Header.fromstring(header_string)
self._remove_sip_kw(tmp_header)
tmp_header_bytes = tmp_header.tostring().rstrip()
if isinstance(tmp_header_bytes, str):
tmp_header_bytes = tmp_header_bytes.encode('ascii')
tmp_wcsprm = _wcs.Wcsprm(header=tmp_header_bytes, key=key,
relax=relax, keysel=keysel_flags,
colsel=colsel, warnings=False,
hdulist=fobj)
if naxis is not None:
try:
tmp_wcsprm = tmp_wcsprm.sub(naxis)
except ValueError:
pass
est_naxis = tmp_wcsprm.naxis if tmp_wcsprm.naxis else 2
except _wcs.NoWcsKeywordsFoundError:
pass
self.naxis = est_naxis
header = fits.Header.fromstring(header_string)
det2im = self._read_det2im_kw(header, fobj, err=minerr)
cpdis = self._read_distortion_kw(
header, fobj, dist='CPDIS', err=minerr)
sip = self._read_sip_kw(header, wcskey=key)
self._remove_sip_kw(header)
header_string = header.tostring()
header_string = header_string.replace('END' + ' ' * 77, '')
if isinstance(header_string, str):
header_bytes = header_string.encode('ascii')
header_string = header_string
else:
header_bytes = header_string
header_string = header_string.decode('ascii')
try:
wcsprm = _wcs.Wcsprm(header=header_bytes, key=key,
relax=relax, keysel=keysel_flags,
colsel=colsel, hdulist=fobj)
except _wcs.NoWcsKeywordsFoundError:
# The header may have SIP or distortions, but no core
# WCS. That isn't an error -- we want a "default"
# (identity) core Wcs transformation in that case.
if colsel is None:
wcsprm = _wcs.Wcsprm(header=None, key=key,
relax=relax, keysel=keysel_flags,
colsel=colsel, hdulist=fobj)
else:
raise
if naxis is not None:
wcsprm = wcsprm.sub(naxis)
self.naxis = wcsprm.naxis
if (wcsprm.naxis != 2 and
(det2im[0] or det2im[1] or cpdis[0] or cpdis[1] or sip)):
raise ValueError(
"""
FITS WCS distortion paper lookup tables and SIP distortions only work
in 2 dimensions. However, WCSLIB has detected {} dimensions in the
core WCS keywords. To use core WCS in conjunction with FITS WCS
distortion paper lookup tables or SIP distortion, you must select or
reduce these to 2 dimensions using the naxis kwarg.
""".format(wcsprm.naxis))
header_naxis = header.get('NAXIS', None)
if header_naxis is not None and header_naxis < wcsprm.naxis:
warnings.warn(
"The WCS transformation has more axes ({:d}) than the "
"image it is associated with ({:d})".format(
wcsprm.naxis, header_naxis), FITSFixedWarning)
self._get_naxis(header)
WCSBase.__init__(self, sip, cpdis, wcsprm, det2im)
if fix:
if header is None:
with warnings.catch_warnings():
warnings.simplefilter('ignore', FITSFixedWarning)
self.fix(translate_units=translate_units)
else:
self.fix(translate_units=translate_units)
if _do_set:
self.wcs.set()
for fd in close_fds:
fd.close()
self._pixel_bounds = None
def __copy__(self):
new_copy = self.__class__()
WCSBase.__init__(new_copy, self.sip,
(self.cpdis1, self.cpdis2),
self.wcs,
(self.det2im1, self.det2im2))
new_copy.__dict__.update(self.__dict__)
return new_copy
def __deepcopy__(self, memo):
from copy import deepcopy
new_copy = self.__class__()
new_copy.naxis = deepcopy(self.naxis, memo)
WCSBase.__init__(new_copy, deepcopy(self.sip, memo),
(deepcopy(self.cpdis1, memo),
deepcopy(self.cpdis2, memo)),
deepcopy(self.wcs, memo),
(deepcopy(self.det2im1, memo),
deepcopy(self.det2im2, memo)))
for key, val in self.__dict__.items():
new_copy.__dict__[key] = deepcopy(val, memo)
return new_copy
def copy(self):
"""
Return a shallow copy of the object.
Convenience method so user doesn't have to import the
:mod:`copy` stdlib module.
.. warning::
Use `deepcopy` instead of `copy` unless you know why you need a
shallow copy.
"""
return copy.copy(self)
def deepcopy(self):
"""
Return a deep copy of the object.
Convenience method so user doesn't have to import the
:mod:`copy` stdlib module.
"""
return copy.deepcopy(self)
def sub(self, axes=None):
copy = self.deepcopy()
# We need to know which axes have been dropped, but there is no easy
# way to do this with the .sub function, so instead we assign UUIDs to
# the CNAME parameters in copy.wcs. We can later access the original
# CNAME properties from self.wcs.
cname_uuid = [str(uuid.uuid4()) for i in range(copy.wcs.naxis)]
copy.wcs.cname = cname_uuid
# Subset the WCS
copy.wcs = copy.wcs.sub(axes)
copy.naxis = copy.wcs.naxis
# Construct a list of dimensions from the original WCS in the order
# in which they appear in the final WCS.
keep = [cname_uuid.index(cname) if cname in cname_uuid else None
for cname in copy.wcs.cname]
# Restore the original CNAMEs
copy.wcs.cname = ['' if i is None else self.wcs.cname[i] for i in keep]
# Subset pixel_shape and pixel_bounds
if self.pixel_shape:
copy.pixel_shape = tuple([None if i is None else self.pixel_shape[i] for i in keep])
if self.pixel_bounds:
copy.pixel_bounds = [None if i is None else self.pixel_bounds[i] for i in keep]
return copy
if _wcs is not None:
sub.__doc__ = _wcs.Wcsprm.sub.__doc__
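    # An illustrative sketch of the axis subsetting implemented above
    # (hedged: ``w`` stands for an existing WCS with more than two axes,
    # e.g. a spectral cube; it is not defined here):
    #
    #     celestial = w.sub([WCSSUB_CELESTIAL])   # keep only the RA/Dec axes
    #     first_two = w.sub([1, 2])               # keep axes 1 and 2 (1-based)
    #     # pixel_shape / pixel_bounds entries of the kept axes carry over;
    #     # the entries of dropped axes simply disappear from the sub-WCS.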
def _fix_scamp(self):
"""
Remove SCAMP's PVi_m distortion parameters if SIP distortion parameters
are also present. Some projects (e.g., Palomar Transient Factory)
convert SCAMP's distortion parameters (which abuse the PVi_m cards) to
SIP. However, wcslib gets confused by the presence of both SCAMP and
SIP distortion parameters.
See https://github.com/astropy/astropy/issues/299.
"""
# Nothing to be done if no WCS attached
if self.wcs is None:
return
# Nothing to be done if no PV parameters attached
pv = self.wcs.get_pv()
if not pv:
return
# Nothing to be done if axes don't use SIP distortion parameters
if self.sip is None:
return
# Nothing to be done if any radial terms are present...
# Loop over list to find any radial terms.
# Certain values of the `j' index are used for storing
# radial terms; refer to Equation (1) in
# <http://web.ipac.caltech.edu/staff/shupe/reprints/SIP_to_PV_SPIE2012.pdf>.
pv = np.asarray(pv)
# Loop over distinct values of `i' index
for i in set(pv[:, 0]):
# Get all values of `j' index for this value of `i' index
js = set(pv[:, 1][pv[:, 0] == i])
# Find max value of `j' index
max_j = max(js)
for j in (3, 11, 23, 39):
if j < max_j and j in js:
return
self.wcs.set_pv([])
warnings.warn("Removed redundant SCAMP distortion parameters " +
"because SIP parameters are also present", FITSFixedWarning)
def fix(self, translate_units='', naxis=None):
"""
Perform the fix operations from wcslib, and warn about any
changes it has made.
Parameters
----------
translate_units : str, optional
Specify which potentially unsafe translations of
non-standard unit strings to perform. By default,
performs none.
Although ``"S"`` is commonly used to represent seconds,
its translation to ``"s"`` is potentially unsafe since the
standard recognizes ``"S"`` formally as Siemens, however
rarely that may be used. The same applies to ``"H"`` for
hours (Henry), and ``"D"`` for days (Debye).
This string controls what to do in such cases, and is
case-insensitive.
- If the string contains ``"s"``, translate ``"S"`` to
``"s"``.
- If the string contains ``"h"``, translate ``"H"`` to
``"h"``.
- If the string contains ``"d"``, translate ``"D"`` to
``"d"``.
Thus ``''`` doesn't do any unsafe translations, whereas
``'shd'`` does all of them.
naxis : int array, optional
Image axis lengths. If this array is set to zero or
``None``, then `~astropy.wcs.Wcsprm.cylfix` will not be
invoked.
"""
if self.wcs is not None:
self._fix_scamp()
fixes = self.wcs.fix(translate_units, naxis)
for key, val in fixes.items():
if val != "No change":
if (key == 'datfix' and '1858-11-17' in val and
not np.count_nonzero(self.wcs.mjdref)):
continue
warnings.warn(
("'{0}' made the change '{1}'.").
format(key, val),
FITSFixedWarning)
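    # A minimal sketch of calling the fixer (hedged: ``hdr`` is a placeholder
    # for a FITS header with slightly non-standard content, e.g. 'DEG' units):
    #
    #     w = WCS(hdr, fix=False)           # defer fixing at construction time
    #     with warnings.catch_warnings(record=True) as fix_log:
    #         warnings.simplefilter('always', FITSFixedWarning)
    #         w.fix(translate_units='shd')  # allow S->s, H->h, D->d
    #     for entry in fix_log:
    #         print(entry.message)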
def calc_footprint(self, header=None, undistort=True, axes=None, center=True):
"""
Calculates the footprint of the image on the sky.
A footprint is defined as the positions of the corners of the
image on the sky after all available distortions have been
applied.
Parameters
----------
header : `~astropy.io.fits.Header` object, optional
Used to get ``NAXIS1`` and ``NAXIS2``
header and axes are mutually exclusive, alternative ways
to provide the same information.
undistort : bool, optional
If `True`, take SIP and distortion lookup table into
account
axes : (int, int), optional
If provided, use the given sequence as the shape of the
image. Otherwise, use the ``NAXIS1`` and ``NAXIS2``
keywords from the header that was used to create this
`WCS` object.
center : bool, optional
If `True` use the center of the pixel, otherwise use the corner.
Returns
-------
coord : (4, 2) array of (*x*, *y*) coordinates.
The order is clockwise starting with the bottom left corner.
"""
if axes is not None:
naxis1, naxis2 = axes
else:
if header is None:
try:
# classes that inherit from WCS and define naxis1/2
# do not require a header parameter
naxis1, naxis2 = self.pixel_shape
except (AttributeError, TypeError):
warnings.warn(
"Need a valid header in order to calculate footprint\n", AstropyUserWarning)
return None
else:
naxis1 = header.get('NAXIS1', None)
naxis2 = header.get('NAXIS2', None)
if naxis1 is None or naxis2 is None:
raise ValueError(
"Image size could not be determined.")
if center:
corners = np.array([[1, 1],
[1, naxis2],
[naxis1, naxis2],
[naxis1, 1]], dtype=np.float64)
else:
corners = np.array([[0.5, 0.5],
[0.5, naxis2 + 0.5],
[naxis1 + 0.5, naxis2 + 0.5],
[naxis1 + 0.5, 0.5]], dtype=np.float64)
if undistort:
return self.all_pix2world(corners, 1)
else:
return self.wcs_pix2world(corners, 1)
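    # Illustrative calls to the footprint computation above (hedged: ``hdr``
    # and the 1024x1024 image size are assumed for the example):
    #
    #     w = WCS(hdr)
    #     corners = w.calc_footprint(hdr)                   # (4, 2) ra/dec array
    #     edges = w.calc_footprint(axes=(1024, 1024), center=False)
    #     linear_only = w.calc_footprint(hdr, undistort=False)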
def _read_det2im_kw(self, header, fobj, err=0.0):
"""
Create a `distortion paper`_ type lookup table for detector to
image plane correction.
"""
if fobj is None:
return (None, None)
if not isinstance(fobj, fits.HDUList):
return (None, None)
try:
axiscorr = header['AXISCORR']
d2imdis = self._read_d2im_old_format(header, fobj, axiscorr)
return d2imdis
except KeyError:
pass
dist = 'D2IMDIS'
d_kw = 'D2IM'
err_kw = 'D2IMERR'
tables = {}
for i in range(1, self.naxis + 1):
d_error = header.get(err_kw + str(i), 0.0)
if d_error < err:
tables[i] = None
continue
distortion = dist + str(i)
if distortion in header:
dis = header[distortion].lower()
if dis == 'lookup':
del header[distortion]
                    assert isinstance(fobj, fits.HDUList), (
                        'An astropy.io.fits.HDUList '
                        'is required for Lookup table distortion.')
dp = (d_kw + str(i)).strip()
dp_extver_key = dp + '.EXTVER'
if dp_extver_key in header:
d_extver = header[dp_extver_key]
del header[dp_extver_key]
else:
d_extver = 1
dp_axis_key = dp + f'.AXIS.{i:d}'
if i == header[dp_axis_key]:
d_data = fobj['D2IMARR', d_extver].data
else:
d_data = (fobj['D2IMARR', d_extver].data).transpose()
del header[dp_axis_key]
d_header = fobj['D2IMARR', d_extver].header
d_crpix = (d_header.get('CRPIX1', 0.0), d_header.get('CRPIX2', 0.0))
d_crval = (d_header.get('CRVAL1', 0.0), d_header.get('CRVAL2', 0.0))
d_cdelt = (d_header.get('CDELT1', 1.0), d_header.get('CDELT2', 1.0))
d_lookup = DistortionLookupTable(d_data, d_crpix,
d_crval, d_cdelt)
tables[i] = d_lookup
else:
warnings.warn('Polynomial distortion is not implemented.\n', AstropyUserWarning)
for key in set(header):
if key.startswith(dp + '.'):
del header[key]
else:
tables[i] = None
if not tables:
return (None, None)
else:
return (tables.get(1), tables.get(2))
def _read_d2im_old_format(self, header, fobj, axiscorr):
warnings.warn(
"The use of ``AXISCORR`` for D2IM correction has been deprecated."
"`~astropy.wcs` will read in files with ``AXISCORR`` but ``to_fits()`` will write "
"out files without it.",
AstropyDeprecationWarning)
cpdis = [None, None]
crpix = [0., 0.]
crval = [0., 0.]
cdelt = [1., 1.]
try:
d2im_data = fobj[('D2IMARR', 1)].data
except KeyError:
return (None, None)
except AttributeError:
return (None, None)
d2im_data = np.array([d2im_data])
d2im_hdr = fobj[('D2IMARR', 1)].header
naxis = d2im_hdr['NAXIS']
for i in range(1, naxis + 1):
crpix[i - 1] = d2im_hdr.get('CRPIX' + str(i), 0.0)
crval[i - 1] = d2im_hdr.get('CRVAL' + str(i), 0.0)
cdelt[i - 1] = d2im_hdr.get('CDELT' + str(i), 1.0)
cpdis = DistortionLookupTable(d2im_data, crpix, crval, cdelt)
if axiscorr == 1:
return (cpdis, None)
elif axiscorr == 2:
return (None, cpdis)
else:
warnings.warn("Expected AXISCORR to be 1 or 2", AstropyUserWarning)
return (None, None)
def _write_det2im(self, hdulist):
"""
Writes a `distortion paper`_ type lookup table to the given
`~astropy.io.fits.HDUList`.
"""
if self.det2im1 is None and self.det2im2 is None:
return
dist = 'D2IMDIS'
d_kw = 'D2IM'
def write_d2i(num, det2im):
if det2im is None:
return
hdulist[0].header[f'{dist}{num:d}'] = (
'LOOKUP', 'Detector to image correction type')
hdulist[0].header[f'{d_kw}{num:d}.EXTVER'] = (
num, 'Version number of WCSDVARR extension')
hdulist[0].header[f'{d_kw}{num:d}.NAXES'] = (
len(det2im.data.shape), 'Number of independent variables in D2IM function')
for i in range(det2im.data.ndim):
jth = {1: '1st', 2: '2nd', 3: '3rd'}.get(i + 1, f'{i + 1}th')
hdulist[0].header[f'{d_kw}{num:d}.AXIS.{i + 1:d}'] = (
i + 1, f'Axis number of the {jth} variable in a D2IM function')
image = fits.ImageHDU(det2im.data, name='D2IMARR')
header = image.header
header['CRPIX1'] = (det2im.crpix[0],
'Coordinate system reference pixel')
header['CRPIX2'] = (det2im.crpix[1],
'Coordinate system reference pixel')
header['CRVAL1'] = (det2im.crval[0],
'Coordinate system value at reference pixel')
header['CRVAL2'] = (det2im.crval[1],
'Coordinate system value at reference pixel')
header['CDELT1'] = (det2im.cdelt[0],
'Coordinate increment along axis')
header['CDELT2'] = (det2im.cdelt[1],
'Coordinate increment along axis')
image.ver = int(hdulist[0].header[f'{d_kw}{num:d}.EXTVER'])
hdulist.append(image)
write_d2i(1, self.det2im1)
write_d2i(2, self.det2im2)
def _read_distortion_kw(self, header, fobj, dist='CPDIS', err=0.0):
"""
Reads `distortion paper`_ table-lookup keywords and data, and
returns a 2-tuple of `~astropy.wcs.DistortionLookupTable`
objects.
If no `distortion paper`_ keywords are found, ``(None, None)``
is returned.
"""
if isinstance(header, (str, bytes)):
return (None, None)
if dist == 'CPDIS':
d_kw = 'DP'
err_kw = 'CPERR'
else:
d_kw = 'DQ'
err_kw = 'CQERR'
tables = {}
for i in range(1, self.naxis + 1):
d_error_key = err_kw + str(i)
if d_error_key in header:
d_error = header[d_error_key]
del header[d_error_key]
else:
d_error = 0.0
if d_error < err:
tables[i] = None
continue
distortion = dist + str(i)
if distortion in header:
dis = header[distortion].lower()
del header[distortion]
if dis == 'lookup':
if not isinstance(fobj, fits.HDUList):
raise ValueError('an astropy.io.fits.HDUList is '
'required for Lookup table distortion.')
dp = (d_kw + str(i)).strip()
dp_extver_key = dp + '.EXTVER'
if dp_extver_key in header:
d_extver = header[dp_extver_key]
del header[dp_extver_key]
else:
d_extver = 1
dp_axis_key = dp + f'.AXIS.{i:d}'
if i == header[dp_axis_key]:
d_data = fobj['WCSDVARR', d_extver].data
else:
d_data = (fobj['WCSDVARR', d_extver].data).transpose()
del header[dp_axis_key]
d_header = fobj['WCSDVARR', d_extver].header
d_crpix = (d_header.get('CRPIX1', 0.0),
d_header.get('CRPIX2', 0.0))
d_crval = (d_header.get('CRVAL1', 0.0),
d_header.get('CRVAL2', 0.0))
d_cdelt = (d_header.get('CDELT1', 1.0),
d_header.get('CDELT2', 1.0))
d_lookup = DistortionLookupTable(d_data, d_crpix, d_crval, d_cdelt)
tables[i] = d_lookup
for key in set(header):
if key.startswith(dp + '.'):
del header[key]
else:
warnings.warn('Polynomial distortion is not implemented.\n', AstropyUserWarning)
else:
tables[i] = None
if not tables:
return (None, None)
else:
return (tables.get(1), tables.get(2))
def _write_distortion_kw(self, hdulist, dist='CPDIS'):
"""
Write out `distortion paper`_ keywords to the given
`~astropy.io.fits.HDUList`.
"""
if self.cpdis1 is None and self.cpdis2 is None:
return
if dist == 'CPDIS':
d_kw = 'DP'
else:
d_kw = 'DQ'
def write_dist(num, cpdis):
if cpdis is None:
return
hdulist[0].header[f'{dist}{num:d}'] = (
'LOOKUP', 'Prior distortion function type')
hdulist[0].header[f'{d_kw}{num:d}.EXTVER'] = (
num, 'Version number of WCSDVARR extension')
hdulist[0].header[f'{d_kw}{num:d}.NAXES'] = (
len(cpdis.data.shape), f'Number of independent variables in {dist} function')
for i in range(cpdis.data.ndim):
jth = {1: '1st', 2: '2nd', 3: '3rd'}.get(i + 1, f'{i + 1}th')
hdulist[0].header[f'{d_kw}{num:d}.AXIS.{i + 1:d}'] = (
i + 1,
f'Axis number of the {jth} variable in a {dist} function')
image = fits.ImageHDU(cpdis.data, name='WCSDVARR')
header = image.header
header['CRPIX1'] = (cpdis.crpix[0], 'Coordinate system reference pixel')
header['CRPIX2'] = (cpdis.crpix[1], 'Coordinate system reference pixel')
header['CRVAL1'] = (cpdis.crval[0], 'Coordinate system value at reference pixel')
header['CRVAL2'] = (cpdis.crval[1], 'Coordinate system value at reference pixel')
header['CDELT1'] = (cpdis.cdelt[0], 'Coordinate increment along axis')
header['CDELT2'] = (cpdis.cdelt[1], 'Coordinate increment along axis')
image.ver = int(hdulist[0].header[f'{d_kw}{num:d}.EXTVER'])
hdulist.append(image)
write_dist(1, self.cpdis1)
write_dist(2, self.cpdis2)
def _remove_sip_kw(self, header):
"""
Remove SIP information from a header.
"""
# Never pass SIP coefficients to wcslib
# CTYPE must be passed with -SIP to wcslib
for key in set(m.group() for m in map(SIP_KW.match, list(header))
if m is not None):
del header[key]
def _read_sip_kw(self, header, wcskey=""):
"""
Reads `SIP`_ header keywords and returns a `~astropy.wcs.Sip`
object.
If no `SIP`_ header keywords are found, ``None`` is returned.
"""
if isinstance(header, (str, bytes)):
# TODO: Parse SIP from a string without pyfits around
return None
if "A_ORDER" in header and header['A_ORDER'] > 1:
if "B_ORDER" not in header:
raise ValueError(
"A_ORDER provided without corresponding B_ORDER "
"keyword for SIP distortion")
m = int(header["A_ORDER"])
a = np.zeros((m + 1, m + 1), np.double)
for i in range(m + 1):
for j in range(m - i + 1):
key = f"A_{i}_{j}"
if key in header:
a[i, j] = header[key]
del header[key]
m = int(header["B_ORDER"])
if m > 1:
b = np.zeros((m + 1, m + 1), np.double)
for i in range(m + 1):
for j in range(m - i + 1):
key = f"B_{i}_{j}"
if key in header:
b[i, j] = header[key]
del header[key]
else:
a = None
b = None
del header['A_ORDER']
del header['B_ORDER']
ctype = [header[f'CTYPE{nax}{wcskey}'] for nax in range(1, self.naxis + 1)]
if any(not ctyp.endswith('-SIP') for ctyp in ctype):
message = """
Inconsistent SIP distortion information is present in the FITS header and the WCS object:
SIP coefficients were detected, but CTYPE is missing a "-SIP" suffix.
astropy.wcs is using the SIP distortion coefficients,
therefore the coordinates calculated here might be incorrect.
If you do not want to apply the SIP distortion coefficients,
please remove the SIP coefficients from the FITS header or the
WCS object. As an example, if the image is already distortion-corrected
(e.g., drizzled) then distortion components should not apply and the SIP
coefficients should be removed.
While the SIP distortion coefficients are being applied here, if that was indeed the intent,
for consistency please append "-SIP" to the CTYPE in the FITS header or the WCS object.
""" # noqa: E501
log.info(message)
elif "B_ORDER" in header and header['B_ORDER'] > 1:
raise ValueError(
"B_ORDER provided without corresponding A_ORDER " +
"keyword for SIP distortion")
else:
a = None
b = None
if "AP_ORDER" in header and header['AP_ORDER'] > 1:
if "BP_ORDER" not in header:
raise ValueError(
"AP_ORDER provided without corresponding BP_ORDER "
"keyword for SIP distortion")
m = int(header["AP_ORDER"])
ap = np.zeros((m + 1, m + 1), np.double)
for i in range(m + 1):
for j in range(m - i + 1):
key = f"AP_{i}_{j}"
if key in header:
ap[i, j] = header[key]
del header[key]
m = int(header["BP_ORDER"])
if m > 1:
bp = np.zeros((m + 1, m + 1), np.double)
for i in range(m + 1):
for j in range(m - i + 1):
key = f"BP_{i}_{j}"
if key in header:
bp[i, j] = header[key]
del header[key]
else:
ap = None
bp = None
del header['AP_ORDER']
del header['BP_ORDER']
elif "BP_ORDER" in header and header['BP_ORDER'] > 1:
raise ValueError(
"BP_ORDER provided without corresponding AP_ORDER "
"keyword for SIP distortion")
else:
ap = None
bp = None
if a is None and b is None and ap is None and bp is None:
return None
if f"CRPIX1{wcskey}" not in header or f"CRPIX2{wcskey}" not in header:
raise ValueError(
"Header has SIP keywords without CRPIX keywords")
crpix1 = header.get(f"CRPIX1{wcskey}")
crpix2 = header.get(f"CRPIX2{wcskey}")
return Sip(a, b, ap, bp, (crpix1, crpix2))
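    # For illustration, the same kind of `Sip` object can be built directly
    # from coefficient arrays (a hedged sketch; the 2nd-order coefficients
    # below are made-up numbers, not taken from any real instrument):
    #
    #     a = np.zeros((3, 3)); a[0, 2] = 2.9e-7; a[2, 0] = -1.4e-7
    #     b = np.zeros((3, 3)); b[0, 2] = 1.1e-7; b[1, 1] = -3.0e-7
    #     sip = Sip(a, b, None, None, (512.0, 512.0))  # no inverse (AP/BP) terms
    #     foc = sip.pix2foc([[100.0, 200.0]], 1)       # forward polynomial only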
def _write_sip_kw(self):
"""
Write out SIP keywords. Returns a dictionary of key-value
pairs.
"""
if self.sip is None:
return {}
keywords = {}
def write_array(name, a):
if a is None:
return
size = a.shape[0]
trdir = 'sky to detector' if name[-1] == 'P' else 'detector to sky'
comment = ('SIP polynomial order, axis {:d}, {:s}'
.format(ord(name[0]) - ord('A'), trdir))
keywords[f'{name}_ORDER'] = size - 1, comment
comment = 'SIP distortion coefficient'
for i in range(size):
for j in range(size - i):
if a[i, j] != 0.0:
keywords[
f'{name}_{i:d}_{j:d}'] = a[i, j], comment
write_array('A', self.sip.a)
write_array('B', self.sip.b)
write_array('AP', self.sip.ap)
write_array('BP', self.sip.bp)
return keywords
def _denormalize_sky(self, sky):
if self.wcs.lngtyp != 'RA':
raise ValueError(
"WCS does not have longitude type of 'RA', therefore " +
"(ra, dec) data can not be used as input")
        if self.wcs.lattyp != 'DEC':
            raise ValueError(
                "WCS does not have latitude type of 'DEC', therefore " +
                "(ra, dec) data can not be used as input")
if self.wcs.naxis == 2:
if self.wcs.lng == 0 and self.wcs.lat == 1:
return sky
elif self.wcs.lng == 1 and self.wcs.lat == 0:
# Reverse the order of the columns
return sky[:, ::-1]
else:
raise ValueError(
"WCS does not have longitude and latitude celestial " +
"axes, therefore (ra, dec) data can not be used as input")
else:
if self.wcs.lng < 0 or self.wcs.lat < 0:
raise ValueError(
"WCS does not have both longitude and latitude "
"celestial axes, therefore (ra, dec) data can not be " +
"used as input")
out = np.zeros((sky.shape[0], self.wcs.naxis))
out[:, self.wcs.lng] = sky[:, 0]
out[:, self.wcs.lat] = sky[:, 1]
return out
def _normalize_sky(self, sky):
if self.wcs.lngtyp != 'RA':
raise ValueError(
"WCS does not have longitude type of 'RA', therefore " +
"(ra, dec) data can not be returned")
        if self.wcs.lattyp != 'DEC':
            raise ValueError(
                "WCS does not have latitude type of 'DEC', therefore " +
                "(ra, dec) data can not be returned")
if self.wcs.naxis == 2:
if self.wcs.lng == 0 and self.wcs.lat == 1:
return sky
elif self.wcs.lng == 1 and self.wcs.lat == 0:
# Reverse the order of the columns
return sky[:, ::-1]
else:
raise ValueError(
"WCS does not have longitude and latitude celestial "
"axes, therefore (ra, dec) data can not be returned")
else:
if self.wcs.lng < 0 or self.wcs.lat < 0:
raise ValueError(
"WCS does not have both longitude and latitude celestial "
"axes, therefore (ra, dec) data can not be returned")
out = np.empty((sky.shape[0], 2))
out[:, 0] = sky[:, self.wcs.lng]
out[:, 1] = sky[:, self.wcs.lat]
return out
def _array_converter(self, func, sky, *args, ra_dec_order=False):
"""
A helper function to support reading either a pair of arrays
or a single Nx2 array.
"""
def _return_list_of_arrays(axes, origin):
if any([x.size == 0 for x in axes]):
return axes
try:
axes = np.broadcast_arrays(*axes)
except ValueError:
raise ValueError(
"Coordinate arrays are not broadcastable to each other")
xy = np.hstack([x.reshape((x.size, 1)) for x in axes])
if ra_dec_order and sky == 'input':
xy = self._denormalize_sky(xy)
output = func(xy, origin)
if ra_dec_order and sky == 'output':
output = self._normalize_sky(output)
return (output[:, 0].reshape(axes[0].shape),
output[:, 1].reshape(axes[0].shape))
return [output[:, i].reshape(axes[0].shape)
for i in range(output.shape[1])]
def _return_single_array(xy, origin):
if xy.shape[-1] != self.naxis:
raise ValueError(
"When providing two arguments, the array must be "
"of shape (N, {})".format(self.naxis))
if 0 in xy.shape:
return xy
if ra_dec_order and sky == 'input':
xy = self._denormalize_sky(xy)
result = func(xy, origin)
if ra_dec_order and sky == 'output':
result = self._normalize_sky(result)
return result
if len(args) == 2:
try:
xy, origin = args
xy = np.asarray(xy)
origin = int(origin)
except Exception:
raise TypeError(
"When providing two arguments, they must be "
"(coords[N][{}], origin)".format(self.naxis))
if xy.shape == () or len(xy.shape) == 1:
return _return_list_of_arrays([xy], origin)
return _return_single_array(xy, origin)
elif len(args) == self.naxis + 1:
axes = args[:-1]
origin = args[-1]
try:
axes = [np.asarray(x) for x in axes]
origin = int(origin)
except Exception:
raise TypeError(
"When providing more than two arguments, they must be " +
"a 1-D array for each axis, followed by an origin.")
return _return_list_of_arrays(axes, origin)
raise TypeError(
"WCS projection has {0} dimensions, so expected 2 (an Nx{0} array "
"and the origin argument) or {1} arguments (the position in each "
"dimension, and the origin argument). Instead, {2} arguments were "
"given.".format(
self.naxis, self.naxis + 1, len(args)))
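    # The two calling conventions accepted by the public transformation
    # methods built on this helper, for a 2-D WCS (hedged: ``w``, ``xpix``
    # and ``ypix`` are assumed to exist and are not defined here):
    #
    #     # 1) a single (N, naxis) array plus the origin:
    #     world = w.all_pix2world(np.column_stack([xpix, ypix]), 1)
    #     # 2) one array per axis plus the origin:
    #     ra, dec = w.all_pix2world(xpix, ypix, 1)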
def all_pix2world(self, *args, **kwargs):
return self._array_converter(
self._all_pix2world, 'output', *args, **kwargs)
all_pix2world.__doc__ = """
Transforms pixel coordinates to world coordinates.
Performs all of the following in series:
- Detector to image plane correction (if present in the
FITS file)
- `SIP`_ distortion correction (if present in the FITS
file)
- `distortion paper`_ table-lookup correction (if present
in the FITS file)
- `wcslib`_ "core" WCS transformation
Parameters
----------
{}
For a transformation that is not two-dimensional, the
two-argument form must be used.
{}
Returns
-------
{}
Notes
-----
The order of the axes for the result is determined by the
``CTYPEia`` keywords in the FITS header, therefore it may not
always be of the form (*ra*, *dec*). The
`~astropy.wcs.Wcsprm.lat`, `~astropy.wcs.Wcsprm.lng`,
`~astropy.wcs.Wcsprm.lattyp` and `~astropy.wcs.Wcsprm.lngtyp`
members can be used to determine the order of the axes.
Raises
------
MemoryError
Memory allocation failed.
SingularMatrixError
Linear transformation matrix is singular.
InconsistentAxisTypesError
Inconsistent or unrecognized coordinate axis types.
ValueError
Invalid parameter value.
ValueError
Invalid coordinate transformation parameters.
ValueError
x- and y-coordinate arrays are not the same size.
InvalidTransformError
Invalid coordinate transformation parameters.
InvalidTransformError
Ill-conditioned coordinate transformation parameters.
""".format(docstrings.TWO_OR_MORE_ARGS('naxis', 8),
docstrings.RA_DEC_ORDER(8),
docstrings.RETURNS('sky coordinates, in degrees', 8))
def wcs_pix2world(self, *args, **kwargs):
if self.wcs is None:
raise ValueError("No basic WCS settings were created.")
return self._array_converter(
lambda xy, o: self.wcs.p2s(xy, o)['world'],
'output', *args, **kwargs)
wcs_pix2world.__doc__ = """
Transforms pixel coordinates to world coordinates by doing
only the basic `wcslib`_ transformation.
No `SIP`_ or `distortion paper`_ table lookup correction is
applied. To perform distortion correction, see
`~astropy.wcs.WCS.all_pix2world`,
`~astropy.wcs.WCS.sip_pix2foc`, `~astropy.wcs.WCS.p4_pix2foc`,
or `~astropy.wcs.WCS.pix2foc`.
Parameters
----------
{}
For a transformation that is not two-dimensional, the
two-argument form must be used.
{}
Returns
-------
{}
Raises
------
MemoryError
Memory allocation failed.
SingularMatrixError
Linear transformation matrix is singular.
InconsistentAxisTypesError
Inconsistent or unrecognized coordinate axis types.
ValueError
Invalid parameter value.
ValueError
Invalid coordinate transformation parameters.
ValueError
x- and y-coordinate arrays are not the same size.
InvalidTransformError
Invalid coordinate transformation parameters.
InvalidTransformError
Ill-conditioned coordinate transformation parameters.
Notes
-----
The order of the axes for the result is determined by the
``CTYPEia`` keywords in the FITS header, therefore it may not
always be of the form (*ra*, *dec*). The
`~astropy.wcs.Wcsprm.lat`, `~astropy.wcs.Wcsprm.lng`,
`~astropy.wcs.Wcsprm.lattyp` and `~astropy.wcs.Wcsprm.lngtyp`
members can be used to determine the order of the axes.
""".format(docstrings.TWO_OR_MORE_ARGS('naxis', 8),
docstrings.RA_DEC_ORDER(8),
docstrings.RETURNS('world coordinates, in degrees', 8))
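    # A hedged sketch contrasting the core-only transform above with the
    # full pipeline (``w`` stands for a WCS carrying SIP coefficients; the
    # pixel values are placeholders):
    #
    #     core = w.wcs_pix2world([[100.0, 200.0]], 1)  # linear + projection only
    #     full = w.all_pix2world([[100.0, 200.0]], 1)  # adds SIP / lookup tables
    #     # the two agree exactly only when ``w.has_distortion`` is False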
def _all_world2pix(self, world, origin, tolerance, maxiter, adaptive,
detect_divergence, quiet):
# ############################################################
# # DESCRIPTION OF THE NUMERICAL METHOD ##
# ############################################################
# In this section I will outline the method of solving
# the inverse problem of converting world coordinates to
# pixel coordinates (*inverse* of the direct transformation
# `all_pix2world`) and I will summarize some of the aspects
# of the method proposed here and some of the issues of the
# original `all_world2pix` (in relation to this method)
# discussed in https://github.com/astropy/astropy/issues/1977
# A more detailed discussion can be found here:
# https://github.com/astropy/astropy/pull/2373
#
#
# ### Background ###
#
#
# I will refer here to the [SIP Paper]
# (http://fits.gsfc.nasa.gov/registry/sip/SIP_distortion_v1_0.pdf).
# According to this paper, the effect of distortions as
# described in *their* equation (1) is:
#
# (1) x = CD*(u+f(u)),
#
# where `x` is a *vector* of "intermediate spherical
# coordinates" (equivalent to (x,y) in the paper) and `u`
# is a *vector* of "pixel coordinates", and `f` is a vector
# function describing geometrical distortions
        # (see equations 2 and 3 in the SIP Paper).
# However, I prefer to use `w` for "intermediate world
# coordinates", `x` for pixel coordinates, and assume that
# transformation `W` performs the **linear**
# (CD matrix + projection onto celestial sphere) part of the
# conversion from pixel coordinates to world coordinates.
# Then we can re-write (1) as:
#
# (2) w = W*(x+f(x)) = T(x)
#
# In `astropy.wcs.WCS` transformation `W` is represented by
# the `wcs_pix2world` member, while the combined ("total")
# transformation (linear part + distortions) is performed by
# `all_pix2world`. Below I summarize the notations and their
# equivalents in `astropy.wcs.WCS`:
#
# | Equation term | astropy.WCS/meaning |
# | ------------- | ---------------------------- |
# | `x` | pixel coordinates |
# | `w` | world coordinates |
# | `W` | `wcs_pix2world()` |
# | `W^{-1}` | `wcs_world2pix()` |
# | `T` | `all_pix2world()` |
# | `x+f(x)` | `pix2foc()` |
#
#
# ### Direct Solving of Equation (2) ###
#
#
# In order to find the pixel coordinates that correspond to
# given world coordinates `w`, it is necessary to invert
# equation (2): `x=T^{-1}(w)`, or solve equation `w==T(x)`
# for `x`. However, this approach has the following
# disadvantages:
# 1. It requires unnecessary transformations (see next
# section).
# 2. It is prone to "RA wrapping" issues as described in
# https://github.com/astropy/astropy/issues/1977
# (essentially because `all_pix2world` may return points with
# a different phase than user's input `w`).
#
#
# ### Description of the Method Used here ###
#
#
# By applying inverse linear WCS transformation (`W^{-1}`)
# to both sides of equation (2) and introducing notation `x'`
        # (prime) for the pixel coordinates obtained from the world
# coordinates by applying inverse *linear* WCS transformation
# ("focal plane coordinates"):
#
# (3) x' = W^{-1}(w)
#
# we obtain the following equation:
#
# (4) x' = x+f(x),
#
# or,
#
# (5) x = x'-f(x)
#
# This equation is well suited for solving using the method
# of fixed-point iterations
# (http://en.wikipedia.org/wiki/Fixed-point_iteration):
#
# (6) x_{i+1} = x'-f(x_i)
#
# As an initial value of the pixel coordinate `x_0` we take
# "focal plane coordinate" `x'=W^{-1}(w)=wcs_world2pix(w)`.
# We stop iterations when `|x_{i+1}-x_i|<tolerance`. We also
# consider the process to be diverging if
# `|x_{i+1}-x_i|>|x_i-x_{i-1}|`
# **when** `|x_{i+1}-x_i|>=tolerance` (when current
# approximation is close to the true solution,
# `|x_{i+1}-x_i|>|x_i-x_{i-1}|` may be due to rounding errors
# and we ignore such "divergences" when
# `|x_{i+1}-x_i|<tolerance`). It may appear that checking for
# `|x_{i+1}-x_i|<tolerance` in order to ignore divergence is
# unnecessary since the iterative process should stop anyway,
# however, the proposed implementation of this iterative
# process is completely vectorized and, therefore, we may
# continue iterating over *some* points even though they have
# converged to within a specified tolerance (while iterating
# over other points that have not yet converged to
# a solution).
#
# In order to efficiently implement iterative process (6)
# using available methods in `astropy.wcs.WCS`, we add and
# subtract `x_i` from the right side of equation (6):
#
# (7) x_{i+1} = x'-(x_i+f(x_i))+x_i = x'-pix2foc(x_i)+x_i,
#
# where `x'=wcs_world2pix(w)` and it is computed only *once*
# before the beginning of the iterative process (and we also
# set `x_0=x'`). By using `pix2foc` at each iteration instead
# of `all_pix2world` we get about 25% increase in performance
# (by not performing the linear `W` transformation at each
# step) and we also avoid the "RA wrapping" issue described
# above (by working in focal plane coordinates and avoiding
# pix->world transformations).
#
# As an added benefit, the process converges to the correct
# solution in just one iteration when distortions are not
# present (compare to
# https://github.com/astropy/astropy/issues/1977 and
# https://github.com/astropy/astropy/pull/2294): in this case
# `pix2foc` is the identical transformation
# `x_i=pix2foc(x_i)` and from equation (7) we get:
#
# x' = x_0 = wcs_world2pix(w)
# x_1 = x' - pix2foc(x_0) + x_0 = x' - pix2foc(x') + x' = x'
# = wcs_world2pix(w) = x_0
# =>
# |x_1-x_0| = 0 < tolerance (with tolerance > 0)
#
# However, for performance reasons, it is still better to
# avoid iterations altogether and return the exact linear
# solution (`wcs_world2pix`) right-away when non-linear
# distortions are not present by checking that attributes
# `sip`, `cpdis1`, `cpdis2`, `det2im1`, and `det2im2` are
# *all* `None`.
#
#
# ### Outline of the Algorithm ###
#
#
# While the proposed code is relatively long (considering
# the simplicity of the algorithm), this is due to: 1)
# checking if iterative solution is necessary at all; 2)
# checking for divergence; 3) re-implementation of the
# completely vectorized algorithm as an "adaptive" vectorized
# algorithm (for cases when some points diverge for which we
# want to stop iterations). In my tests, the adaptive version
# of the algorithm is about 50% slower than non-adaptive
# version for all HST images.
#
# The essential part of the vectorized non-adaptive algorithm
# (without divergence and other checks) can be described
# as follows:
#
# pix0 = self.wcs_world2pix(world, origin)
# pix = pix0.copy() # 0-order solution
#
# for k in range(maxiter):
# # find correction to the previous solution:
# dpix = self.pix2foc(pix, origin) - pix0
#
# # compute norm (L2) of the correction:
# dn = np.linalg.norm(dpix, axis=1)
#
# # apply correction:
# pix -= dpix
#
# # check convergence:
# if np.max(dn) < tolerance:
# break
#
# return pix
#
# Here, the input parameter `world` can be a `MxN` array
# where `M` is the number of coordinate axes in WCS and `N`
# is the number of points to be converted simultaneously to
# image coordinates.
#
#
# ### IMPORTANT NOTE: ###
#
# If, in the future releases of the `~astropy.wcs`,
# `pix2foc` will not apply all the required distortion
# corrections then in the code below, calls to `pix2foc` will
# have to be replaced with
# wcs_world2pix(all_pix2world(pix_list, origin), origin)
#
# ############################################################
# # INITIALIZE ITERATIVE PROCESS: ##
# ############################################################
# initial approximation (linear WCS based only)
pix0 = self.wcs_world2pix(world, origin)
# Check that an iterative solution is required at all
# (when any of the non-CD-matrix-based corrections are
# present). If not required return the initial
# approximation (pix0).
if not self.has_distortion:
# No non-WCS corrections detected so
# simply return initial approximation:
return pix0
pix = pix0.copy() # 0-order solution
# initial correction:
dpix = self.pix2foc(pix, origin) - pix0
# Update initial solution:
pix -= dpix
# Norm (L2) squared of the correction:
dn = np.sum(dpix*dpix, axis=1)
dnprev = dn.copy() # if adaptive else dn
tol2 = tolerance**2
# Prepare for iterative process
k = 1
ind = None
inddiv = None
# Turn off numpy runtime warnings for 'invalid' and 'over':
old_invalid = np.geterr()['invalid']
old_over = np.geterr()['over']
np.seterr(invalid='ignore', over='ignore')
# ############################################################
# # NON-ADAPTIVE ITERATIONS: ##
# ############################################################
if not adaptive:
# Fixed-point iterations:
while (np.nanmax(dn) >= tol2 and k < maxiter):
# Find correction to the previous solution:
dpix = self.pix2foc(pix, origin) - pix0
# Compute norm (L2) squared of the correction:
dn = np.sum(dpix*dpix, axis=1)
# Check for divergence (we do this in two stages
# to optimize performance for the most common
# scenario when successive approximations converge):
if detect_divergence:
divergent = (dn >= dnprev)
if np.any(divergent):
# Find solutions that have not yet converged:
slowconv = (dn >= tol2)
inddiv, = np.where(divergent & slowconv)
if inddiv.shape[0] > 0:
# Update indices of elements that
# still need correction:
conv = (dn < dnprev)
iconv = np.where(conv)
# Apply correction:
dpixgood = dpix[iconv]
pix[iconv] -= dpixgood
dpix[iconv] = dpixgood
# For the next iteration choose
# non-divergent points that have not yet
# converged to the requested accuracy:
ind, = np.where(slowconv & conv)
pix0 = pix0[ind]
dnprev[ind] = dn[ind]
k += 1
# Switch to adaptive iterations:
adaptive = True
break
# Save current correction magnitudes for later:
dnprev = dn
# Apply correction:
pix -= dpix
k += 1
# ############################################################
# # ADAPTIVE ITERATIONS: ##
# ############################################################
if adaptive:
if ind is None:
ind, = np.where(np.isfinite(pix).all(axis=1))
pix0 = pix0[ind]
# "Adaptive" fixed-point iterations:
while (ind.shape[0] > 0 and k < maxiter):
# Find correction to the previous solution:
dpixnew = self.pix2foc(pix[ind], origin) - pix0
# Compute norm (L2) of the correction:
dnnew = np.sum(np.square(dpixnew), axis=1)
# Bookkeeping of corrections:
dnprev[ind] = dn[ind].copy()
dn[ind] = dnnew
if detect_divergence:
# Find indices of pixels that are converging:
conv = (dnnew < dnprev[ind])
iconv = np.where(conv)
iiconv = ind[iconv]
# Apply correction:
dpixgood = dpixnew[iconv]
pix[iiconv] -= dpixgood
dpix[iiconv] = dpixgood
# Find indices of solutions that have not yet
# converged to the requested accuracy
# AND that do not diverge:
subind, = np.where((dnnew >= tol2) & conv)
else:
# Apply correction:
pix[ind] -= dpixnew
dpix[ind] = dpixnew
# Find indices of solutions that have not yet
# converged to the requested accuracy:
subind, = np.where(dnnew >= tol2)
# Choose solutions that need more iterations:
ind = ind[subind]
pix0 = pix0[subind]
k += 1
# ############################################################
# # FINAL DETECTION OF INVALID, DIVERGING, ##
# # AND FAILED-TO-CONVERGE POINTS ##
# ############################################################
# Identify diverging and/or invalid points:
invalid = ((~np.all(np.isfinite(pix), axis=1)) &
(np.all(np.isfinite(world), axis=1)))
# When detect_divergence==False, dnprev is outdated
# (it is the norm of the very first correction).
# Still better than nothing...
inddiv, = np.where(((dn >= tol2) & (dn >= dnprev)) | invalid)
if inddiv.shape[0] == 0:
inddiv = None
# Identify points that did not converge within 'maxiter'
# iterations:
if k >= maxiter:
ind, = np.where((dn >= tol2) & (dn < dnprev) & (~invalid))
if ind.shape[0] == 0:
ind = None
else:
ind = None
# Restore previous numpy error settings:
np.seterr(invalid=old_invalid, over=old_over)
# ############################################################
# # RAISE EXCEPTION IF DIVERGING OR TOO SLOWLY CONVERGING ##
# # DATA POINTS HAVE BEEN DETECTED: ##
# ############################################################
if (ind is not None or inddiv is not None) and not quiet:
if inddiv is None:
raise NoConvergence(
"'WCS.all_world2pix' failed to "
"converge to the requested accuracy after {:d} "
"iterations.".format(k), best_solution=pix,
accuracy=np.abs(dpix), niter=k,
slow_conv=ind, divergent=None)
else:
raise NoConvergence(
"'WCS.all_world2pix' failed to "
"converge to the requested accuracy.\n"
"After {:d} iterations, the solution is diverging "
"at least for one input point."
.format(k), best_solution=pix,
accuracy=np.abs(dpix), niter=k,
slow_conv=ind, divergent=inddiv)
return pix
@deprecated_renamed_argument('accuracy', 'tolerance', '4.3')
def all_world2pix(self, *args, tolerance=1e-4, maxiter=20, adaptive=False,
detect_divergence=True, quiet=False, **kwargs):
if self.wcs is None:
raise ValueError("No basic WCS settings were created.")
return self._array_converter(
lambda *args, **kwargs:
self._all_world2pix(
*args, tolerance=tolerance, maxiter=maxiter,
adaptive=adaptive, detect_divergence=detect_divergence,
quiet=quiet),
'input', *args, **kwargs
)
all_world2pix.__doc__ = """
        all_world2pix(*args, tolerance=1.0e-4, maxiter=20,
adaptive=False, detect_divergence=True, quiet=False)
Transforms world coordinates to pixel coordinates, using
numerical iteration to invert the full forward transformation
`~astropy.wcs.WCS.all_pix2world` with complete
distortion model.
Parameters
----------
{0}
For a transformation that is not two-dimensional, the
two-argument form must be used.
{1}
tolerance : float, optional (default = 1.0e-4)
Tolerance of solution. Iteration terminates when the
iterative solver estimates that the "true solution" is
            within this many pixels of the current estimate; more
specifically, when the correction to the solution found
during the previous iteration is smaller
(in the sense of the L2 norm) than ``tolerance``.
maxiter : int, optional (default = 20)
Maximum number of iterations allowed to reach a solution.
quiet : bool, optional (default = False)
Do not throw :py:class:`NoConvergence` exceptions when
the method does not converge to a solution with the
required accuracy within a specified number of maximum
iterations set by ``maxiter`` parameter. Instead,
simply return the found solution.
Other Parameters
----------------
adaptive : bool, optional (default = False)
Specifies whether to adaptively select only points that
did not converge to a solution within the required
accuracy for the next iteration. Default is recommended
for HST as well as most other instruments.
.. note::
The :py:meth:`all_world2pix` uses a vectorized
implementation of the method of consecutive
approximations (see ``Notes`` section below) in which it
                iterates over *all* input points *regardless of individual convergence* until
the required accuracy has been reached for *all* input
points. In some cases it may be possible that
*almost all* points have reached the required accuracy
                but there are only a few input data points for
which additional iterations may be needed (this
depends mostly on the characteristics of the geometric
distortions for a given instrument). In this situation
it may be advantageous to set ``adaptive`` = `True` in
which case :py:meth:`all_world2pix` will continue
iterating *only* over the points that have not yet
converged to the required accuracy. However, for the
HST's ACS/WFC detector, which has the strongest
distortions of all HST instruments, testing has
                shown that enabling this option would lead to about a
50-100% penalty in computational time (depending on
specifics of the image, geometric distortions, and
number of input points to be converted). Therefore,
                for HST and possibly other instruments, it is recommended
to set ``adaptive`` = `False`. The only danger in
getting this setting wrong will be a performance
penalty.
.. note::
When ``detect_divergence`` is `True`,
:py:meth:`all_world2pix` will automatically switch
to the adaptive algorithm once divergence has been
detected.
detect_divergence : bool, optional (default = True)
Specifies whether to perform a more detailed analysis
of the convergence to a solution. Normally
:py:meth:`all_world2pix` may not achieve the required
accuracy if either the ``tolerance`` or ``maxiter`` arguments
are too low. However, it may happen that for some
geometric distortions the conditions of convergence for
            the method of consecutive approximations used by
:py:meth:`all_world2pix` may not be satisfied, in which
case consecutive approximations to the solution will
diverge regardless of the ``tolerance`` or ``maxiter``
settings.
When ``detect_divergence`` is `False`, these divergent
points will be detected as not having achieved the
required accuracy (without further details). In addition,
if ``adaptive`` is `False` then the algorithm will not
know that the solution (for specific points) is diverging
and will continue iterating and trying to "improve"
diverging solutions. This may result in ``NaN`` or
``Inf`` values in the return results (in addition to a
performance penalty). Even when ``detect_divergence``
is `False`, :py:meth:`all_world2pix`, at the end of the
iterative process, will identify invalid results
(``NaN`` or ``Inf``) as "diverging" solutions and will
raise :py:class:`NoConvergence` unless the ``quiet``
parameter is set to `True`.
When ``detect_divergence`` is `True`,
:py:meth:`all_world2pix` will detect points for which
the current correction to the coordinates is larger than
the correction applied during the previous iteration
**if** the requested accuracy **has not yet been
achieved**. In this case, if ``adaptive`` is `True`,
these points will be excluded from further iterations and
if ``adaptive`` is `False`, :py:meth:`all_world2pix` will
automatically switch to the adaptive algorithm. Thus, the
reported divergent solution will be the latest converging
solution computed immediately *before* divergence
has been detected.
.. note::
When accuracy has been achieved, small increases in
current corrections may be possible due to rounding
errors (when ``adaptive`` is `False`) and such
increases will be ignored.
.. note::
Based on our testing using HST ACS/WFC images, setting
``detect_divergence`` to `True` will incur about 5-20%
performance penalty with the larger penalty
corresponding to ``adaptive`` set to `True`.
Because the benefits of enabling this
feature outweigh the small performance penalty,
especially when ``adaptive`` = `False`, it is
recommended to set ``detect_divergence`` to `True`,
unless extensive testing of the distortion models for
images from specific instruments show a good stability
of the numerical method for a wide range of
coordinates (even outside the image itself).
.. note::
Indices of the diverging inverse solutions will be
reported in the ``divergent`` attribute of the
raised :py:class:`NoConvergence` exception object.
Returns
-------
{2}
Notes
-----
The order of the axes for the input world array is determined by
the ``CTYPEia`` keywords in the FITS header, therefore it may
not always be of the form (*ra*, *dec*). The
`~astropy.wcs.Wcsprm.lat`, `~astropy.wcs.Wcsprm.lng`,
`~astropy.wcs.Wcsprm.lattyp`, and
`~astropy.wcs.Wcsprm.lngtyp`
members can be used to determine the order of the axes.
Using the method of fixed-point iterations (consecutive approximations), we
iterate starting with the initial approximation, which is
computed using the non-distortion-aware
:py:meth:`wcs_world2pix` (or equivalent).
The :py:meth:`all_world2pix` function uses a vectorized
implementation of the method of consecutive approximations and
therefore it is highly efficient (>30x) when *all* data points
that need to be converted from sky coordinates to image
coordinates are passed at *once*. Therefore, it is advisable,
whenever possible, to pass as input a long array of all points
that need to be converted to :py:meth:`all_world2pix` instead
of calling :py:meth:`all_world2pix` for each data point. Also
see the note to the ``adaptive`` parameter.
Raises
------
NoConvergence
The method did not converge to a
solution to the required accuracy within a specified
number of maximum iterations set by the ``maxiter``
parameter. To turn off this exception, set ``quiet`` to
`True`. Indices of the points for which the requested
accuracy was not achieved (if any) will be listed in the
``slow_conv`` attribute of the
raised :py:class:`NoConvergence` exception object.
See :py:class:`NoConvergence` documentation for
more details.
MemoryError
Memory allocation failed.
SingularMatrixError
Linear transformation matrix is singular.
InconsistentAxisTypesError
Inconsistent or unrecognized coordinate axis types.
ValueError
Invalid parameter value.
ValueError
Invalid coordinate transformation parameters.
ValueError
x- and y-coordinate arrays are not the same size.
InvalidTransformError
Invalid coordinate transformation parameters.
InvalidTransformError
Ill-conditioned coordinate transformation parameters.
Examples
--------
>>> import astropy.io.fits as fits
>>> import astropy.wcs as wcs
>>> import numpy as np
>>> import os
>>> filename = os.path.join(wcs.__path__[0], 'tests/data/j94f05bgq_flt.fits')
>>> hdulist = fits.open(filename)
>>> w = wcs.WCS(hdulist[('sci',1)].header, hdulist)
>>> hdulist.close()
>>> ra, dec = w.all_pix2world([1,2,3], [1,1,1], 1)
>>> print(ra) # doctest: +FLOAT_CMP
[ 5.52645627 5.52649663 5.52653698]
>>> print(dec) # doctest: +FLOAT_CMP
[-72.05171757 -72.05171276 -72.05170795]
>>> radec = w.all_pix2world([[1,1], [2,1], [3,1]], 1)
>>> print(radec) # doctest: +FLOAT_CMP
[[ 5.52645627 -72.05171757]
[ 5.52649663 -72.05171276]
[ 5.52653698 -72.05170795]]
>>> x, y = w.all_world2pix(ra, dec, 1)
>>> print(x) # doctest: +FLOAT_CMP
[ 1.00000238 2.00000237 3.00000236]
>>> print(y) # doctest: +FLOAT_CMP
[ 0.99999996 0.99999997 0.99999997]
>>> xy = w.all_world2pix(radec, 1)
>>> print(xy) # doctest: +FLOAT_CMP
[[ 1.00000238 0.99999996]
[ 2.00000237 0.99999997]
[ 3.00000236 0.99999997]]
>>> xy = w.all_world2pix(radec, 1, maxiter=3,
... tolerance=1.0e-10, quiet=False)
Traceback (most recent call last):
...
NoConvergence: 'WCS.all_world2pix' failed to converge to the
requested accuracy. After 3 iterations, the solution is
diverging at least for one input point.
>>> # Now try to use some diverging data:
>>> divradec = w.all_pix2world([[1.0, 1.0],
... [10000.0, 50000.0],
... [3.0, 1.0]], 1)
>>> print(divradec) # doctest: +FLOAT_CMP
[[ 5.52645627 -72.05171757]
[ 7.15976932 -70.8140779 ]
[ 5.52653698 -72.05170795]]
>>> # First, turn detect_divergence on:
>>> try: # doctest: +FLOAT_CMP
... xy = w.all_world2pix(divradec, 1, maxiter=20,
... tolerance=1.0e-4, adaptive=False,
... detect_divergence=True,
... quiet=False)
... except wcs.wcs.NoConvergence as e:
... print("Indices of diverging points: {{0}}"
... .format(e.divergent))
... print("Indices of poorly converging points: {{0}}"
... .format(e.slow_conv))
... print("Best solution:\\n{{0}}".format(e.best_solution))
... print("Achieved accuracy:\\n{{0}}".format(e.accuracy))
Indices of diverging points: [1]
Indices of poorly converging points: None
Best solution:
[[ 1.00000238e+00 9.99999965e-01]
[ -1.99441636e+06 1.44309097e+06]
[ 3.00000236e+00 9.99999966e-01]]
Achieved accuracy:
[[ 6.13968380e-05 8.59638593e-07]
[ 8.59526812e+11 6.61713548e+11]
[ 6.09398446e-05 8.38759724e-07]]
>>> raise e
Traceback (most recent call last):
...
NoConvergence: 'WCS.all_world2pix' failed to converge to the
requested accuracy. After 5 iterations, the solution is
diverging at least for one input point.
>>> # This time turn detect_divergence off:
>>> try: # doctest: +FLOAT_CMP
... xy = w.all_world2pix(divradec, 1, maxiter=20,
... tolerance=1.0e-4, adaptive=False,
... detect_divergence=False,
... quiet=False)
... except wcs.wcs.NoConvergence as e:
... print("Indices of diverging points: {{0}}"
... .format(e.divergent))
... print("Indices of poorly converging points: {{0}}"
... .format(e.slow_conv))
... print("Best solution:\\n{{0}}".format(e.best_solution))
... print("Achieved accuracy:\\n{{0}}".format(e.accuracy))
Indices of diverging points: [1]
Indices of poorly converging points: None
Best solution:
[[ 1.00000009 1. ]
[ nan nan]
[ 3.00000009 1. ]]
Achieved accuracy:
[[ 2.29417358e-06 3.21222995e-08]
[ nan nan]
[ 2.27407877e-06 3.13005639e-08]]
>>> raise e
Traceback (most recent call last):
...
NoConvergence: 'WCS.all_world2pix' failed to converge to the
requested accuracy. After 6 iterations, the solution is
diverging at least for one input point.
""".format(docstrings.TWO_OR_MORE_ARGS('naxis', 8),
docstrings.RA_DEC_ORDER(8),
docstrings.RETURNS('pixel coordinates', 8))
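# The ``Notes`` section above describes inverting the full forward
# transformation with fixed-point (consecutive-approximation) iterations.
# A minimal sketch of that idea is shown below for illustration only; it is
# not the implementation used by ``all_world2pix`` and it assumes a
# distortion-aware WCS object ``w`` plus an (N, 2) array ``world``:
#
#     import numpy as np
#
#     def invert_by_fixed_point(w, world, origin=1, tolerance=1e-4, maxiter=20):
#         pix0 = w.wcs_world2pix(world, origin)  # distortion-free first guess
#         pix = pix0.copy()
#         for _ in range(maxiter):
#             # At the solution, pix2foc(pix) equals wcs_world2pix(world),
#             # so the residual below goes to zero.
#             dpix = w.pix2foc(pix, origin) - pix0
#             pix = pix - dpix
#             if np.max(np.sqrt((dpix ** 2).sum(axis=1))) < tolerance:
#                 break
#         return pix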
def wcs_world2pix(self, *args, **kwargs):
if self.wcs is None:
raise ValueError("No basic WCS settings were created.")
return self._array_converter(
lambda xy, o: self.wcs.s2p(xy, o)['pixcrd'],
'input', *args, **kwargs)
wcs_world2pix.__doc__ = """
Transforms world coordinates to pixel coordinates, using only
the basic `wcslib`_ WCS transformation. No `SIP`_ or
`distortion paper`_ table lookup transformation is applied.
Parameters
----------
{}
For a transformation that is not two-dimensional, the
two-argument form must be used.
{}
Returns
-------
{}
Notes
-----
The order of the axes for the input world array is determined by
the ``CTYPEia`` keywords in the FITS header, therefore it may
not always be of the form (*ra*, *dec*). The
`~astropy.wcs.Wcsprm.lat`, `~astropy.wcs.Wcsprm.lng`,
`~astropy.wcs.Wcsprm.lattyp` and `~astropy.wcs.Wcsprm.lngtyp`
members can be used to determine the order of the axes.
Raises
------
MemoryError
Memory allocation failed.
SingularMatrixError
Linear transformation matrix is singular.
InconsistentAxisTypesError
Inconsistent or unrecognized coordinate axis types.
ValueError
Invalid parameter value.
ValueError
Invalid coordinate transformation parameters.
ValueError
x- and y-coordinate arrays are not the same size.
InvalidTransformError
Invalid coordinate transformation parameters.
InvalidTransformError
Ill-conditioned coordinate transformation parameters.
""".format(docstrings.TWO_OR_MORE_ARGS('naxis', 8),
docstrings.RA_DEC_ORDER(8),
docstrings.RETURNS('pixel coordinates', 8))
def pix2foc(self, *args):
return self._array_converter(self._pix2foc, None, *args)
pix2foc.__doc__ = """
Convert pixel coordinates to focal plane coordinates using the
`SIP`_ polynomial distortion convention and `distortion
paper`_ table-lookup correction.
The output is in absolute pixel coordinates, not relative to
``CRPIX``.
Parameters
----------
{}
Returns
-------
{}
Raises
------
MemoryError
Memory allocation failed.
ValueError
Invalid coordinate transformation parameters.
""".format(docstrings.TWO_OR_MORE_ARGS('2', 8),
docstrings.RETURNS('focal coordinates', 8))
def p4_pix2foc(self, *args):
return self._array_converter(self._p4_pix2foc, None, *args)
p4_pix2foc.__doc__ = """
Convert pixel coordinates to focal plane coordinates using
`distortion paper`_ table-lookup correction.
The output is in absolute pixel coordinates, not relative to
``CRPIX``.
Parameters
----------
{}
Returns
-------
{}
Raises
------
MemoryError
Memory allocation failed.
ValueError
Invalid coordinate transformation parameters.
""".format(docstrings.TWO_OR_MORE_ARGS('2', 8),
docstrings.RETURNS('focal coordinates', 8))
def det2im(self, *args):
return self._array_converter(self._det2im, None, *args)
det2im.__doc__ = """
Convert detector coordinates to image plane coordinates using
`distortion paper`_ table-lookup correction.
The output is in absolute pixel coordinates, not relative to
``CRPIX``.
Parameters
----------
{}
Returns
-------
{}
Raises
------
MemoryError
Memory allocation failed.
ValueError
Invalid coordinate transformation parameters.
""".format(docstrings.TWO_OR_MORE_ARGS('2', 8),
docstrings.RETURNS('pixel coordinates', 8))
def sip_pix2foc(self, *args):
if self.sip is None:
if len(args) == 2:
return args[0]
elif len(args) == 3:
return args[:2]
else:
raise TypeError("Wrong number of arguments")
return self._array_converter(self.sip.pix2foc, None, *args)
sip_pix2foc.__doc__ = """
Convert pixel coordinates to focal plane coordinates using the
`SIP`_ polynomial distortion convention.
The output is in pixel coordinates, relative to ``CRPIX``.
FITS WCS `distortion paper`_ table lookup correction is not
applied, even if that information existed in the FITS file
that initialized this :class:`~astropy.wcs.WCS` object. To
correct for that, use `~astropy.wcs.WCS.pix2foc` or
`~astropy.wcs.WCS.p4_pix2foc`.
Parameters
----------
{}
Returns
-------
{}
Raises
------
MemoryError
Memory allocation failed.
ValueError
Invalid coordinate transformation parameters.
""".format(docstrings.TWO_OR_MORE_ARGS('2', 8),
docstrings.RETURNS('focal coordinates', 8))
def sip_foc2pix(self, *args):
if self.sip is None:
if len(args) == 2:
return args[0]
elif len(args) == 3:
return args[:2]
else:
raise TypeError("Wrong number of arguments")
return self._array_converter(self.sip.foc2pix, None, *args)
sip_foc2pix.__doc__ = """
Convert focal plane coordinates to pixel coordinates using the
`SIP`_ polynomial distortion convention.
FITS WCS `distortion paper`_ table lookup distortion
correction is not applied, even if that information existed in
the FITS file that initialized this `~astropy.wcs.WCS` object.
Parameters
----------
{}
Returns
-------
{}
Raises
------
MemoryError
Memory allocation failed.
ValueError
Invalid coordinate transformation parameters.
""".format(docstrings.TWO_OR_MORE_ARGS('2', 8),
docstrings.RETURNS('pixel coordinates', 8))
def proj_plane_pixel_scales(self):
"""
Calculate pixel scales along each axis of the image pixel at
the ``CRPIX`` location once it is projected onto the
"plane of intermediate world coordinates" as defined in
`Greisen & Calabretta 2002, A&A, 395, 1061 <https://ui.adsabs.harvard.edu/abs/2002A%26A...395.1061G>`_.
.. note::
This method is concerned **only** about the transformation
"image plane"->"projection plane" and **not** about the
transformation "celestial sphere"->"projection plane"->"image plane".
Therefore, this function ignores distortions arising due to
the non-linear nature of most projections.
.. note::
This method only returns sensible answers if the WCS contains
celestial axes, i.e., the `~astropy.wcs.WCS.celestial` WCS object.
Returns
-------
scale : list of `~astropy.units.Quantity`
A vector of projection plane increments corresponding to each
pixel side (axis).
See Also
--------
astropy.wcs.utils.proj_plane_pixel_scales
""" # noqa: E501
from astropy.wcs.utils import proj_plane_pixel_scales # Avoid circular import
values = proj_plane_pixel_scales(self)
units = [u.Unit(x) for x in self.wcs.cunit]
return [value * unit for (value, unit) in zip(values, units)] # Can have different units
def proj_plane_pixel_area(self):
"""
For a **celestial** WCS (see `astropy.wcs.WCS.celestial`), returns pixel
area of the image pixel at the ``CRPIX`` location once it is projected
onto the "plane of intermediate world coordinates" as defined in
`Greisen & Calabretta 2002, A&A, 395, 1061 <https://ui.adsabs.harvard.edu/abs/2002A%26A...395.1061G>`_.
.. note::
This function is concerned **only** about the transformation
"image plane"->"projection plane" and **not** about the
transformation "celestial sphere"->"projection plane"->"image plane".
Therefore, this function ignores distortions arising due to
the non-linear nature of most projections.
.. note::
This method only returns sensible answers if the WCS contains
celestial axes, i.e., the `~astropy.wcs.WCS.celestial` WCS object.
Returns
-------
area : `~astropy.units.Quantity`
Area (in the projection plane) of the pixel at ``CRPIX`` location.
Raises
------
ValueError
Pixel area is defined only for 2D pixels. Most likely the
`~astropy.wcs.Wcsprm.cd` matrix of the `~astropy.wcs.WCS.celestial`
WCS is not a square matrix of second order.
Notes
-----
Depending on the application, square root of the pixel area can be used to
represent a single pixel scale of an equivalent square pixel
whose area is equal to the area of a generally non-square pixel.
See Also
--------
astropy.wcs.utils.proj_plane_pixel_area
""" # noqa: E501
from astropy.wcs.utils import proj_plane_pixel_area # Avoid circular import
value = proj_plane_pixel_area(self)
unit = u.Unit(self.wcs.cunit[0]) * u.Unit(self.wcs.cunit[1]) # 2D only
return value * unit
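# Usage sketch (illustrative only, not part of the module): pixel scales and
# pixel area for a simple celestial TAN WCS with 1-arcsecond pixels. Note
# that both helpers are defined as plain methods above, so they are called
# with parentheses here.
#
#     from astropy.wcs import WCS
#     w = WCS(naxis=2)
#     w.wcs.ctype = ['RA---TAN', 'DEC--TAN']
#     w.wcs.cdelt = [-1.0 / 3600, 1.0 / 3600]
#     w.wcs.cunit = ['deg', 'deg']
#     scales = w.proj_plane_pixel_scales()  # two Quantities of ~2.78e-4 deg
#     area = w.proj_plane_pixel_area()      # Quantity of ~7.7e-8 deg**2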
def to_fits(self, relax=False, key=None):
"""
Generate an `~astropy.io.fits.HDUList` object with all of the
information stored in this object. This should be logically identical
to the input FITS file, but it will be normalized in a number of ways.
See `to_header` for some warnings about the output produced.
Parameters
----------
relax : bool or int, optional
Degree of permissiveness:
- `False` (default): Write all extensions that are
considered to be safe and recommended.
- `True`: Write all recognized informal extensions of the
WCS standard.
- `int`: a bit field selecting specific extensions to
write. See :ref:`astropy:relaxwrite` for details.
key : str
The name of a particular WCS transform to use. This may be
either ``' '`` or ``'A'``-``'Z'`` and corresponds to the ``"a"``
part of the ``CTYPEia`` cards.
Returns
-------
hdulist : `~astropy.io.fits.HDUList`
"""
header = self.to_header(relax=relax, key=key)
hdu = fits.PrimaryHDU(header=header)
hdulist = fits.HDUList(hdu)
self._write_det2im(hdulist)
self._write_distortion_kw(hdulist)
return hdulist
def to_header(self, relax=None, key=None):
"""Generate an `astropy.io.fits.Header` object with the basic WCS
and SIP information stored in this object. This should be
logically identical to the input FITS file, but it will be
normalized in a number of ways.
.. warning::
This function does not write out FITS WCS `distortion
paper`_ information, since that requires multiple FITS
header data units. To get a full representation of
everything in this object, use `to_fits`.
Parameters
----------
relax : bool or int, optional
Degree of permissiveness:
- `False` (default): Write all extensions that are
considered to be safe and recommended.
- `True`: Write all recognized informal extensions of the
WCS standard.
- `int`: a bit field selecting specific extensions to
write. See :ref:`astropy:relaxwrite` for details.
If the ``relax`` keyword argument is not given and any
keywords were omitted from the output, an
`~astropy.utils.exceptions.AstropyWarning` is displayed.
To override this, explicitly pass a value to ``relax``.
key : str
The name of a particular WCS transform to use. This may be
either ``' '`` or ``'A'``-``'Z'`` and corresponds to the ``"a"``
part of the ``CTYPEia`` cards.
Returns
-------
header : `astropy.io.fits.Header`
Notes
-----
The output header will almost certainly differ from the input in a
number of respects:
1. The output header only contains WCS-related keywords. In
particular, it does not contain syntactically-required
keywords such as ``SIMPLE``, ``NAXIS``, ``BITPIX``, or
``END``.
2. Deprecated (e.g. ``CROTAn``) or non-standard usage will
be translated to standard (this is partially dependent on
whether ``fix`` was applied).
3. Quantities will be converted to the units used internally,
basically SI with the addition of degrees.
4. Floating-point quantities may be given to a different decimal
precision.
5. Elements of the ``PCi_j`` matrix will be written if and
only if they differ from the unit matrix. Thus, if the
matrix is unity then no elements will be written.
6. Additional keywords such as ``WCSAXES``, ``CUNITia``,
``LONPOLEa`` and ``LATPOLEa`` may appear.
7. The original keycomments will be lost, although
`to_header` tries hard to write meaningful comments.
8. Keyword order may be changed.
"""
# default precision for numerical WCS keywords
precision = WCSHDO_P14 # Defined by C-ext # noqa: F821
display_warning = False
if relax is None:
display_warning = True
relax = False
if relax not in (True, False):
do_sip = relax & WCSHDO_SIP
relax &= ~WCSHDO_SIP
else:
do_sip = relax
relax = WCSHDO_all if relax is True else WCSHDO_safe # Defined by C-ext # noqa: F821
relax = precision | relax
if self.wcs is not None:
if key is not None:
orig_key = self.wcs.alt
self.wcs.alt = key
header_string = self.wcs.to_header(relax)
header = fits.Header.fromstring(header_string)
keys_to_remove = ["", " ", "COMMENT"]
for kw in keys_to_remove:
if kw in header:
del header[kw]
# Check if we can handle TPD distortion correctly
if int(_parsed_version[0]) * 10 + int(_parsed_version[1]) < 71:
for kw, val in header.items():
if kw[:5] in ('CPDIS', 'CQDIS') and val == 'TPD':
warnings.warn(
f"WCS contains a TPD distortion model in {kw}. WCSLIB "
f"{_wcs.__version__} is writing this in a format incompatible with "
f"current versions - please update to 7.4 or use the bundled WCSLIB.",
AstropyWarning)
elif int(_parsed_version[0]) * 10 + int(_parsed_version[1]) < 74:
for kw, val in header.items():
if kw[:5] in ('CPDIS', 'CQDIS') and val == 'TPD':
warnings.warn(
f"WCS contains a TPD distortion model in {kw}, which requires WCSLIB "
f"7.4 or later to store in a FITS header (having {_wcs.__version__}).",
AstropyWarning)
else:
header = fits.Header()
if do_sip and self.sip is not None:
if self.wcs is not None and any(not ctyp.endswith('-SIP') for ctyp in self.wcs.ctype):
self._fix_ctype(header, add_sip=True)
for kw, val in self._write_sip_kw().items():
header[kw] = val
if not do_sip and self.wcs is not None and any(self.wcs.ctype) and self.sip is not None:
# This is called when relax is not False or WCSHDO_SIP
# The default case of ``relax=None`` is handled further in the code.
header = self._fix_ctype(header, add_sip=False)
if display_warning:
full_header = self.to_header(relax=True, key=key)
missing_keys = []
for kw, val in full_header.items():
if kw not in header:
missing_keys.append(kw)
if len(missing_keys):
warnings.warn(
"Some non-standard WCS keywords were excluded: {} "
"Use the ``relax`` kwarg to control this.".format(
', '.join(missing_keys)),
AstropyWarning)
# called when ``relax=None``
# This is different from the case of ``relax=False``.
if any(self.wcs.ctype) and self.sip is not None:
header = self._fix_ctype(header, add_sip=False, log_message=False)
# Finally reset the key. This must be called after ``_fix_ctype``.
if key is not None:
self.wcs.alt = orig_key
return header
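# Round-trip sketch (illustrative only): export a simple WCS with
# ``to_header`` and rebuild an equivalent object from the result. Only
# WCS-related keywords are written, so structural cards such as NAXIS are
# absent from ``hdr``.
#
#     from astropy.wcs import WCS
#     w = WCS(naxis=2)
#     w.wcs.ctype = ['RA---TAN', 'DEC--TAN']
#     w.wcs.crval = [83.63, 22.01]
#     w.wcs.crpix = [512.0, 512.0]
#     w.wcs.cdelt = [-2.8e-4, 2.8e-4]
#     hdr = w.to_header(relax=True)  # relax=True also writes informal extensions
#     w2 = WCS(hdr)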
def _fix_ctype(self, header, add_sip=True, log_message=True):
"""
Parameters
----------
header : `~astropy.io.fits.Header`
FITS header.
add_sip : bool
Flag indicating whether "-SIP" should be added or removed from CTYPE keywords.
Remove "-SIP" from CTYPE when writing out a header with relax=False.
This needs to be done outside ``to_header`` because ``to_header`` runs
twice when ``relax=False`` and the second time ``relax`` is set to ``True``
to display the missing keywords.
If the user requested SIP distortion to be written out add "-SIP" to
CTYPE if it is missing.
"""
_add_sip_to_ctype = """
Inconsistent SIP distortion information is present in the current WCS:
SIP coefficients were detected, but CTYPE is missing "-SIP" suffix,
therefore the current WCS is internally inconsistent.
Because relax has been set to True, the resulting output WCS will have
"-SIP" appended to CTYPE in order to make the header internally consistent.
However, this may produce incorrect astrometry in the output WCS, if
in fact the current WCS is already distortion-corrected.
Therefore, if current WCS is already distortion-corrected (eg, drizzled)
then SIP distortion components should not apply. In that case, for a WCS
that is already distortion-corrected, please remove the SIP coefficients
from the header.
"""
if log_message:
if add_sip:
log.info(_add_sip_to_ctype)
for i in range(1, self.naxis+1):
# strip() must be called here to cover the case of alt key= " "
kw = f'CTYPE{i}{self.wcs.alt}'.strip()
if kw in header:
if add_sip:
val = header[kw].strip("-SIP") + "-SIP"
else:
val = header[kw].strip("-SIP")
header[kw] = val
else:
continue
return header
def to_header_string(self, relax=None):
"""
Identical to `to_header`, but returns a string containing the
header cards.
"""
return str(self.to_header(relax))
def footprint_to_file(self, filename='footprint.reg', color='green',
width=2, coordsys=None):
"""
Writes out a `ds9`_ style regions file. It can be loaded
directly by `ds9`_.
Parameters
----------
filename : str, optional
Output file name - default is ``'footprint.reg'``
color : str, optional
Color to use when plotting the line.
width : int, optional
Width of the region line.
coordsys : str, optional
Coordinate system. If not specified (default), the ``radesys``
value is used. For all possible values, see
http://ds9.si.edu/doc/ref/region.html#RegionFileFormat
"""
comments = ('# Region file format: DS9 version 4.0 \n'
'# global color=green font="helvetica 12 bold '
'select=1 highlite=1 edit=1 move=1 delete=1 '
'include=1 fixed=0 source\n')
coordsys = coordsys or self.wcs.radesys
if coordsys not in ('PHYSICAL', 'IMAGE', 'FK4', 'B1950', 'FK5',
'J2000', 'GALACTIC', 'ECLIPTIC', 'ICRS', 'LINEAR',
'AMPLIFIER', 'DETECTOR'):
raise ValueError("Coordinate system '{}' is not supported. A valid"
" one can be given with the 'coordsys' argument."
.format(coordsys))
with open(filename, mode='w') as f:
f.write(comments)
f.write(f'{coordsys}\n')
f.write('polygon(')
ftpr = self.calc_footprint()
if ftpr is not None:
ftpr.tofile(f, sep=',')
f.write(f') # color={color}, width={width:d} \n')
def _get_naxis(self, header=None):
_naxis = []
if (header is not None and
not isinstance(header, (str, bytes))):
for naxis in itertools.count(1):
try:
_naxis.append(header[f'NAXIS{naxis}'])
except KeyError:
break
if len(_naxis) == 0:
_naxis = [0, 0]
elif len(_naxis) == 1:
_naxis.append(0)
self._naxis = _naxis
def printwcs(self):
print(repr(self))
def __repr__(self):
'''
Return a short description. Simply porting the behavior from
the `printwcs()` method.
'''
description = ["WCS Keywords\n",
f"Number of WCS axes: {self.naxis!r}"]
sfmt = ' : ' + "".join(["{"+f"{i}"+"!r} " for i in range(self.naxis)])
keywords = ['CTYPE', 'CRVAL', 'CRPIX']
values = [self.wcs.ctype, self.wcs.crval, self.wcs.crpix]
for keyword, value in zip(keywords, values):
description.append(keyword+sfmt.format(*value))
if hasattr(self.wcs, 'pc'):
for i in range(self.naxis):
s = ''
for j in range(self.naxis):
s += ''.join(['PC', str(i+1), '_', str(j+1), ' '])
s += sfmt
description.append(s.format(*self.wcs.pc[i]))
s = 'CDELT' + sfmt
description.append(s.format(*self.wcs.cdelt))
elif hasattr(self.wcs, 'cd'):
for i in range(self.naxis):
s = ''
for j in range(self.naxis):
s += "".join(['CD', str(i+1), '_', str(j+1), ' '])
s += sfmt
description.append(s.format(*self.wcs.cd[i]))
description.append(f"NAXIS : {' '.join(map(str, self._naxis))}")
return '\n'.join(description)
def get_axis_types(self):
"""
Similar to `self.wcsprm.axis_types <astropy.wcs.Wcsprm.axis_types>`
but provides the information in a more Python-friendly format.
Returns
-------
result : list of dict
Returns a list of dictionaries, one for each axis, each
containing attributes about the type of that axis.
Each dictionary has the following keys:
- 'coordinate_type':
- None: Non-specific coordinate type.
- 'stokes': Stokes coordinate.
- 'celestial': Celestial coordinate (including ``CUBEFACE``).
- 'spectral': Spectral coordinate.
- 'scale':
- 'linear': Linear axis.
- 'quantized': Quantized axis (``STOKES``, ``CUBEFACE``).
- 'non-linear celestial': Non-linear celestial axis.
- 'non-linear spectral': Non-linear spectral axis.
- 'logarithmic': Logarithmic axis.
- 'tabular': Tabular axis.
- 'group'
- Group number, e.g. lookup table number
- 'number'
- For celestial axes:
- 0: Longitude coordinate.
- 1: Latitude coordinate.
- 2: ``CUBEFACE`` number.
- For lookup tables:
- the axis number in a multidimensional table.
``CTYPEia`` in ``"4-3"`` form with unrecognized algorithm code will
generate an error.
"""
if self.wcs is None:
raise AttributeError(
"This WCS object does not have a wcsprm object.")
coordinate_type_map = {
0: None,
1: 'stokes',
2: 'celestial',
3: 'spectral'}
scale_map = {
0: 'linear',
1: 'quantized',
2: 'non-linear celestial',
3: 'non-linear spectral',
4: 'logarithmic',
5: 'tabular'}
result = []
for axis_type in self.wcs.axis_types:
subresult = {}
coordinate_type = (axis_type // 1000) % 10
subresult['coordinate_type'] = coordinate_type_map[coordinate_type]
scale = (axis_type // 100) % 10
subresult['scale'] = scale_map[scale]
group = (axis_type // 10) % 10
subresult['group'] = group
number = axis_type % 10
subresult['number'] = number
result.append(subresult)
return result
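# For a two-axis celestial TAN WCS, the decoded output of ``get_axis_types``
# looks roughly like the following (illustrative sketch):
#
#     [{'coordinate_type': 'celestial', 'scale': 'non-linear celestial',
#       'group': 0, 'number': 0},
#      {'coordinate_type': 'celestial', 'scale': 'non-linear celestial',
#       'group': 0, 'number': 1}]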
def __reduce__(self):
"""
Support pickling of WCS objects. This is done by serializing
to an in-memory FITS file and dumping that as a string.
"""
hdulist = self.to_fits(relax=True)
buffer = io.BytesIO()
hdulist.writeto(buffer)
dct = self.__dict__.copy()
dct['_alt_wcskey'] = self.wcs.alt
return (__WCS_unpickle__,
(self.__class__, dct, buffer.getvalue(),))
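# Pickling sketch (illustrative only): because ``__reduce__`` serializes the
# object to an in-memory FITS file, a round trip through pickle preserves the
# transformation. ``some_file.fits`` below is a hypothetical input file.
#
#     import pickle
#     from astropy.io import fits
#     from astropy.wcs import WCS
#     w = WCS(fits.getheader('some_file.fits'))
#     w2 = pickle.loads(pickle.dumps(w))
#     assert (w.wcs.crval == w2.wcs.crval).all()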
def dropaxis(self, dropax):
"""
Remove an axis from the WCS.
Parameters
----------
wcs : `~astropy.wcs.WCS`
The WCS with naxis to be chopped to naxis-1
dropax : int
The index of the WCS to drop, counting from 0 (i.e., python convention,
not FITS convention)
Returns
-------
`~astropy.wcs.WCS`
A new `~astropy.wcs.WCS` instance with one axis fewer
"""
inds = list(range(self.wcs.naxis))
inds.pop(dropax)
# axis 0 has special meaning to sub
# if wcs.wcs.ctype == ['RA','DEC','VLSR'], you want
# wcs.sub([1,2]) to get 'RA','DEC' back
return self.sub([i+1 for i in inds])
def swapaxes(self, ax0, ax1):
"""
Swap axes in a WCS.
Parameters
----------
wcs : `~astropy.wcs.WCS`
The WCS to have its axes swapped
ax0 : int
ax1 : int
The indices of the WCS to be swapped, counting from 0 (i.e., python
convention, not FITS convention)
Returns
-------
`~astropy.wcs.WCS`
A new `~astropy.wcs.WCS` instance with the same number of axes,
but two swapped
"""
inds = list(range(self.wcs.naxis))
inds[ax0], inds[ax1] = inds[ax1], inds[ax0]
return self.sub([i+1 for i in inds])
def reorient_celestial_first(self):
"""
Reorient the WCS such that the celestial axes are first, followed by
the spectral axis, followed by any others.
Assumes at least celestial axes are present.
"""
return self.sub([WCSSUB_CELESTIAL, WCSSUB_SPECTRAL, WCSSUB_STOKES]) # Defined by C-ext # noqa: F821 E501
def slice(self, view, numpy_order=True):
"""
Slice a WCS instance using a Numpy slice. The order of the slice should
be reversed (as for the data) compared to the natural WCS order.
Parameters
----------
view : tuple
A tuple containing the same number of slices as the WCS system.
The ``step`` method, the third argument to a slice, is not
presently supported.
numpy_order : bool
Use numpy order, i.e. slice the WCS so that an identical slice
applied to a numpy array will slice the array and WCS in the same
way. If set to `False`, the WCS will be sliced in FITS order,
meaning the first slice will be applied to the *last* numpy index
but the *first* WCS axis.
Returns
-------
wcs_new : `~astropy.wcs.WCS`
A new resampled WCS axis
"""
if hasattr(view, '__len__') and len(view) > self.wcs.naxis:
raise ValueError("Must have # of slices <= # of WCS axes")
elif not hasattr(view, '__len__'): # view MUST be an iterable
view = [view]
if not all(isinstance(x, slice) for x in view):
# We need to drop some dimensions, but this may not always be
# possible with .sub due to correlated axes, so instead we use the
# generalized slicing infrastructure from astropy.wcs.wcsapi.
return SlicedFITSWCS(self, view)
# NOTE: we could in principle use SlicedFITSWCS as above for all slicing,
# but in the simple case where there are no axes dropped, we can just
# create a full WCS object with updated WCS parameters which is faster
# for this specific case and also backward-compatible.
wcs_new = self.deepcopy()
if wcs_new.sip is not None:
sip_crpix = wcs_new.sip.crpix.tolist()
for i, iview in enumerate(view):
if iview.step is not None and iview.step < 0:
raise NotImplementedError("Reversing an axis is not "
"implemented.")
if numpy_order:
wcs_index = self.wcs.naxis - 1 - i
else:
wcs_index = i
if iview.step is not None and iview.start is None:
# Slice from "None" is equivalent to slice from 0 (but one
# might want to downsample, so allow slices with
# None,None,step or None,stop,step)
iview = slice(0, iview.stop, iview.step)
if iview.start is not None:
if iview.step not in (None, 1):
crpix = self.wcs.crpix[wcs_index]
cdelt = self.wcs.cdelt[wcs_index]
# equivalently (keep this comment so you can compare eqns):
# wcs_new.wcs.crpix[wcs_index] =
# (crpix - iview.start)*iview.step + 0.5 - iview.step/2.
crp = ((crpix - iview.start - 1.)/iview.step
+ 0.5 + 1./iview.step/2.)
wcs_new.wcs.crpix[wcs_index] = crp
if wcs_new.sip is not None:
sip_crpix[wcs_index] = crp
wcs_new.wcs.cdelt[wcs_index] = cdelt * iview.step
else:
wcs_new.wcs.crpix[wcs_index] -= iview.start
if wcs_new.sip is not None:
sip_crpix[wcs_index] -= iview.start
try:
# range requires integers but the other attributes can also
# handle arbitrary values, so this needs to be in a try/except.
nitems = len(builtins.range(self._naxis[wcs_index])[iview])
except TypeError as exc:
if 'indices must be integers' not in str(exc):
raise
warnings.warn("NAXIS{} attribute is not updated because at "
"least one index ('{}') is no integer."
"".format(wcs_index, iview), AstropyUserWarning)
else:
wcs_new._naxis[wcs_index] = nitems
if wcs_new.sip is not None:
wcs_new.sip = Sip(self.sip.a, self.sip.b, self.sip.ap, self.sip.bp,
sip_crpix)
return wcs_new
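# Slicing sketch (illustrative only): with ``numpy_order=True`` (the default),
# a slice that keeps every axis simply shifts ``CRPIX``. For a hypothetical
# 2-D image WCS ``w``:
#
#     cutout = w[100:, 50:]  # same as w.slice((slice(100, None), slice(50, None)))
#     # cutout.wcs.crpix == [w.wcs.crpix[0] - 50, w.wcs.crpix[1] - 100]
#
# because the first (numpy) slice applies to the *last* WCS axis.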
def __getitem__(self, item):
# "getitem" is a shortcut for self.slice; it is very limited
# there is no obvious and unambiguous interpretation of wcs[1,2,3]
# We COULD allow wcs[1] to link to wcs.sub([2])
# (wcs[i] -> wcs.sub([i+1])
return self.slice(item)
def __iter__(self):
# Having __getitem__ makes Python think WCS is iterable. However,
# Python first checks whether __iter__ is present, so we can raise an
# exception here.
raise TypeError(f"'{self.__class__.__name__}' object is not iterable")
@property
def axis_type_names(self):
"""
World names for each coordinate axis
Returns
-------
list of str
A list of names along each axis.
"""
names = list(self.wcs.cname)
types = self.wcs.ctype
for i in range(len(names)):
if len(names[i]) > 0:
continue
names[i] = types[i].split('-')[0]
return names
@property
def celestial(self):
"""
A copy of the current WCS with only the celestial axes included
"""
return self.sub([WCSSUB_CELESTIAL]) # Defined by C-ext # noqa: F821
@property
def is_celestial(self):
return self.has_celestial and self.naxis == 2
@property
def has_celestial(self):
try:
return self.wcs.lng >= 0 and self.wcs.lat >= 0
except InconsistentAxisTypesError:
return False
@property
def spectral(self):
"""
A copy of the current WCS with only the spectral axes included
"""
return self.sub([WCSSUB_SPECTRAL]) # Defined by C-ext # noqa: F821
@property
def is_spectral(self):
return self.has_spectral and self.naxis == 1
@property
def has_spectral(self):
try:
return self.wcs.spec >= 0
except InconsistentAxisTypesError:
return False
@property
def has_distortion(self):
"""
Returns `True` if any distortion terms are present.
"""
return (self.sip is not None or
self.cpdis1 is not None or self.cpdis2 is not None or
self.det2im1 is not None and self.det2im2 is not None)
@property
def pixel_scale_matrix(self):
try:
cdelt = np.diag(self.wcs.get_cdelt())
pc = self.wcs.get_pc()
except InconsistentAxisTypesError:
try:
# for non-celestial axes, get_cdelt doesn't work
with warnings.catch_warnings():
warnings.filterwarnings(
'ignore', 'cdelt will be ignored since cd is present', RuntimeWarning)
cdelt = np.dot(self.wcs.cd, np.diag(self.wcs.cdelt))
except AttributeError:
cdelt = np.diag(self.wcs.cdelt)
try:
pc = self.wcs.pc
except AttributeError:
pc = 1
pccd = np.dot(cdelt, pc)
return pccd
def footprint_contains(self, coord, **kwargs):
"""
Determines if a given SkyCoord is contained in the wcs footprint.
Parameters
----------
coord : `~astropy.coordinates.SkyCoord`
The coordinate to check for containment within the WCS footprint.
**kwargs :
Additional arguments to pass to `~astropy.coordinates.SkyCoord.to_pixel`
Returns
-------
response : bool
True means the WCS footprint contains the coordinate, False means it does not.
"""
return coord.contained_by(self, **kwargs)
def __WCS_unpickle__(cls, dct, fits_data):
"""
Unpickles a WCS object from a serialized FITS string.
"""
self = cls.__new__(cls)
buffer = io.BytesIO(fits_data)
hdulist = fits.open(buffer)
naxis = dct.pop('naxis', None)
if naxis:
hdulist[0].header['naxis'] = naxis
naxes = dct.pop('_naxis', [])
for k, na in enumerate(naxes):
hdulist[0].header[f'naxis{k + 1:d}'] = na
kwargs = dct.pop('_init_kwargs', {})
self.__dict__.update(dct)
wcskey = dct.pop('_alt_wcskey', ' ')
WCS.__init__(self, hdulist[0].header, hdulist, key=wcskey, **kwargs)
self.pixel_bounds = dct.get('_pixel_bounds', None)
return self
def find_all_wcs(header, relax=True, keysel=None, fix=True,
translate_units='',
_do_set=True):
"""
Find all the WCS transformations in the given header.
Parameters
----------
header : str or `~astropy.io.fits.Header` object.
relax : bool or int, optional
Degree of permissiveness:
- `True` (default): Admit all recognized informal extensions of the
WCS standard.
- `False`: Recognize only FITS keywords defined by the
published WCS standard.
- `int`: a bit field selecting specific extensions to accept.
See :ref:`astropy:relaxread` for details.
keysel : sequence of str, optional
A list of flags used to select the keyword types considered by
wcslib. When ``None``, only the standard image header
keywords are considered (and the underlying wcspih() C
function is called). To use binary table image array or pixel
list keywords, *keysel* must be set.
Each element in the list should be one of the following strings:
- 'image': Image header keywords
- 'binary': Binary table image array keywords
- 'pixel': Pixel list keywords
Keywords such as ``EQUIna`` or ``RFRQna`` that are common to
binary table image arrays and pixel lists (including
``WCSNna`` and ``TWCSna``) are selected by both 'binary' and
'pixel'.
fix : bool, optional
When `True` (default), call `~astropy.wcs.Wcsprm.fix` on
the resulting objects to fix any non-standard uses in the
header. `FITSFixedWarning` warnings will be emitted if any
changes were made.
translate_units : str, optional
Specify which potentially unsafe translations of non-standard
unit strings to perform. By default, performs none. See
`WCS.fix` for more information about this parameter. Only
effective when ``fix`` is `True`.
Returns
-------
wcses : list of `WCS`
"""
if isinstance(header, (str, bytes)):
header_string = header
elif isinstance(header, fits.Header):
header_string = header.tostring()
else:
raise TypeError(
"header must be a string or astropy.io.fits.Header object")
keysel_flags = _parse_keysel(keysel)
if isinstance(header_string, str):
header_bytes = header_string.encode('ascii')
else:
header_bytes = header_string
wcsprms = _wcs.find_all_wcs(header_bytes, relax, keysel_flags)
result = []
for wcsprm in wcsprms:
subresult = WCS(fix=False, _do_set=False)
subresult.wcs = wcsprm
result.append(subresult)
if fix:
subresult.fix(translate_units)
if _do_set:
subresult.wcs.set()
return result
def validate(source):
"""
Builds a WCS validation report for the given FITS file.
Parameters
----------
source : str or file-like or `~astropy.io.fits.HDUList`
The FITS file to validate.
Returns
-------
results : list subclass instance
The result is returned as nested lists. The first level
corresponds to the HDUs in the given file. The next level has
an entry for each WCS found in that header. The special
subclass of list will pretty-print the results as a table when
printed.
"""
class _WcsValidateWcsResult(list):
def __init__(self, key):
self._key = key
def __repr__(self):
result = [f" WCS key '{self._key or ' '}':"]
if len(self):
for entry in self:
for i, line in enumerate(entry.splitlines()):
if i == 0:
initial_indent = ' - '
else:
initial_indent = ' '
result.extend(
textwrap.wrap(
line,
initial_indent=initial_indent,
subsequent_indent=' '))
else:
result.append(" No issues.")
return '\n'.join(result)
class _WcsValidateHduResult(list):
def __init__(self, hdu_index, hdu_name):
self._hdu_index = hdu_index
self._hdu_name = hdu_name
list.__init__(self)
def __repr__(self):
if len(self):
if self._hdu_name:
hdu_name = f' ({self._hdu_name})'
else:
hdu_name = ''
result = [f'HDU {self._hdu_index}{hdu_name}:']
for wcs in self:
result.append(repr(wcs))
return '\n'.join(result)
return ''
class _WcsValidateResults(list):
def __repr__(self):
result = []
for hdu in self:
content = repr(hdu)
if len(content):
result.append(content)
return '\n\n'.join(result)
global __warningregistry__
if isinstance(source, fits.HDUList):
hdulist = source
else:
hdulist = fits.open(source)
results = _WcsValidateResults()
for i, hdu in enumerate(hdulist):
hdu_results = _WcsValidateHduResult(i, hdu.name)
results.append(hdu_results)
with warnings.catch_warnings(record=True) as warning_lines:
wcses = find_all_wcs(
hdu.header, relax=_wcs.WCSHDR_reject,
fix=False, _do_set=False)
for wcs in wcses:
wcs_results = _WcsValidateWcsResult(wcs.wcs.alt)
hdu_results.append(wcs_results)
try:
del __warningregistry__
except NameError:
pass
with warnings.catch_warnings(record=True) as warning_lines:
warnings.resetwarnings()
warnings.simplefilter(
"always", FITSFixedWarning, append=True)
try:
WCS(hdu.header,
key=wcs.wcs.alt or ' ',
relax=_wcs.WCSHDR_reject,
fix=True, _do_set=False)
except WcsError as e:
wcs_results.append(str(e))
wcs_results.extend([str(x.message) for x in warning_lines])
return results
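# Hedged demo (not part of the original module): running this file directly
# prints a WCS validation report for the sample image shipped with the
# astropy.wcs test data (the same file used in the docstring examples above).
if __name__ == "__main__":
    import os
    import astropy.wcs

    sample = os.path.join(astropy.wcs.__path__[0], 'tests/data/j94f05bgq_flt.fits')
    print(validate(sample))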
avg_line_length: 37.984624 | max_line_length: 153 | alphanum_fraction: 0.548875
hexsha: b89cf2a3e9ab463fea34b6311814c6078b5f833d | size: 12,998 | ext: py | lang: Python
max_stars_repo_path: graphql/core/execution/executor.py | max_stars_repo_name: defrex/graphql-core | max_stars_repo_head_hexsha: 9c8450fa5b2e3dd0a8ea015ac3b9b5f24028ac91 | max_stars_repo_licenses: ["MIT"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: graphql/core/execution/executor.py | max_issues_repo_name: defrex/graphql-core | max_issues_repo_head_hexsha: 9c8450fa5b2e3dd0a8ea015ac3b9b5f24028ac91 | max_issues_repo_licenses: ["MIT"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: graphql/core/execution/executor.py | max_forks_repo_name: defrex/graphql-core | max_forks_repo_head_hexsha: 9c8450fa5b2e3dd0a8ea015ac3b9b5f24028ac91 | max_forks_repo_licenses: ["MIT"] | max_forks_count: 1 | max_forks_repo_forks_event_min_datetime: 2019-08-23T12:52:58.000Z | max_forks_repo_forks_event_max_datetime: 2019-08-23T12:52:58.000Z
content:
import collections
import functools
import logging
from ..error import GraphQLError
from ..language import ast
from ..language.parser import parse
from ..language.source import Source
from ..pyutils.default_ordered_dict import DefaultOrderedDict
from ..pyutils.defer import Deferred, DeferredDict, DeferredList, defer, succeed
from ..type import GraphQLEnumType, GraphQLInterfaceType, GraphQLList, GraphQLNonNull, GraphQLObjectType, \
GraphQLScalarType, GraphQLUnionType
from ..validation import validate
from .base import ExecutionContext, ExecutionResult, ResolveInfo, Undefined, collect_fields, default_resolve_fn, \
get_field_def, get_operation_root_type
logger = logging.getLogger(__name__)
class Executor(object):
def __init__(self, execution_middlewares=None, default_resolver=default_resolve_fn, map_type=dict):
assert issubclass(map_type, collections.MutableMapping)
self._execution_middlewares = execution_middlewares or []
self._default_resolve_fn = default_resolver
self._map_type = map_type
self._enforce_strict_ordering = issubclass(map_type, collections.OrderedDict)
@property
def enforce_strict_ordering(self):
return self._enforce_strict_ordering
@property
def map_type(self):
return self._map_type
def execute(self, schema, request='', root=None, args=None, operation_name=None, request_context=None,
execute_serially=False, validate_ast=True):
curried_execution_function = functools.partial(
self._execute,
schema,
request,
root,
args,
operation_name,
request_context,
execute_serially,
validate_ast
)
for middleware in self._execution_middlewares:
if hasattr(middleware, 'execution_result'):
curried_execution_function = functools.partial(middleware.execution_result, curried_execution_function)
return curried_execution_function()
def _execute(self, schema, request, root, args, operation_name, request_context, execute_serially, validate_ast):
if not isinstance(request, ast.Document):
if not isinstance(request, Source):
request = Source(request, 'GraphQL request')
request = parse(request)
if validate_ast:
validation_errors = validate(schema, request)
if validation_errors:
return succeed(ExecutionResult(
errors=validation_errors,
invalid=True,
))
return self._execute_graphql_query(
schema,
root or object(),
request,
operation_name,
args or {},
request_context or {},
execute_serially)
def _execute_graphql_query(self, schema, root, ast, operation_name, args, request_context, execute_serially=False):
ctx = ExecutionContext(schema, root, ast, operation_name, args, request_context)
return defer(self._execute_operation, ctx, root, ctx.operation, execute_serially) \
.add_errback(
lambda error: ctx.errors.append(error)
) \
.add_callback(
lambda data: ExecutionResult(data, ctx.errors),
)
def _execute_operation(self, ctx, root, operation, execute_serially):
type = get_operation_root_type(ctx.schema, operation)
if operation.operation == 'mutation' or execute_serially:
execute_serially = True
fields = DefaultOrderedDict(list) \
if (execute_serially or self._enforce_strict_ordering) \
else collections.defaultdict(list)
fields = collect_fields(ctx, type, operation.selection_set, fields, set())
if execute_serially:
return self._execute_fields_serially(ctx, type, root, fields)
return self._execute_fields(ctx, type, root, fields)
def _execute_fields_serially(self, execution_context, parent_type, source_value, fields):
def execute_field_callback(results, response_name):
field_asts = fields[response_name]
result = self._resolve_field(execution_context, parent_type, source_value, field_asts)
if result is Undefined:
return results
def collect_result(resolved_result):
results[response_name] = resolved_result
return results
if isinstance(result, Deferred):
return succeed(result).add_callback(collect_result)
else:
return collect_result(result)
def execute_field(prev_deferred, response_name):
return prev_deferred.add_callback(execute_field_callback, response_name)
return functools.reduce(execute_field, fields.keys(), succeed(self._map_type()))
def _execute_fields(self, execution_context, parent_type, source_value, fields):
contains_deferred = False
results = self._map_type()
for response_name, field_asts in fields.items():
result = self._resolve_field(execution_context, parent_type, source_value, field_asts)
if result is Undefined:
continue
results[response_name] = result
if isinstance(result, Deferred):
contains_deferred = True
if not contains_deferred:
return results
return DeferredDict(results)
def _resolve_field(self, execution_context, parent_type, source, field_asts):
field_ast = field_asts[0]
field_name = field_ast.name.value
field_def = get_field_def(execution_context.schema, parent_type, field_name)
if not field_def:
return Undefined
return_type = field_def.type
resolve_fn = field_def.resolver or self._default_resolve_fn
# Build a dict of arguments from the field.arguments AST, using the variables scope to
# fulfill any variable references.
args = execution_context.get_argument_values(field_def, field_ast)
# The resolve function's optional third argument is a collection of
# information about the current execution state.
info = ResolveInfo(
field_name,
field_asts,
return_type,
parent_type,
execution_context
)
result = self.resolve_or_error(resolve_fn, source, args, info)
return self.complete_value_catching_error(
execution_context, return_type, field_asts, info, result
)
def complete_value_catching_error(self, ctx, return_type, field_asts, info, result):
# If the field type is non-nullable, then it is resolved without any
# protection from errors.
if isinstance(return_type, GraphQLNonNull):
return self.complete_value(ctx, return_type, field_asts, info, result)
# Otherwise, error protection is applied, logging the error and
# resolving a null value for this field if one is encountered.
try:
completed = self.complete_value(ctx, return_type, field_asts, info, result)
if isinstance(completed, Deferred):
def handle_error(error):
ctx.errors.append(error)
return None
return completed.add_errback(handle_error)
return completed
except Exception as e:
logger.exception('GraphQL exception occurred')
ctx.errors.append(e)
return None
def complete_value(self, ctx, return_type, field_asts, info, result):
"""
Implements the instructions for completeValue as defined in the
"Field entries" section of the spec.
If the field type is Non-Null, then this recursively completes the value for the inner type. It throws a field
error if that completion returns null, as per the "Nullability" section of the spec.
If the field type is a List, then this recursively completes the value for the inner type on each item in the
list.
If the field type is a Scalar or Enum, ensures the completed value is a legal value of the type by calling the
`serialize` method of GraphQL type definition.
Otherwise, the field type expects a sub-selection set, and will complete the value by evaluating all
sub-selections.
"""
# If field type is NonNull, complete for inner type, and throw field error if result is null.
if isinstance(result, Deferred):
return result.add_callbacks(
lambda resolved: self.complete_value(
ctx,
return_type,
field_asts,
info,
resolved
),
lambda error: GraphQLError(error.value and str(error.value), field_asts, error)
)
if isinstance(result, Exception):
raise GraphQLError(str(result), field_asts, result)
if isinstance(return_type, GraphQLNonNull):
completed = self.complete_value(
ctx, return_type.of_type, field_asts, info, result
)
if completed is None:
raise GraphQLError(
'Cannot return null for non-nullable field {}.{}.'.format(info.parent_type, info.field_name),
field_asts
)
return completed
# If result is null-like, return null.
if result is None:
return None
# If field type is List, complete each item in the list with the inner type
if isinstance(return_type, GraphQLList):
assert isinstance(result, collections.Iterable), \
'User Error: expected iterable, but did not find one.'
item_type = return_type.of_type
completed_results = []
contains_deferred = False
for item in result:
completed_item = self.complete_value_catching_error(ctx, item_type, field_asts, info, item)
if not contains_deferred and isinstance(completed_item, Deferred):
contains_deferred = True
completed_results.append(completed_item)
return DeferredList(completed_results) if contains_deferred else completed_results
# If field type is Scalar or Enum, serialize to a valid value, returning null if coercion is not possible.
if isinstance(return_type, (GraphQLScalarType, GraphQLEnumType)):
serialized_result = return_type.serialize(result)
if serialized_result is None:
return None
return serialized_result
runtime_type = None
# Field type must be Object, Interface or Union and expect sub-selections.
if isinstance(return_type, GraphQLObjectType):
runtime_type = return_type
elif isinstance(return_type, (GraphQLInterfaceType, GraphQLUnionType)):
runtime_type = return_type.resolve_type(result, info)
if runtime_type and not return_type.is_possible_type(runtime_type):
raise GraphQLError(
u'Runtime Object type "{}" is not a possible type for "{}".'.format(runtime_type, return_type),
field_asts
)
if not runtime_type:
return None
if runtime_type.is_type_of and not runtime_type.is_type_of(result, info):
raise GraphQLError(
u'Expected value of type "{}" but got {}.'.format(return_type, type(result).__name__),
field_asts
)
# Collect sub-fields to execute to complete this value.
subfield_asts = DefaultOrderedDict(list) if self._enforce_strict_ordering else collections.defaultdict(list)
visited_fragment_names = set()
for field_ast in field_asts:
selection_set = field_ast.selection_set
if selection_set:
subfield_asts = collect_fields(
ctx, runtime_type, selection_set,
subfield_asts, visited_fragment_names
)
return self._execute_fields(ctx, runtime_type, result, subfield_asts)
def resolve_or_error(self, resolve_fn, source, args, info):
curried_resolve_fn = functools.partial(resolve_fn, source, args, info)
try:
for middleware in self._execution_middlewares:
if hasattr(middleware, 'run_resolve_fn'):
curried_resolve_fn = functools.partial(middleware.run_resolve_fn, curried_resolve_fn, resolve_fn)
return curried_resolve_fn()
except Exception as e:
logger.exception("An error occurred while resolving field %s.%s"
% (info.parent_type.name, info.field_name))
return e
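# Usage sketch (illustrative only; the exact import paths below are assumed
# for this legacy graphql-core layout and may differ):
#
#     from graphql.core.type import (GraphQLSchema, GraphQLObjectType,
#                                    GraphQLField, GraphQLString)
#
#     query_type = GraphQLObjectType(
#         name='Query',
#         fields={'hello': GraphQLField(
#             GraphQLString,
#             resolver=lambda source, args, info: 'world')})
#     schema = GraphQLSchema(query=query_type)
#
#     # Executor.execute returns a Deferred; attach a callback to consume the
#     # ExecutionResult once resolution has finished.
#     Executor().execute(schema, '{ hello }').add_callback(
#         lambda result: print(result.data))  # {'hello': 'world'}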
avg_line_length: 39.628049 | max_line_length: 119 | alphanum_fraction: 0.645484
hexsha: 76ef27ba301050e82c5a70805ab4382f0c1558a2 | size: 9,792 | ext: py | lang: Python
max_stars_repo_path: torch/distributed/optim/optimizer.py | max_stars_repo_name: xiaohanhuang/pytorch | max_stars_repo_head_hexsha: a31aea8eaa99a5ff72b5d002c206cd68d5467a5e | max_stars_repo_licenses: ["Intel"] | max_stars_count: 183 | max_stars_repo_stars_event_min_datetime: 2018-04-06T21:10:36.000Z | max_stars_repo_stars_event_max_datetime: 2022-03-30T15:05:24.000Z
max_issues_repo_path: torch/distributed/optim/optimizer.py | max_issues_repo_name: xiaohanhuang/pytorch | max_issues_repo_head_hexsha: a31aea8eaa99a5ff72b5d002c206cd68d5467a5e | max_issues_repo_licenses: ["Intel"] | max_issues_count: 818 | max_issues_repo_issues_event_min_datetime: 2020-02-07T02:36:44.000Z | max_issues_repo_issues_event_max_datetime: 2022-03-31T23:49:44.000Z
max_forks_repo_path: torch/distributed/optim/optimizer.py | max_forks_repo_name: xiaohanhuang/pytorch | max_forks_repo_head_hexsha: a31aea8eaa99a5ff72b5d002c206cd68d5467a5e | max_forks_repo_licenses: ["Intel"] | max_forks_count: 58 | max_forks_repo_forks_event_min_datetime: 2018-06-05T16:40:18.000Z | max_forks_repo_forks_event_max_datetime: 2022-03-16T15:37:29.000Z
content:
from typing import List, Optional
import logging
import torch
import torch.distributed.rpc as rpc
import torch.jit as jit
import torch.nn as nn
from torch import Tensor
from torch.distributed.rpc import RRef
from torch.distributed.optim import functional_optim_map
import torch.distributed.autograd as dist_autograd
from collections import defaultdict
from threading import Lock
logger = logging.getLogger(__name__)
# XXX: we define a _ScriptLocalOptimizer here to explicitly
# compile the FunctionalOptimizer class into TorchScript
# This is because ScriptClass instance still lives in
# python unless you explicitly compile it as an attribute
# in ScriptModule or pass it to a ScriptFunction
# _ScriptLocalOptimizerInterface serves as a common
# interface type for Optimizer ScriptModules.
#
# TODO (wanchaol): remove this once we added TorchScript
# class reference semantics
@jit.interface
class _ScriptLocalOptimizerInterface(object):
def step(self, autograd_ctx_id: int) -> None:
pass
class _ScriptLocalOptimizer(nn.Module):
# TorchScript does not support multithread concurrent compiling.
# request_callback might invoke concurrent compiling, so we
# serialize the compiling with a lock
compile_lock = Lock()
def __init__(self, optim_cls, local_params_rref, *args, **kwargs):
super().__init__()
self._local_params = [rref.local_value() for rref in local_params_rref]
self.optim = optim_cls(
self._local_params,
*args,
**kwargs)
@jit.export
def step(self, autograd_ctx_id: int):
all_local_grads = dist_autograd.get_gradients(autograd_ctx_id)
# apply functional optimizer step with a list of gradients
grads: List[Optional[Tensor]] = [
all_local_grads[p] if p in all_local_grads else None
for p in self._local_params
]
self.optim.step(grads)
# TODO (wanchaol): remove/merge this with ScriptLocalOptimizer once
# we have converted all to functional optimizer in distributed.optim
class _LocalOptimizer(object):
# Ideally we would only need to share a lock for instances of
# _LocalOptimizer that deal with the same parameters. We are
# making a simplifying assumption here that if there is more
# than one instance of _LocalOptimizer per worker, they will
# be optimizing the same parameters (e.g. each data parallel
# trainer will create its own instance of _LocalOptimizer but
# they will all optimize the same parameters on each worker)
global_lock = Lock()
def __init__(self, optim_cls, local_params_rref, *args, **kwargs):
self._local_params = [rref.local_value() for rref in local_params_rref]
self.optim = optim_cls(
self._local_params,
*args,
**kwargs)
def step(self, autograd_ctx_id):
all_local_grads = dist_autograd.get_gradients(autograd_ctx_id)
with _LocalOptimizer.global_lock:
for param, grad in all_local_grads.items():
param.grad = grad
self.optim.step()
def _new_local_optimizer(optim_cls, local_params_rref, *args, **kwargs):
return rpc.RRef(
_LocalOptimizer(optim_cls, local_params_rref, *args, **kwargs))
def _local_optimizer_step(local_optim_rref, autograd_ctx_id):
local_optim = local_optim_rref.local_value()
local_optim.step(autograd_ctx_id)
# new/step functions combined with _ScriptLocalOptimizer to provide GIL-free optimizer
def _new_script_local_optimizer(optim_cls, local_params_rref, *args, **kwargs):
optim = _ScriptLocalOptimizer(optim_cls, local_params_rref, *args, **kwargs)
with _ScriptLocalOptimizer.compile_lock:
script_optim = jit.script(optim)
return rpc.RRef(
script_optim, _ScriptLocalOptimizerInterface)
@jit.script
def _script_local_optimizer_step(
local_optim_rref: RRef[_ScriptLocalOptimizerInterface],
autograd_ctx_id: int
) -> None:
local_optim = local_optim_rref.local_value()
local_optim.step(autograd_ctx_id)
def _wait_for_all(rpc_futs):
# TODO: improve error propagation
exception = None
results = []
for fut in rpc_futs:
try:
results.append(fut.wait())
except Exception as e:
results.append(e)
exception = e
if exception is not None:
raise exception
return results
class DistributedOptimizer:
"""
DistributedOptimizer takes remote references to parameters scattered
across workers and applies the given optimizer locally for each parameter.
This class uses :meth:`~torch.distributed.autograd.get_gradients` in order
to retrieve the gradients for specific parameters.
Concurrent calls to
:meth:`~torch.distributed.optim.DistributedOptimizer.step`,
either from the same or different clients, will
be serialized on each worker -- as each worker's optimizer can only work
on one set of gradients at a time. However, there is no guarantee that
the full forward-backward-optimizer sequence will execute for one client
at a time. This means that the gradients being applied may not correspond
to the latest forward pass executed on a given worker. Also, there is no
guaranteed ordering across workers.
`DistributedOptimizer` creates the local optimizer with TorchScript enabled
by default, so that optimizer updates are not blocked by the Python Global
Interpreter Lock (GIL) in the case of multithreaded training (e.g. Distributed
Model Parallel). This feature is currently enabled for most optimizers. You
can also follow `the recipe`__ in PyTorch tutorials to enable TorchScript support
    for your own custom optimizers.

    Args:
optimizer_class (optim.Optimizer): the class of optimizer to
instantiate on each worker.
params_rref (list[RRef]): list of RRefs to local or remote parameters
to optimize.
args: arguments to pass to the optimizer constructor on each worker.
        kwargs: arguments to pass to the optimizer constructor on each worker.

    Example::
>>> import torch.distributed.autograd as dist_autograd
>>> import torch.distributed.rpc as rpc
>>> from torch import optim
>>> from torch.distributed.optim import DistributedOptimizer
>>>
>>> with dist_autograd.context() as context_id:
>>> # Forward pass.
>>> rref1 = rpc.remote("worker1", torch.add, args=(torch.ones(2), 3))
>>> rref2 = rpc.remote("worker1", torch.add, args=(torch.ones(2), 1))
>>> loss = rref1.to_here() + rref2.to_here()
>>>
>>> # Backward pass.
>>> dist_autograd.backward(context_id, [loss.sum()])
>>>
>>> # Optimizer.
>>> dist_optim = DistributedOptimizer(
>>> optim.SGD,
>>> [rref1, rref2],
>>> lr=0.05,
>>> )
>>> dist_optim.step(context_id)

    __ https://github.com/pytorch/tutorials/pull/1465
"""
def __init__(self, optimizer_class, params_rref, *args, **kwargs):
torch._C._log_api_usage_once("torch.distributed.optim.DistributedOptimizer")
per_worker_params_rref = defaultdict(list)
for param in params_rref:
per_worker_params_rref[param.owner()].append(param)
if optimizer_class in functional_optim_map and jit._state._enabled:
optim_ctor = functional_optim_map.get(optimizer_class)
else:
optim_ctor = optimizer_class
self.is_functional_optim = (optim_ctor != optimizer_class)
if self.is_functional_optim:
optimizer_new_func = _new_script_local_optimizer
else:
            logger.warning(
                f"Creating the optimizer {optimizer_class} without TorchScript support, "
                "this might result in slow computation time in a multithreaded environment "
                "(i.e. Distributed Model Parallel training on CPU) due to Python's "
"Global Interpreter Lock (GIL). Please file an issue if you need this "
"optimizer in TorchScript. "
)
optimizer_new_func = _new_local_optimizer
remote_optim_futs = []
for worker, param_rrefs in per_worker_params_rref.items():
remote_optim_rref_fut = rpc.rpc_async(
worker,
optimizer_new_func,
args=(optim_ctor, param_rrefs) + args,
kwargs=kwargs,
)
remote_optim_futs.append(remote_optim_rref_fut)
        self.remote_optimizers = _wait_for_all(remote_optim_futs)

    def step(self, context_id):
"""
Performs a single optimization step.
This will call :meth:`torch.optim.Optimizer.step` on each worker
containing parameters to be optimized, and will block until all workers
return. The provided ``context_id`` will be used to retrieve the
corresponding :class:`~torch.distributed.autograd.context` that
contains the gradients that should be applied to the parameters.
Args:
context_id: the autograd context id for which we should run the
optimizer step.
"""
dist_autograd._is_valid_context(context_id)
if self.is_functional_optim:
optimizer_step_func = _script_local_optimizer_step
else:
optimizer_step_func = _local_optimizer_step
rpc_futs = []
for optimizer in self.remote_optimizers:
rpc_futs.append(rpc.rpc_async(
optimizer.owner(),
optimizer_step_func,
args=(optimizer, context_id),
))
_wait_for_all(rpc_futs)
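
A minimal end-to-end sketch of how the pieces above fit together from the trainer side; it mirrors the class docstring example and only adds the RPC bootstrap around it. The worker names, world size, and choice of SGD are illustrative assumptions, not values taken from this module:

import torch
import torch.distributed.autograd as dist_autograd
import torch.distributed.rpc as rpc
from torch import optim
from torch.distributed.optim import DistributedOptimizer


def run_trainer():
    # Assumes a second process has called rpc.init_rpc("worker1", rank=1, world_size=2).
    rpc.init_rpc("trainer", rank=0, world_size=2)
    rref1 = rpc.remote("worker1", torch.add, args=(torch.ones(2), 3))
    rref2 = rpc.remote("worker1", torch.add, args=(torch.ones(2), 1))
    dist_optim = DistributedOptimizer(optim.SGD, [rref1, rref2], lr=0.05)
    with dist_autograd.context() as context_id:
        loss = rref1.to_here() + rref2.to_here()
        dist_autograd.backward(context_id, [loss.sum()])
        dist_optim.step(context_id)
    rpc.shutdown()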
| 38.551181
| 90
| 0.683517
|
a5e89099c344887da5fe3df04ffbe54445589245
| 929
|
py
|
Python
|
tests/utils.py
|
broadinstitute/annmas
|
79da783acf41e5aaca4a14ef991c9ab0aac3c59a
|
[
"BSD-3-Clause"
] | 11
|
2021-10-04T16:25:20.000Z
|
2022-03-08T16:55:24.000Z
|
tests/utils.py
|
broadinstitute/longbow
|
5ff6e5f62bb4a5a8206ec0299e1467de33a96eda
|
[
"BSD-3-Clause"
] | 23
|
2021-04-22T07:02:42.000Z
|
2022-03-31T20:23:29.000Z
|
tests/utils.py
|
broadinstitute/annmas
|
79da783acf41e5aaca4a14ef991c9ab0aac3c59a
|
[
"BSD-3-Clause"
] | null | null | null |
import pysam
import filecmp


def assert_bam_files_equal(file1, file2, order_matters=False, compare_header=False):
    """Assert that the contents of the two given bam files are equivalent."""
    if order_matters and compare_header:
        assert filecmp.cmp(file1, file2)
        return

    with pysam.AlignmentFile(file1, "rb", check_sq=False, require_index=False) as bam1, \
pysam.AlignmentFile(file2, "rb", check_sq=False, require_index=False) as bam2:
if compare_header:
assert bam1.header == bam2.header
if order_matters:
for read1, read2 in zip(bam1, bam2):
assert read1 == read2
else:
f1_reads = set()
for r in bam1:
f1_reads.add(r)
nreads_2 = 0
for r in bam2:
assert r in f1_reads
nreads_2 += 1
assert nreads_2 == len(f1_reads)
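
A hedged usage sketch for the helper above in a pytest test; the fixture paths and the command-line tool being invoked are hypothetical placeholders, not names from this test suite:

import subprocess


def test_tool_output_matches_expected(tmp_path):
    # Hypothetical inputs and CLI invocation, shown for illustration only.
    input_bam = "tests/test_data/input.bam"
    expected_bam = "tests/test_data/expected.bam"
    actual_bam = str(tmp_path / "actual.bam")
    subprocess.run(["some_tool", "filter", "-o", actual_bam, input_bam], check=True)
    # By default read order and headers are ignored; only the set of records must match.
    assert_bam_files_equal(expected_bam, actual_bam)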
| 25.805556
| 89
| 0.590958
|
710f5e01f57cc5967a68c0625abac0e20e4a88c4
| 27,778
|
py
|
Python
|
tests/functional/coercers/test_coercer_non_null_list_with_default_string_field.py
|
matt-koevort/tartiflette
|
5777866b133d846ce4f8aa03f735fa81832896cd
|
[
"MIT"
] | 530
|
2019-06-04T11:45:36.000Z
|
2022-03-31T09:29:56.000Z
|
tests/functional/coercers/test_coercer_non_null_list_with_default_string_field.py
|
matt-koevort/tartiflette
|
5777866b133d846ce4f8aa03f735fa81832896cd
|
[
"MIT"
] | 242
|
2019-06-04T11:53:08.000Z
|
2022-03-28T07:06:27.000Z
|
tests/functional/coercers/test_coercer_non_null_list_with_default_string_field.py
|
matt-koevort/tartiflette
|
5777866b133d846ce4f8aa03f735fa81832896cd
|
[
"MIT"
] | 36
|
2019-06-21T06:40:27.000Z
|
2021-11-04T13:11:16.000Z
|
import pytest
from tests.functional.coercers.common import resolve_list_field


@pytest.mark.asyncio
@pytest.mark.ttftt_engine(
name="coercion",
resolvers={"Query.nonNullListWithDefaultStringField": resolve_list_field},
)
@pytest.mark.parametrize(
"query,variables,expected",
[
(
"""query { nonNullListWithDefaultStringField }""",
None,
{
"data": {
"nonNullListWithDefaultStringField": "SUCCESS-[defaultstring-scalar-None]"
}
},
),
(
"""query { nonNullListWithDefaultStringField(param: null) }""",
None,
{
"data": None,
"errors": [
{
"message": "Argument < param > of non-null type < [String]! > must not be null.",
"path": ["nonNullListWithDefaultStringField"],
"locations": [{"line": 1, "column": 43}],
"extensions": {
"rule": "5.6.1",
"spec": "June 2018",
"details": "https://graphql.github.io/graphql-spec/June2018/#sec-Values-of-Correct-Type",
"tag": "values-of-correct-type",
},
}
],
},
),
(
"""query { nonNullListWithDefaultStringField(param: [null]) }""",
None,
{"data": {"nonNullListWithDefaultStringField": "SUCCESS-[None]"}},
),
(
"""query { nonNullListWithDefaultStringField(param: "paramDefaultValue") }""",
None,
{
"data": {
"nonNullListWithDefaultStringField": "SUCCESS-[paramdefaultvalue-scalar]"
}
},
),
(
"""query { nonNullListWithDefaultStringField(param: ["paramDefaultValue"]) }""",
None,
{
"data": {
"nonNullListWithDefaultStringField": "SUCCESS-[paramdefaultvalue-scalar]"
}
},
),
(
"""query { nonNullListWithDefaultStringField(param: ["paramDefaultValue", null]) }""",
None,
{
"data": {
"nonNullListWithDefaultStringField": "SUCCESS-[paramdefaultvalue-scalar-None]"
}
},
),
(
"""query ($param: [String]) { nonNullListWithDefaultStringField(param: $param) }""",
None,
{
"data": {
"nonNullListWithDefaultStringField": "SUCCESS-[defaultstring-scalar-None]"
}
},
),
(
"""query ($param: [String]) { nonNullListWithDefaultStringField(param: $param) }""",
{"param": None},
{
"data": {"nonNullListWithDefaultStringField": None},
"errors": [
{
"message": "Argument < param > of non-null type < [String]! > must not be null.",
"path": ["nonNullListWithDefaultStringField"],
"locations": [{"line": 1, "column": 69}],
}
],
},
),
(
"""query ($param: [String]) { nonNullListWithDefaultStringField(param: $param) }""",
{"param": [None]},
{"data": {"nonNullListWithDefaultStringField": "SUCCESS-[None]"}},
),
(
"""query ($param: [String]) { nonNullListWithDefaultStringField(param: $param) }""",
{"param": "varValue"},
{
"data": {
"nonNullListWithDefaultStringField": "SUCCESS-[varvalue-scalar]"
}
},
),
(
"""query ($param: [String]) { nonNullListWithDefaultStringField(param: $param) }""",
{"param": ["varValue"]},
{
"data": {
"nonNullListWithDefaultStringField": "SUCCESS-[varvalue-scalar]"
}
},
),
(
"""query ($param: [String]) { nonNullListWithDefaultStringField(param: $param) }""",
{"param": ["varValue", None]},
{
"data": {
"nonNullListWithDefaultStringField": "SUCCESS-[varvalue-scalar-None]"
}
},
),
(
"""query ($param: [String] = null) { nonNullListWithDefaultStringField(param: $param) }""",
None,
{
"data": {"nonNullListWithDefaultStringField": None},
"errors": [
{
"message": "Argument < param > of non-null type < [String]! > must not be null.",
"path": ["nonNullListWithDefaultStringField"],
"locations": [{"line": 1, "column": 76}],
}
],
},
),
(
"""query ($param: [String] = null) { nonNullListWithDefaultStringField(param: $param) }""",
{"param": None},
{
"data": {"nonNullListWithDefaultStringField": None},
"errors": [
{
"message": "Argument < param > of non-null type < [String]! > must not be null.",
"path": ["nonNullListWithDefaultStringField"],
"locations": [{"line": 1, "column": 76}],
}
],
},
),
(
"""query ($param: [String] = null) { nonNullListWithDefaultStringField(param: $param) }""",
{"param": [None]},
{"data": {"nonNullListWithDefaultStringField": "SUCCESS-[None]"}},
),
(
"""query ($param: [String] = null) { nonNullListWithDefaultStringField(param: $param) }""",
{"param": "varValue"},
{
"data": {
"nonNullListWithDefaultStringField": "SUCCESS-[varvalue-scalar]"
}
},
),
(
"""query ($param: [String] = null) { nonNullListWithDefaultStringField(param: $param) }""",
{"param": ["varValue"]},
{
"data": {
"nonNullListWithDefaultStringField": "SUCCESS-[varvalue-scalar]"
}
},
),
(
"""query ($param: [String] = null) { nonNullListWithDefaultStringField(param: $param) }""",
{"param": ["varValue", None]},
{
"data": {
"nonNullListWithDefaultStringField": "SUCCESS-[varvalue-scalar-None]"
}
},
),
(
"""query ($param: [String] = [null]) { nonNullListWithDefaultStringField(param: $param) }""",
None,
{"data": {"nonNullListWithDefaultStringField": "SUCCESS-[None]"}},
),
(
"""query ($param: [String] = [null]) { nonNullListWithDefaultStringField(param: $param) }""",
{"param": None},
{
"data": {"nonNullListWithDefaultStringField": None},
"errors": [
{
"message": "Argument < param > of non-null type < [String]! > must not be null.",
"path": ["nonNullListWithDefaultStringField"],
"locations": [{"line": 1, "column": 78}],
}
],
},
),
(
"""query ($param: [String] = [null]) { nonNullListWithDefaultStringField(param: $param) }""",
{"param": [None]},
{"data": {"nonNullListWithDefaultStringField": "SUCCESS-[None]"}},
),
(
"""query ($param: [String] = [null]) { nonNullListWithDefaultStringField(param: $param) }""",
{"param": "varValue"},
{
"data": {
"nonNullListWithDefaultStringField": "SUCCESS-[varvalue-scalar]"
}
},
),
(
"""query ($param: [String] = [null]) { nonNullListWithDefaultStringField(param: $param) }""",
{"param": ["varValue"]},
{
"data": {
"nonNullListWithDefaultStringField": "SUCCESS-[varvalue-scalar]"
}
},
),
(
"""query ($param: [String] = [null]) { nonNullListWithDefaultStringField(param: $param) }""",
{"param": ["varValue", None]},
{
"data": {
"nonNullListWithDefaultStringField": "SUCCESS-[varvalue-scalar-None]"
}
},
),
(
"""query ($param: [String] = "varDefault") { nonNullListWithDefaultStringField(param: $param) }""",
None,
{
"data": {
"nonNullListWithDefaultStringField": "SUCCESS-[vardefault-scalar]"
}
},
),
(
"""query ($param: [String] = "varDefault") { nonNullListWithDefaultStringField(param: $param) }""",
{"param": None},
{
"data": {"nonNullListWithDefaultStringField": None},
"errors": [
{
"message": "Argument < param > of non-null type < [String]! > must not be null.",
"path": ["nonNullListWithDefaultStringField"],
"locations": [{"line": 1, "column": 84}],
}
],
},
),
(
"""query ($param: [String] = "varDefault") { nonNullListWithDefaultStringField(param: $param) }""",
{"param": [None]},
{"data": {"nonNullListWithDefaultStringField": "SUCCESS-[None]"}},
),
(
"""query ($param: [String] = "varDefault") { nonNullListWithDefaultStringField(param: $param) }""",
{"param": "varValue"},
{
"data": {
"nonNullListWithDefaultStringField": "SUCCESS-[varvalue-scalar]"
}
},
),
(
"""query ($param: [String] = "varDefault") { nonNullListWithDefaultStringField(param: $param) }""",
{"param": ["varValue"]},
{
"data": {
"nonNullListWithDefaultStringField": "SUCCESS-[varvalue-scalar]"
}
},
),
(
"""query ($param: [String] = "varDefault") { nonNullListWithDefaultStringField(param: $param) }""",
{"param": ["varValue", None]},
{
"data": {
"nonNullListWithDefaultStringField": "SUCCESS-[varvalue-scalar-None]"
}
},
),
(
"""query ($param: [String] = ["varDefault"]) { nonNullListWithDefaultStringField(param: $param) }""",
None,
{
"data": {
"nonNullListWithDefaultStringField": "SUCCESS-[vardefault-scalar]"
}
},
),
(
"""query ($param: [String] = ["varDefault"]) { nonNullListWithDefaultStringField(param: $param) }""",
{"param": None},
{
"data": {"nonNullListWithDefaultStringField": None},
"errors": [
{
"message": "Argument < param > of non-null type < [String]! > must not be null.",
"path": ["nonNullListWithDefaultStringField"],
"locations": [{"line": 1, "column": 86}],
}
],
},
),
(
"""query ($param: [String] = ["varDefault"]) { nonNullListWithDefaultStringField(param: $param) }""",
{"param": [None]},
{"data": {"nonNullListWithDefaultStringField": "SUCCESS-[None]"}},
),
(
"""query ($param: [String] = ["varDefault"]) { nonNullListWithDefaultStringField(param: $param) }""",
{"param": "varValue"},
{
"data": {
"nonNullListWithDefaultStringField": "SUCCESS-[varvalue-scalar]"
}
},
),
(
"""query ($param: [String] = ["varDefault"]) { nonNullListWithDefaultStringField(param: $param) }""",
{"param": ["varValue"]},
{
"data": {
"nonNullListWithDefaultStringField": "SUCCESS-[varvalue-scalar]"
}
},
),
(
"""query ($param: [String] = ["varDefault"]) { nonNullListWithDefaultStringField(param: $param) }""",
{"param": ["varValue", None]},
{
"data": {
"nonNullListWithDefaultStringField": "SUCCESS-[varvalue-scalar-None]"
}
},
),
(
"""query ($param: [String] = ["varDefault", null]) { nonNullListWithDefaultStringField(param: $param) }""",
None,
{
"data": {
"nonNullListWithDefaultStringField": "SUCCESS-[vardefault-scalar-None]"
}
},
),
(
"""query ($param: [String] = ["varDefault", null]) { nonNullListWithDefaultStringField(param: $param) }""",
{"param": None},
{
"data": {"nonNullListWithDefaultStringField": None},
"errors": [
{
"message": "Argument < param > of non-null type < [String]! > must not be null.",
"path": ["nonNullListWithDefaultStringField"],
"locations": [{"line": 1, "column": 92}],
}
],
},
),
(
"""query ($param: [String] = ["varDefault", null]) { nonNullListWithDefaultStringField(param: $param) }""",
{"param": [None]},
{"data": {"nonNullListWithDefaultStringField": "SUCCESS-[None]"}},
),
(
"""query ($param: [String] = ["varDefault", null]) { nonNullListWithDefaultStringField(param: $param) }""",
{"param": "varValue"},
{
"data": {
"nonNullListWithDefaultStringField": "SUCCESS-[varvalue-scalar]"
}
},
),
(
"""query ($param: [String] = ["varDefault", null]) { nonNullListWithDefaultStringField(param: $param) }""",
{"param": ["varValue"]},
{
"data": {
"nonNullListWithDefaultStringField": "SUCCESS-[varvalue-scalar]"
}
},
),
(
"""query ($param: [String] = ["varDefault", null]) { nonNullListWithDefaultStringField(param: $param) }""",
{"param": ["varValue", None]},
{
"data": {
"nonNullListWithDefaultStringField": "SUCCESS-[varvalue-scalar-None]"
}
},
),
(
"""query ($param: [String]!) { nonNullListWithDefaultStringField(param: $param) }""",
None,
{
"data": None,
"errors": [
{
"message": "Variable < $param > of required type < [String]! > was not provided.",
"path": None,
"locations": [{"line": 1, "column": 8}],
}
],
},
),
(
"""query ($param: [String]!) { nonNullListWithDefaultStringField(param: $param) }""",
{"param": None},
{
"data": None,
"errors": [
{
"message": "Variable < $param > of non-null type < [String]! > must not be null.",
"path": None,
"locations": [{"line": 1, "column": 8}],
}
],
},
),
(
"""query ($param: [String]!) { nonNullListWithDefaultStringField(param: $param) }""",
{"param": [None]},
{"data": {"nonNullListWithDefaultStringField": "SUCCESS-[None]"}},
),
(
"""query ($param: [String]!) { nonNullListWithDefaultStringField(param: $param) }""",
{"param": "varValue"},
{
"data": {
"nonNullListWithDefaultStringField": "SUCCESS-[varvalue-scalar]"
}
},
),
(
"""query ($param: [String]!) { nonNullListWithDefaultStringField(param: $param) }""",
{"param": ["varValue"]},
{
"data": {
"nonNullListWithDefaultStringField": "SUCCESS-[varvalue-scalar]"
}
},
),
(
"""query ($param: [String]!) { nonNullListWithDefaultStringField(param: $param) }""",
{"param": ["varValue", None]},
{
"data": {
"nonNullListWithDefaultStringField": "SUCCESS-[varvalue-scalar-None]"
}
},
),
(
"""query ($param: [String!]) { nonNullListWithDefaultStringField(param: $param) }""",
None,
{
"data": {
"nonNullListWithDefaultStringField": "SUCCESS-[defaultstring-scalar-None]"
}
},
),
(
"""query ($param: [String!]) { nonNullListWithDefaultStringField(param: $param) }""",
{"param": None},
{
"data": {"nonNullListWithDefaultStringField": None},
"errors": [
{
"message": "Argument < param > of non-null type < [String]! > must not be null.",
"path": ["nonNullListWithDefaultStringField"],
"locations": [{"line": 1, "column": 70}],
}
],
},
),
(
"""query ($param: [String!]) { nonNullListWithDefaultStringField(param: $param) }""",
{"param": [None]},
{
"data": None,
"errors": [
{
"message": "Variable < $param > got invalid value < [None] >; Expected non-nullable type < String! > not to be null at value[0].",
"path": None,
"locations": [{"line": 1, "column": 8}],
}
],
},
),
(
"""query ($param: [String!]) { nonNullListWithDefaultStringField(param: $param) }""",
{"param": "varValue"},
{
"data": {
"nonNullListWithDefaultStringField": "SUCCESS-[varvalue-scalar]"
}
},
),
(
"""query ($param: [String!]) { nonNullListWithDefaultStringField(param: $param) }""",
{"param": ["varValue"]},
{
"data": {
"nonNullListWithDefaultStringField": "SUCCESS-[varvalue-scalar]"
}
},
),
(
"""query ($param: [String!]) { nonNullListWithDefaultStringField(param: $param) }""",
{"param": ["varValue", None]},
{
"data": None,
"errors": [
{
"message": "Variable < $param > got invalid value < ['varValue', None] >; Expected non-nullable type < String! > not to be null at value[1].",
"path": None,
"locations": [{"line": 1, "column": 8}],
}
],
},
),
(
"""query ($param: [String!]!) { nonNullListWithDefaultStringField(param: $param) }""",
None,
{
"data": None,
"errors": [
{
"message": "Variable < $param > of required type < [String!]! > was not provided.",
"path": None,
"locations": [{"line": 1, "column": 8}],
}
],
},
),
(
"""query ($param: [String!]!) { nonNullListWithDefaultStringField(param: $param) }""",
{"param": None},
{
"data": None,
"errors": [
{
"message": "Variable < $param > of non-null type < [String!]! > must not be null.",
"path": None,
"locations": [{"line": 1, "column": 8}],
}
],
},
),
(
"""query ($param: [String!]!) { nonNullListWithDefaultStringField(param: $param) }""",
{"param": [None]},
{
"data": None,
"errors": [
{
"message": "Variable < $param > got invalid value < [None] >; Expected non-nullable type < String! > not to be null at value[0].",
"path": None,
"locations": [{"line": 1, "column": 8}],
}
],
},
),
(
"""query ($param: [String!]!) { nonNullListWithDefaultStringField(param: $param) }""",
{"param": "varValue"},
{
"data": {
"nonNullListWithDefaultStringField": "SUCCESS-[varvalue-scalar]"
}
},
),
(
"""query ($param: [String!]!) { nonNullListWithDefaultStringField(param: $param) }""",
{"param": ["varValue"]},
{
"data": {
"nonNullListWithDefaultStringField": "SUCCESS-[varvalue-scalar]"
}
},
),
(
"""query ($param: [String!]!) { nonNullListWithDefaultStringField(param: $param) }""",
{"param": ["varValue", None]},
{
"data": None,
"errors": [
{
"message": "Variable < $param > got invalid value < ['varValue', None] >; Expected non-nullable type < String! > not to be null at value[1].",
"path": None,
"locations": [{"line": 1, "column": 8}],
}
],
},
),
(
"""query ($item: String) { nonNullListWithDefaultStringField(param: ["paramDefaultValue", $item]) }""",
None,
{
"data": {
"nonNullListWithDefaultStringField": "SUCCESS-[paramdefaultvalue-scalar-None]"
}
},
),
(
"""query ($item: String) { nonNullListWithDefaultStringField(param: ["paramDefaultValue", $item]) }""",
{"item": None},
{
"data": {
"nonNullListWithDefaultStringField": "SUCCESS-[paramdefaultvalue-scalar-None]"
}
},
),
(
"""query ($item: String) { nonNullListWithDefaultStringField(param: ["paramDefaultValue", $item]) }""",
{"item": "varValue"},
{
"data": {
"nonNullListWithDefaultStringField": "SUCCESS-[paramdefaultvalue-scalar-varvalue-scalar]"
}
},
),
(
"""query ($item: String = null) { nonNullListWithDefaultStringField(param: ["paramDefaultValue", $item]) }""",
None,
{
"data": {
"nonNullListWithDefaultStringField": "SUCCESS-[paramdefaultvalue-scalar-None]"
}
},
),
(
"""query ($item: String = null) { nonNullListWithDefaultStringField(param: ["paramDefaultValue", $item]) }""",
{"item": None},
{
"data": {
"nonNullListWithDefaultStringField": "SUCCESS-[paramdefaultvalue-scalar-None]"
}
},
),
(
"""query ($item: String = null) { nonNullListWithDefaultStringField(param: ["paramDefaultValue", $item]) }""",
{"item": "varValue"},
{
"data": {
"nonNullListWithDefaultStringField": "SUCCESS-[paramdefaultvalue-scalar-varvalue-scalar]"
}
},
),
(
"""query ($item: String = "varDefault") { nonNullListWithDefaultStringField(param: ["paramDefaultValue", $item]) }""",
None,
{
"data": {
"nonNullListWithDefaultStringField": "SUCCESS-[paramdefaultvalue-scalar-vardefault-scalar]"
}
},
),
(
"""query ($item: String = "varDefault") { nonNullListWithDefaultStringField(param: ["paramDefaultValue", $item]) }""",
{"item": None},
{
"data": {
"nonNullListWithDefaultStringField": "SUCCESS-[paramdefaultvalue-scalar-None]"
}
},
),
(
"""query ($item: String = "varDefault") { nonNullListWithDefaultStringField(param: ["paramDefaultValue", $item]) }""",
{"item": "varValue"},
{
"data": {
"nonNullListWithDefaultStringField": "SUCCESS-[paramdefaultvalue-scalar-varvalue-scalar]"
}
},
),
(
"""query ($item: String!) { nonNullListWithDefaultStringField(param: ["paramDefaultValue", $item]) }""",
None,
{
"data": None,
"errors": [
{
"message": "Variable < $item > of required type < String! > was not provided.",
"path": None,
"locations": [{"line": 1, "column": 8}],
}
],
},
),
(
"""query ($item: String!) { nonNullListWithDefaultStringField(param: ["paramDefaultValue", $item]) }""",
{"item": None},
{
"data": None,
"errors": [
{
"message": "Variable < $item > of non-null type < String! > must not be null.",
"path": None,
"locations": [{"line": 1, "column": 8}],
}
],
},
),
(
"""query ($item: String!) { nonNullListWithDefaultStringField(param: ["paramDefaultValue", $item]) }""",
{"item": "varValue"},
{
"data": {
"nonNullListWithDefaultStringField": "SUCCESS-[paramdefaultvalue-scalar-varvalue-scalar]"
}
},
),
],
)
async def test_coercion_non_null_list_with_default_string_field(
engine, query, variables, expected
):
assert await engine.execute(query, variables=variables) == expected
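
To make the expected values above easier to read, here is a hedged sketch of the kind of resolver that would produce the "SUCCESS-[...]" strings; the real resolve_list_field lives in tests/functional/coercers/common.py and may be implemented differently:

async def sketch_resolve_list_field(parent, args, ctx, info):
    # Render each coerced item as "<lowercased>-scalar", or "None" for nulls.
    def fmt(item):
        return "None" if item is None else f"{item.lower()}-scalar"

    param = args.get("param", ["defaultstring", None])
    if not isinstance(param, list):
        param = [param]
    return "SUCCESS-[" + "-".join(fmt(item) for item in param) + "]"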
| 37.896317
| 166
| 0.419073
|
6151410b04fa38422ffbe469c4231ff8b723ca63
| 2,261
|
py
|
Python
|
pytezos/tools/docstring.py
|
juztin/pytezos-1
|
7e608ff599d934bdcf129e47db43dbdb8fef9027
|
[
"MIT"
] | 1
|
2021-05-20T16:52:08.000Z
|
2021-05-20T16:52:08.000Z
|
pytezos/tools/docstring.py
|
juztin/pytezos-1
|
7e608ff599d934bdcf129e47db43dbdb8fef9027
|
[
"MIT"
] | 1
|
2020-12-30T16:44:56.000Z
|
2020-12-30T16:44:56.000Z
|
pytezos/tools/docstring.py
|
tqtezos/pytezos
|
a4ac0b022d35d4c9f3062609d8ce09d584b5faa8
|
[
"MIT"
] | 1
|
2022-03-20T19:01:00.000Z
|
2022-03-20T19:01:00.000Z
|
import re
import inspect
import types
from functools import update_wrapper


def is_interactive():
import __main__ as main
    return not hasattr(main, '__file__')


__interactive_mode__ = is_interactive()


def get_attr_docstring(class_type, attr_name):
if attr_name == 'get':
attr_name = '__call__'
attr = getattr(class_type, attr_name, None)
if attr and attr.__doc__:
        return re.sub(r' {3,}', '', attr.__doc__)


def default_attr_filter(x):
    return not x.startswith('_')


def get_class_docstring(class_type, attr_filter=default_attr_filter, extended=False):
def attr_format(x):
attr = getattr(class_type, x)
if type(attr) == property:
name = f'.{x}'
else:
if extended:
sig = str(inspect.signature(attr)).replace('self, ', '')
else:
sig = '()'
name = f'.{x}{sig}'
if extended:
doc = get_attr_docstring(class_type, x)
else:
doc = ''
return f'{name}{doc}'
    return '\n'.join(map(attr_format, filter(attr_filter, dir(class_type))))


def inline_doc(method):
if not __interactive_mode__:
return method
doc = [repr(method)]
if method.__doc__:
doc.append(re.sub(r' {3,}', '', method.__doc__))
class CustomReprDescriptor:
def __get__(self, instance, owner):
class MethodWrapper:
def __init__(self):
self.class_instance = instance
self.doc = '\n'.join(doc)
def __call__(self, *args, **kwargs):
return method(self.class_instance, *args, **kwargs)
def __repr__(self):
return self.doc
return update_wrapper(MethodWrapper(), method)
    return CustomReprDescriptor()


class InlineDocstring(type):
def __new__(mcs, name, bases, attrs, **kwargs):
new_attrs = {}
for attr_name, attr in attrs.items():
if isinstance(attr, types.FunctionType) and attr.__doc__ and not attr_name.startswith('_'):
attr = inline_doc(attr)
new_attrs[attr_name] = attr
return type.__new__(mcs, name, bases, new_attrs, **kwargs)
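
A minimal usage sketch for the metaclass above; the Demo class is hypothetical and not part of pytezos:

class Demo(metaclass=InlineDocstring):
    def greet(self, name):
        """Return a greeting for the given name."""
        return f'hello {name}'


# In an interactive session (no __file__ on __main__), `Demo().greet` now repr's as the
# wrapped method plus its cleaned docstring, while `Demo().greet('world')` still returns
# 'hello world'. When imported from a script, inline_doc() is a no-op and greet stays a
# plain method.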
| 26.290698
| 103
| 0.586466
|
afc46753d633bdebf137ab246b88112dcde8da9a
| 54,847
|
py
|
Python
|
DistributeCralwerSpecialInstitution/Spiders/wuba/tmp.py
|
xiaoshicae/suanhua
|
b7724bff14bf205c00f405bf6fe7af6d8fecd61f
|
[
"Apache-2.0"
] | 3
|
2018-12-14T08:16:22.000Z
|
2019-04-23T01:30:56.000Z
|
DistributeCralwerSpecialInstitution/Spiders/wuba/tmp.py
|
xiaoshicae/suanhua
|
b7724bff14bf205c00f405bf6fe7af6d8fecd61f
|
[
"Apache-2.0"
] | null | null | null |
DistributeCralwerSpecialInstitution/Spiders/wuba/tmp.py
|
xiaoshicae/suanhua
|
b7724bff14bf205c00f405bf6fe7af6d8fecd61f
|
[
"Apache-2.0"
] | 1
|
2018-09-04T09:48:06.000Z
|
2018-09-04T09:48:06.000Z
|
# -*- coding: utf-8 -*-
from lxml import etree
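# A hedged parsing sketch (not part of the original script): once the scraped page below
# is bound to `content`, it could be queried with lxml roughly as follows. The XPath
# class names come from the markup below, but which fields matter is an assumption:
#
#     tree = etree.HTML(content)
#     title = tree.xpath('//div[@class="mainTitle"]/h1/text()')
#     contact = tree.xpath('//div[@class="su_con"]/a/text()')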
content = """
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8"/>
<meta name="renderer" content="webkit">
<meta http-equiv="X-UA-Compatible" content="IE=edge,chrome=1">
<title>北京信用贷款 快速简便【低利息】额度高当天放款 - 北京58同城</title>
<meta name="description" content="北京信用贷款 快速简便【低利息】额度高当天放款,北京全境汽车房产信用贷款!【低利息】额度高﹩当天放款:不成功不收费一、银行抵押贷款1、抵押消费贷:年龄18-65岁,较长可贷30年,年利率5.88%起。2、抵押经营贷:年龄18-65岁,循环适"/>
<meta http-equiv="mobile-agent" content="format=wml;url=http://wap.58.com/bj_danbaobaoxiantouzi/30843376389197.html"/><meta http-equiv="mobile-agent" content="format=xhtml;url=http://wap.58.com/bj_danbaobaoxiantouzi/30843376389197.html"/>
<meta http-equiv="mobile-agent" content="format=xhtml; url=http://i.m.58.com/bj/danbaobaoxiantouzi/30843376389197x.shtml">
<meta http-equiv="mobile-agent" content="format=html5; url=http://i.m.58.com/bj/danbaobaoxiantouzi/30843376389197x.shtml">
<meta http-equiv="mobile-agent" content="format=wml; url=http://i.m.58.com/bj/danbaobaoxiantouzi/30843376389197x.shtml">
<link type="text/css" rel="stylesheet" href='http://c.58cdn.com.cn/ds/shop/v2/base_v20160902172139.css' media="all"/>
<link rel="stylesheet" href='http://c.58cdn.com.cn/ds/weixin/hy_freetel_v20170627180606.css'>
<link type="text/css" rel="stylesheet" href='http://c.58cdn.com.cn/componentsLoader/dist/CompontsLoader_v20170817153613.css'
media="all"/>
<link type="text/css" rel="stylesheet" href='http://c.58cdn.com.cn/ds/detail_new/detail_v20170821185524.css'
media="all"/>
<!-- <link type="text/css" rel="stylesheet" href='http://c.58cdn.com.cn/ds/shop/v2/detail-v2_v20170213171150.css'
media="all"/>
<link type="text/css" rel="stylesheet" href='http://c.58cdn.com.cn/ds/other/ly/hyliststyle_v20170314153313.css'
media="all"/> -->
<!-- bj banjia S -->
<!-- <link type="text/css" rel="stylesheet" href='http://c.58cdn.com.cn/ds/shop/v2/banjiaold_v20160413013423.css'
media="all"/> -->
<!-- bj banjia E -->
<!--头部js start-->
<script>
!function(t, e, w) {
w._ty_key = '-K0MmSsCYqg';
var a = t.createElement(e);
a.async = !0,a.src = ('https:' == t.location.protocol ? 'https://' : 'http://') + 'j2.58cdn.com.cn/pc/hy/tingyun-rum.min.js';
var c = t.getElementsByTagName(e)[0];
c.parentNode.insertBefore(a, c)
}(document, 'script', window);
</script>
<script>
var _hmt = _hmt || [];
(function() {
var hm = document.createElement("script");
hm.src = "https://hm.baidu.com/hm.js?e2d6b2d0ec536275bb1e37b421085803";
var s = document.getElementsByTagName("script")[0];
s.parentNode.insertBefore(hm, s);
})();
</script>
<script type="text/javascript">
document.domain = '58.com';
try {
var ____json4fe = {rootcatentry:{dispid:'8703',name:'商务服务',listname:'shangwu'},catentry:{dispid:'14000',name:'投资担保',listname:'danbaobaoxiantouzi'},locallist:[{dispid:'1', name:'北京', listname:'bj'},{dispid:'1142', name:'朝阳', listname:'chaoyang'},{dispid:'1195', name:'国贸', listname:'guomao'}],infoid:'30843376389197',userid:'43412961916182',linkman:'张经理',is_huzhuan:false,modules:'final',shipin:'',start:(new Date()).getTime(),isbiz:true,isport:true};
____json4fe.modules = 'finalpage';
____json4fe.isBQX = false;
____json4fe._trackPagetype = "detail";
____json4fe.sid = '112965469197063903717833467';
____json4fe.sessionid = '';
____json4fe.shopid = '110502542668756277';
____json4fe._trackURL = "{'cate':'8703,14000','area':'1,1142,1195','is_biz':'true','pagetype':'detail','source':'6','version':'huangye_detail_pc_0001','page':'shangwu_danbaobaoxiantouzi_xinban'}";
____json4fe._trackParams = [{"I":10496,"V":""}];
____json4fe._trackPageview = "/bj/shangwu/danbaobaoxiantouzi/detail/";
____json4fe.linkman = '张经理';
____json4fe.hidetel = '15901413319';
____json4fe.ABVersion = "B";
____json4fe.catetag = "huangye";
____json4fe.visitorid = "-2";
____json4fe.req_version= "1.0.0";
____json4fe.isshowofflinemsg = 0;
} catch (e) {
}
var ajax_param = '{"infoMethod":["shopHotInfo"],"infoVersion":"default","dataParam":"30843376389197_43412961916182_14000_1"}';
var _userid = 43412961916182;
var _line = "p1001";
</script>
<script type="text/javascript">
var ____loadCfg = ['huangye', 'shenghuo', 'finalpage'];
</script>
<script src='http://j2.58cdn.com.cn/js/require_jquery_load.js'></script>
<script type="text/javascript" src='http://j2.58cdn.com.cn/js/jquery-1.8.3.js'></script>
<script type="text/javascript" src="http://tracklog.58.com/referrer4.js"></script>
<script type="text/javascript" src='http://j1.58cdn.com.cn/ui7/js/createElement-lte-IE8_v0.js'></script>
<script type="text/javascript" src='http://j1.58cdn.com.cn/js/5_1/comm_js/boot_finalpage_version_v20170527150113.js'></script>
<script type="text/javascript" src='http://j1.58cdn.com.cn/ds/js/DD_belatedPNG_0.0.8a-min_v20151016152055.js'></script>
<script type="text/javascript" src='http://j1.58cdn.com.cn/ds/detail/weizhan_v20161114140504.js'></script>
<link rel="stylesheet" href="http://api.map.baidu.com/library/SearchInfoWindow/1.5/src/SearchInfoWindow_min.css" />
<script type="text/javascript" src="http://api.map.baidu.com/api?v=2.0&ak=9IpBeHFXCwFHPuxmZyRlO4R1Hib58Kxq"></script>
<script type="text/javascript" src="http://api.map.baidu.com/library/SearchInfoWindow/1.5/src/SearchInfoWindow_min.js"></script>
<script type="text/javascript" src="http://api.map.baidu.com/library/AreaRestriction/1.2/src/AreaRestriction_min.js"></script>
<script type="text/javascript" src='//j1.58cdn.com.cn/webim/js/entry.js'></script>
<script type="text/javascript">
function addCookie(name, value) {
var Days = 30;
var exp = new Date();
exp.setTime(exp.getTime() + Days * 24 * 60 * 60 * 1000);
document.cookie = name + "=" + escape(value) + ";domain=58.com;path=/;expires=" + exp.toGMTString();
}
</script>
<script>
(function () {
var bp = document.createElement('script');
bp.src = '//push.zhanzhang.baidu.com/push.js';
var s = document.getElementsByTagName("script")[0];
s.parentNode.insertBefore(bp, s);
})();
</script>
<script type="text/javascript">
window.WMDA_SDK_CONFIG = ({
api_v: 1,
sdk_v: 0.1,
mode: 'report',
appid: 1411632341505,
key: 'p2ikeuny',
project_id: '1409632296065'
});
(function() {
var wmda = document.createElement('script');
wmda.type='text/javascript';
wmda.async = true;
wmda.src = ('https:' == document.location.protocol ? 'https://' : 'http://') + 'j1.58cdn.com.cn/wmda/js/statistic.js?' + (Math.floor(+new Date() / 60000)) * 60;
var s = document.getElementsByTagName('script')[0];
s.parentNode.insertBefore(wmda, s);
})();
</script>
<!-- GrowingIO -->
<script type='text/javascript'>
var _vds = _vds || [];
window._vds = _vds;
(function(){
_vds.push(['setAccountId', '98e5a48d736e5e14']);
_vds.push(['setImp', false]);
_vds.push(['setPageGroup', 'detail']);
(function(){
var vds = document.createElement('script');
vds.type='text/javascript';
vds.async = true;
vds.src = ('https:' == document.location.protocol ? 'https://' : 'http://') + 'dn-growing.qbox.me/vds.js';
var s = document.getElementsByTagName('script')[0];
s.parentNode.insertBefore(vds, s);
})();
})();
var infoDianpuData = {"lat":"39.915404","bsName":"国贸","userId":43412961916182,"infoId":30843376389197,"cateId":14000,"districtName":"朝阳","cateName":"投资担保","cityId":1,"cityName":"北京","lng":"116.471372","area":"北京","name":"北京盛世博远贷款服务"};
</script>
<script type="text/javascript">
var infoTel400Sign = "870bb2e021744a7ecb6222cf5198c745263bfb39";
</script> <!--头部js end-->
</head>
<body class="">
<input id="report-userid" type="hidden" value="43412961916182">
<input id="report-type" type="hidden" value="tab">
<!--头部 start-->
<!-- =S topbar -->
<div id="commonTopbar" class="commonTopbar">
<script type="text/javascript">
window.wbAsyncInit = function wbAsyncInit( CL ){
CL.invoke('topbar', {
aroundCity: true,
weather: true,
appQR: true, // homepage QR
homepageLink: true,
size: 'default' // default: 1190px, narrow: 1000px
});
/**
* 统一收藏弹窗
*/
CL.invoke('popcollection', {
clickBtn: $('#collect'),
source: "passport",
infoid: ____json4fe.infoid,
callback: function(){
collectText();
}
});
};
</script>
</div>
<!-- =E topbar -->
<!-- =S header -->
<div class="header-wrap" id="header">
<div class="header-inner">
<a class="logo float_l" href="/huangye/" target="_blank" onClick="clickLog('from=pc_hy_detail_shangwu_danbaobaoxiantouzi_N_logo');">58同城</a>
<a href="http://post.58.com/1/14000/s5" target="_blank" class="postbtn float_r" id="postbtn" onClick="clickLog('from=pc_hy_detail_shangwu_danbaobaoxiantouzi_N_fabu');">免费发布</a>
</div>
</div>
<!-- =S topbanner -->
<script type="text/javascript">document.write('<scri'+'pt src="http://j1.58cdn.com.cn/ds/tgbrand/js/brand_detialpage_v1.js?temp=' + Math.random() + '"></scri'+'pt>');</script>
<div class="topbannerbar"><div id="brand_detial_top_banner" class="brandad1000" ></div></div>
<!-- =E topbanner -->
<!-- =S nav -->
<div class="nav">
<a href="http://bj.58.com/" target="_blank">北京58同城</a> > <a href="http://bj.58.com/shangwu.shtml" target="_blank">北京商务服务</a>
> <a href='http://bj.58.com/danbaobaoxiantouzi/' target="_blank">北京投资担保</a>
> <a href="http://bj.58.com/chaoyang/danbaobaoxiantouzi/" target="_blank" class="crb_a_1">朝阳投资担保</a>
> <a href="http://bj.58.com/guomao/danbaobaoxiantouzi/" target="_blank" class="crb_a_2">国贸投资担保</a>
</div>
<!-- =E nav --><!-- =E header -->
<!--头部 end-->
<!--基础信息 start-->
<!-- =S infobase -->
<div class="basicinfo clearfix" id="basicinfo">
<!-- =S 主标题 -->
<div class="mainTitle">
<h1>
北京信用贷款 快速简便【低利息】额度高当天放款
</h1>
<!-- =S 发布时间&浏览次数 分享、收藏功能块-->
<div id="index_show" class="mtit_con c_999 f12 clearfix">
<ul class="mtit_con_left fl">
<li title="发布日期" class="time">2017-07-25 发布</li>
<li title="浏览次数" class="count"><em id="totalcount">0</em>次浏览</li>
</ul>
<!-- =S 功能区 -->
<ul class="mtit_con_right">
<!-- hover 为鼠标划入是的状态,该状态需要脚本做处理 -->
<li id="freemsg" class="freemsg"><a href="javascript:setbg('把该信息发送到手机',400,200,'http://my.58.com/downloadinfo/inputmobile/30843376389197/')"><i class="mtit1"></i><span class="ml_1">发送到手机</span></a>
</li>
<li id="collect" class="collect">
<a href="javascript:void(0);">
<i class="mtit2"></i><span class="ml_2">收藏</span></a>
</li>
<li id="newshare" class="newshare">
<i class="mtit3"></i><span class="ml_3">分享</span>
</li>
<li id="report" class="report"><i class="mtit4"></i><span class="ml_4" >举报</span>
</li>
</ul>
<!-- =E 功能区 -->
<!--=S 分享 -->
<div id="newshareBox">
<div class="newshareBox_con">
<div class="bdsharebuttonbox" data-tag="share_1">
<a data-cmd="weixin" class="bds_weixin" title="分享到微信朋友圈"></span>
<a class="bds_sqq" data-cmd="sqq"></a>
<a class="bds_qzone" data-cmd="qzone"></a>
<a class="bds_tsina" data-cmd="tsina"></a>
</div>
<script>
with(document)0[(getElementsByTagName('head')[0]||body).appendChild(createElement('script')).src='http://bdimg.share.baidu.com/static/api/js/share.js?cdnversion='+~(-new Date()/36e5)];
</script>
</div>
</div>
<!--=E 分享 -->
<!--=S 举报 -->
<div id="reportBar" class="reportBox" style=" left:552px;">
<ul class="mtit_con_ul">
<li><a href="http://about.58.com/info/deleteinfo.aspx" target="_blank" onclick="clickLog('from=pc_hy_detail_shangwu_danbaobaoxiantouzi_N_main_jubao_dianhua');">电话被冒用</a></li>
<li><a href="http://about.58.com/vote/pc?infoId=30843376389197" target="_blank" onclick="clickLog('from=pc_hy_detail_shangwu_danbaobaoxiantouzi_N_main_jubao_xinxi');">信息虚假违法</a></li>
<li><a href="http://about.58.com/voteProof?voteSource=1&cateCode=718&infoId=30843376389197" target="_blank" onClick="clickLog('from=pc_hy_detail_shangwu_danbaobaoxiantouzi_N_main_jubao_qiye');">企业被冒用</a></li>
<li><a href="http://110.58.com?detail_url=http://bj58.com/danbaobaoxiantouzi/30843376389197x.shtml&postId=30843376389197&category=1005" onClick="clickLog('from=pc_hy_detail_shangwu_danbaobaoxiantouzi_N_main_jubao_baoan');" target="_blank">我要报案</a></li>
</ul>
</div>
<script type="text/javascript">
function report(){
boot.require('config, dom, extension.string', function(Frame, config, dom, string){
var now = new Date(),
datestr = [
now.getFullYear(),
string.leftPad(now.getUTCMonth(), 2, '0'),
string.leftPad(now.getUTCDate(), 2, '0'),
string.leftPad(now.getHours(), 2, '0'),
string.leftPad(now.getMinutes(), 2, '0')
].join('');
_gaq.push(['pageTracker._trackEvent', '58_detail_report', config.j.infoid, datestr]);
setbg('信息已成交',200,100,'http://image.58.com/booker/report/fininsh/?infoid=30843376389197');
});
}
</script>
<!--=E 举报 -->
</div> <!-- =E 发布时间&浏览次数 分享、收藏功能块-->
</div>
<!-- =E 主标题 -->
<!-- =S 基础信息列表 -->
<div class="col_sub sumary no_col_left">
<ul class="suUl" wltMark="2" iswltMember=1>
<!--=S 类别 -->
<li>
<div class="su_tit">类 别:</div>
<div class="su_con">贷款</div>
</li>
<!--=E 类别 -->
<!--=S 小类 -->
<li>
<div class="su_tit spacing2">小 类:</div>
<div class="su_con">个人信贷 企业信贷 抵押贷款 质押贷款</div>
</li>
<!--=E 小类 -->
<!--=S 服务区域 -->
<li>
<div class="su_tit">服务区域:</div>
<div class="su_con quyuline">
<a href="/chaoyang/danbaobaoxiantouzi/" target="_blank">朝阳</a> <a href="/guomao/danbaobaoxiantouzi/" target="_blank">国贸</a> </div>
</li>
<!--=E 服务区域 -->
<li>
<div class="su_tit spacing3">联 系 人:</div>
<div class="su_con">
<a target="_blank" href="" rel="nofollow">张经理</a>
<!--微聊icon start-->
<a class="btn-IM im-chat" data-im="%7B%22rootcateid%22%3A%228703%22%2C%22cateid%22%3A%2234804%22%2C%22userid%22%3A%2243412961916182%22%2C%22postid%22%3A%2230843376389197%22%7D" href="javascript:;"></a>
<!--微聊icon end-->
<!--微信咨询icon start-->
<span id="weixinWrapBox-new"></span>
<div class="tc_wx_contact" id="tc_wx_guanzhu" style="display: none;">
<div class="tc_wx_contactin">
<div class="tc_wx_contact_close"></div>
<div class="tc_wx_contact_main">
<div class="tc_wx_contact_erweima">
<p class="tc_wx_contact_des">微信扫一扫 随时问随时聊</p>
<div class="tc_wx_erweima_guanzhu"><img src="" /></div>
</div>
<div class="tc_wx_contact_right">
<img src="//img.58cdn.com.cn/ds/detail/weixintc_des.png">
</div>
</div>
</div>
</div>
<div class="weixinBoxMask"></div>
<!--微信咨询icon end-->
</div>
</li> <li>
<div class="su_tit">商家地址:</div>
<div class="su_con">
<a href="/chaoyang/danbaobaoxiantouzi/" target="_blank">朝阳 -</a>
<a href="/guomao/danbaobaoxiantouzi/" target="_blank">国贸</a>
<span class="adr">- 北京</span>
</div>
</li>
<li>
<div class="item-btn">
<a href="javascript:void(0);" class="btn_tocompletetel" id="view-connect" rel="nofollow">查看电话号码</a>
</div>
</li> </ul>
</div><!-- =E 基础信息列表 -->
<!-- =S 用户 店铺 信息 -->
<div class="userinfo">
<div class="userinfotit">
<h2 >北京盛世博远贷款服务</h2>
<div class="userinfo-tag clearfix">
<!-- =S 会员年限 -->
<span class="usertag-members">会员1年</span>
<!-- =E 会员年限 -->
<!-- =S 身份认证 -->
<a href="http://about.58.com/verify.html" target="_blank" rel="nofollow" onclick="clickLog('from=pc_hy_detail_shangwu_danbaobaoxiantouzi_N_shop_yyzz&entityId=30843376389197&entityType=0&psid=112965469197063903717833467');"><span class="usertag-business" title="营业执照认证">营业执照认证</span></a>
<!-- =E 身份认证 -->
</div>
</div>
<div class="userinfomain">
<div class="userinfo-intro">
<p>该商家加入58已经<span class="c_num">283</span>天</p>
</div>
<div class="userinfo-link">
<!-- =S 进入店铺 -->
<a href="http://mall.58.com/110502542668756277/?info_cateId=14000&info_local=1" class="vert_entr" onclick="clickLog('from=pc_hy_detail_shangwu_danbaobaoxiantouzi_N_shop_jrdp&entityId=30843376389197&entityType=0&psid=112965469197063903717833467');" target="_blank">进入店铺</a>
<!-- =E 进入店铺 -->
<span class="vline">|</span>
<!-- =S 查看信用档案 -->
<a rel="nofollow" target="_blank" href="http://credit.vip.58.com/usercredit?userId=43412961916182" onclick="clickLog('from=pc_hy_detail_shangwu_danbaobaoxiantouzi_N_shop_ckda&entityId=30843376389197&entityType=0&psid=112965469197063903717833467');">查看信用档案</a>
<!-- =E 查看信用档案 -->
<span class="vline">|</span>
<!-- =S 进入官网 -->
<a target="_blank" href="http://t5843586611526403.5858.com/" class="vert_entr" onclick="clickLog('from=pc_hy_detail_shangwu_danbaobaoxiantouzi_N_shop_jrgw&entityId=30843376389197&entityType=0&psid=112965469197063903717833467');">进入官网</a>
<div class="vert_entr mr" style="display: none;">
微站
<span class="zhan_code_con_r">
<i class="zhan_code_ico"></i>
<div class="hovercode_con_r" style="display: none;">
<div id="zhan_code_list_right" class="zhan_code_list">
<div class="z_code public" style="display: none;" >
<h3>微信扫描二维码关注商家</h3>
<span class="z_code_img"><img src="http://pic2.58.com/ds/qiye/admin/zhan_pc_code.png"></span>
</div>
<div class="z_code mobilesit" style="display: none;">
<h3>扫描二维码访问移动网站</h3>
<span class="z_code_img"><img id="mimg_r" src="http://pic2.58.com/ds/qiye/admin/zhan_pc_code.png"></span>
</div>
<div class="z_code app" style="display: none;">
<h3>扫描二维码下载商家APP</h3>
<span class="z_code_img"><img id="appimg_r" src="http://pic2.58.com/ds/qiye/admin/zhan_pc_code.png"></span>
</div>
</div>
<a class="zhan_code_tit" href="http://weizhan.58.com" onClick="clickLog('from=weizhanfromdetailright&entityId=30843376389197&entityType=0&psid=112965469197063903717833467');">二维码来自58微站通</a>
<div class="zhan_arrow"></div>
<div class="zhan_arrow2"></div>
</div>
</span>
</div>
<!-- =E 进入官网 -->
</div>
</div>
</div><!-- =E 用户 店铺 信息 -->
</div>
<!-- =S 提示语 -->
<div class="warnings">
<p><span class="warnings-t">温馨提示</span>:1、在办理服务前请确认对方资质, 夸大的宣传和承诺不要轻信!2.任何要求预付定金、汇款至个人银行账户等方式均存在风险,谨防上当受骗!</p>
</div>
<!-- =E 提示语 -->
<!-- =E infobase -->
<!--基础信息 end-->
<!--详细信息 start-->
<div class="clearfix" id="content">
<!-- =S contentleft -->
<div class="contentleft" id="contentleft">
<!-- =S 详细描述 -->
<div class="bc">
<!--=S 滚动tab-->
<div class="detaildepict">
<div class="hc">
<div style="top: 0px;" top="601" scrolls="408" class="tabs clearfix " id="detail_tab">
<!--tab详情描述 start-->
<a id="detail_1" class="tab cur" href="javascript:void(0)" onclick="clickLog('from=pc_hy_detail_shangwu_danbaobaoxiantouzi_N_tab_miaoshu&entityId=30843376389197&entityType=0&psid=112965469197063903717833467');"><span>详情描述</span></a>
<!--tab详情描述 start-->
<!--tab相关推荐 start-->
<a class="tab" id="ckgdbtn" href="javascript:void(0)" onclick="clickLog('from=pc_hy_detail_shangwu_danbaobaoxiantouzi_N_tab_tuijian&entityId=30843376389197&entityType=0&psid=112965469197063903717833467');">
<span>相关推荐</span>
</a>
<!--tab相关推荐 end-->
<a class="tab" id="bzznbtn" href="javascript:void(0);" onClick="clickLog('from=pc_hy_detail_shangwu_danbaobaoxiantouzi_N_tab_chuangye');">
<span>创业指南</span>
</a>
<!--tab查看电话按钮 start-->
<a class="tab btn_tocompletetel" id="freePhoneBtn" href="javascript:void(0)" rel="nofollow" onclick="clickLog('from=pc_hy_detail_shangwu_danbaobaoxiantouzi_N_tab_400&entityId=30843376389197&entityType=0&psid=112965469197063903717833467');">查看电话号码</a>
<!--tab查看电话按钮 end-->
</div>
</div>
</div>
<!--=E 滚动tab -->
<!--=S 主体内容-->
<!--详情描述 start-->
<div id="con_1" class="ContentBox" style="display: block;">
<div class="description">
<section class="des_con" id="phoneFrame_con">
<div id="sub_1">
<!--=S 详细描述文字 -->
<div class="descriptionBox">
<div class="foldingbox">
<article class="description_con"> <p><strong>北京全境汽车房产信用贷款!【低利息】额度高﹩当天放款:不成功不收费</strong></p><p><br><strong>一、银行抵押贷款</strong><br><strong>1</strong><strong>、抵押消费贷</strong><strong>:年龄</strong><strong>18-65</strong><strong>岁,较长可贷</strong><strong>30</strong><strong>年,年利率</strong><strong>5.88%</strong><strong>起。</strong><br><strong>2</strong><strong>、抵押经营贷</strong><strong>:年龄</strong><strong>18-65</strong><strong>岁,循环适用,先息后本年利率</strong><strong>5.88%</strong><strong>起。</strong><br><br><strong>二、房产质押</strong><br><strong>北京·燕郊房产均可操做,额度市值</strong><strong>9</strong><strong>成(单方也可以),放款速度快。月息</strong><strong>0.8%</strong><strong>起,多种产品供您选择,手续简洁,先息后本,等额本息还款方式可选。</strong><br><br><strong>三、个人信用贷款</strong><br><strong>年龄</strong><strong>22-60</strong><strong>岁(退休也可以)额度</strong><strong>10-100</strong><strong>万,快至当天放款,月息</strong><strong>0.38%</strong><strong>起</strong><br><strong>1.</strong><strong>房屋贷</strong><br><strong>2.</strong><strong>保单贷:</strong><br><strong>全国范围生效两年以上的保单或结清</strong><strong>60</strong><strong>天内也可以(平安保险</strong><strong>/</strong><strong>中国人寿</strong><strong>/</strong><strong>新华人寿</strong><strong>/</strong><strong>泰康</strong><strong>/</strong><strong>太平洋</strong><strong>/</strong><strong>中国人保</strong><strong>/</strong><strong>太平人寿</strong><strong>/</strong><strong>阳光人寿</strong><strong>/</strong><strong>生命人寿</strong><strong>/</strong><strong>友邦人寿</strong><strong>/</strong><strong>中宏</strong><strong>/</strong><strong>中意</strong><strong>/</strong><strong>招商信诺</strong><strong>/</strong><strong>太平</strong><strong>/</strong><strong>生命</strong><strong>/</strong><strong>中英</strong><strong>/</strong><strong>工银安盛</strong><strong>/</strong><strong>中美大都会的保单)年缴费的</strong><strong>30</strong><strong>倍。</strong><br><strong>3.</strong><strong>公积金贷:</strong><br><strong>可贷额度为公积金个人缴存额度</strong><strong>×333</strong><strong>倍。</strong><br><strong>4.</strong><strong>社保贷:</strong><br><strong>社保连续缴费</strong><strong>18</strong><strong>个月以上,缴费基数</strong><strong> 4000</strong><strong>元,即可申请,基数</strong><strong> 4000</strong><strong>可贷</strong><strong>30</strong><strong>倍,基数</strong><strong> 6000</strong><strong>可贷</strong><strong>40</strong><strong>倍。</strong><br><strong>5.</strong><strong>北京工资</strong> <strong>信用贷:</strong><br><strong>北京工作半年以上(本地外地人均可)工资金额</strong><strong>2500</strong><strong>以上,蕞高下款</strong><strong>50</strong><strong>万</strong><strong>!</strong></p><p><strong>注:以上业务均可多家银行同时操作,较高下款案例</strong><strong>200</strong><strong>万。</strong></p><p><strong>四、车辆抵押贷<br>当天放款、成数高、利息超低,可只押手续不押车。给不给力来了就知道。<br>1、全款车辆<br>2、贷款流程:看车---签订借款手续---放款</strong><br></p>
</article>
<p>
</p>
<script type="text/javascript">boot.require('business.hidekey', function(Frame, hk){ hk.init();});</script>
<p>联系我时,请说是在58同城看到的,谢谢!</p>
</div>
<div class="foldingbar">
<a href="javascript:void(0);" class="btn-folding"><span onClick="clickLog('from=pc_hy_detail_shangwu_danbaobaoxiantouzi_N_miaoshu_wenzi')">展开更多描述<i class="ico ico-miniarrdown"></i></span></a>
</div>
<div id="contuser"></div>
</div>
<!--=E 详细描述文字 -->
<!--=S 详细描述图片 -->
<div id="img_player1" class="clearfix">
<ul class="imgplayerlist">
<li><span><img src="http://pic8.58cdn.com.cn/p1/big/n_v23dd8abf13ddd470499fed10782331611.jpg?w=425&h=320" alt="" /></span></li> <li><span><img src="http://pic3.58cdn.com.cn/p1/big/n_v2117b0d0c9f464bc295bc17bc44d1fe13.jpg?w=425&h=320" alt="" /></span></li> <li><span><img src="http://pic5.58cdn.com.cn/p1/big/n_v2d7ef326a2255446a934932c2923f2ee6.jpg?w=425&h=320" alt="" /></span></li> </ul>
</div>
<div class="foldingbar">
<a href="javascript:void(0);" class="btn-folding"><span onClick="clickLog('from=pc_hy_detail_shangwu_danbaobaoxiantouzi_N_miaoshu_tupian')">展开更多图片<i class="ico ico-miniarrdown"></i></span></a>
</div>
<!--=E 详细描述图片 -->
</div>
</section>
</div>
</div>
<!--详情描述 end-->
<!--累计评价 start-->
<div id="con_2" class="ContentBox" style="display: block;"></div>
<!--累计评价 end-->
<!--=E 主体内容 -->
</div><!-- =E 详细描述 -->
<!-- =S 商家动态 -->
<div class="relatedservices" id="merchant_state" style="display:none">
<div class="merchant-state">
<h3>商家动态</h3>
<div class="state-list"></div>
<div class="more-box" style="display:none">
<a href="javascript://" onclick="pc_hy_detail_shangwu_danbaobaoxiantouzi_N_miaoshu_sjdt_more">展开更多动态<i></i></a>
</div>
</div>
</div>
<!-- =E 商家动态 -->
<!-- =S 猜你喜欢&相关服务 -->
<div class="relatedservices" id="ckgd">
<!--=S 猜你喜欢-->
<div id="n_ckgd2_tuijian" data-title="北京信用贷款 快速简便【低利息】额度高当天放款" data-objtype="贷款" data-third="34804"></div>
<!--=E 猜你喜欢-->
<!--=S 相关服务-->
<div class="footad">
<span style="display:none;" id="keyword">
<!-- google_ad_section_start -->
北京 商务服务 投资担保
<!-- google_ad_section_end -->
</span>
<div id="direct_ad_bottom"></div>
</div>
<!--=E 相关服务-->
</div>
<script type="text/javascript">
var infoTel400Sign = "870bb2e021744a7ecb6222cf5198c745263bfb39";
</script> <!-- =E 猜你喜欢&相关服务 -->
<!-- =S 服务一条龙&本地生活服务大全 -->
<div class="n_ckgd">
<!--=S 服务一条龙 -->
<div class="n_ckgd1">
<h3><i></i>服务一条龙</h3>
<div class="n_fuwu">
<ul class="clearfix">
<li>
<a href='http://bj.58.com/zhuce/' target="_blank" onclick="clickLog('from=pc_hy_detail_shangwu_danbaobaoxiantouzi_N_tuijian_yitiaolong_1&entityId=30843376389197&entityType=0&psid=112965469197063903717833467');" >
<div class='img_zhuce'><p></p></div>
</a>
</li>
<li>
<a href='http://bj.58.com/gongzhuang/' target="_blank" onclick="clickLog('from=pc_hy_detail_shangwu_danbaobaoxiantouzi_N_tuijian_yitiaolong_2&entityId=30843376389197&entityType=0&psid=112965469197063903717833467');" >
<div class='img_gongzhuang'><p></p></div>
</a>
</li>
<li>
<a href='http://bj.58.com/jiazhuang/' target="_blank" onclick="clickLog('from=pc_hy_detail_shangwu_danbaobaoxiantouzi_N_tuijian_yitiaolong_3&entityId=30843376389197&entityType=0&psid=112965469197063903717833467');" >
<div class='img_jiazhuang'><p></p></div>
</a>
</li>
<li>
<a href='http://bj.58.com/caishui/' target="_blank" onclick="clickLog('from=pc_hy_detail_shangwu_danbaobaoxiantouzi_N_tuijian_yitiaolong_4&entityId=30843376389197&entityType=0&psid=112965469197063903717833467');" >
<div class='img_caishui'><p></p></div>
</a>
</li>
<li>
<a href='http://bj.58.com/shangbiaozhli/' target="_blank" onclick="clickLog('from=pc_hy_detail_shangwu_danbaobaoxiantouzi_N_tuijian_yitiaolong_5&entityId=30843376389197&entityType=0&psid=112965469197063903717833467');" >
<div class='img_shangbiaozhli'><p></p></div>
</a>
</li>
</ul>
</div>
</div>
<!--=E 服务一条龙-->
<!--=S 本地生活服务大全-->
<div id="localRecommend">
<h3><i></i>本地生活服务大全</h3>
<ul>
<li class="noMarginLeft"><a onclick="clickLog('from=pc_hy_detail_shangwu_danbaobaoxiantouzi_N_tuijian_daquan_jiazheng');" href="//bj.58.com/shenghuo.shtml" target="_blank">家政服务<i></i></a></li>
<li><a onclick="clickLog('from=pc_hy_detail_shangwu_danbaobaoxiantouzi_N_tuijian_daquan_hunqing');" href="//bj.58.com/hunjiehunqing.shtml" target="_blank">婚庆摄影<i></i></a></li>
<li><a onclick="clickLog('from=pc_hy_detail_shangwu_danbaobaoxiantouzi_N_tuijian_daquan_shangwu');" href="//bj.58.com/shangwu.shtml" target="_blank">商务服务<i></i></a></li>
<li><a onclick="clickLog('from=pc_hy_detail_shangwu_danbaobaoxiantouzi_N_tuijian_daquan_zhuangxiu');" href="//bj.58.com/zhuangxiujc.shtml" target="_blank">装修建材<i></i></a></li>
<li><a onclick="clickLog('from=pc_hy_detail_shangwu_danbaobaoxiantouzi_N_tuijian_daquan_jiaoyu');" href="//bj.58.com/jiaoyu.shtml" target="_blank">教育培训<i></i></a></li>
<li><a onclick="clickLog('from=pc_hy_detail_shangwu_danbaobaoxiantouzi_N_tuijian_daquan_qiche');" href="//bj.58.com/qichefw.shtml" target="_blank">汽车服务<i></i></a></li>
<li class="noMarginLeft"><a onclick="clickLog('from=pc_hy_detail_shangwu_danbaobaoxiantouzi_N_tuijian_daquan_lvyou');" href="//bj.58.com/lvyouxiuxian.shtml" target="_blank">旅游酒店<i></i></a></li>
<li><a onclick="clickLog('from=pc_hy_detail_shangwu_danbaobaoxiantouzi_N_tuijian_daquan_xiuxian');" href="//bj.58.com/xiuxianyl.shtml" target="_blank">休闲娱乐<i></i></a></li>
<li><a onclick="clickLog('from=pc_hy_detail_shangwu_danbaobaoxiantouzi_N_tuijian_daquan_canyin');" href="//bj.58.com/canyin.shtml" target="_blank">餐饮美食<i></i></a></li>
<li><a onclick="clickLog('from=pc_hy_detail_shangwu_danbaobaoxiantouzi_N_tuijian_daquan_liren');" href="//bj.58.com/liren.shtml" target="_blank">丽人<i></i></a></li>
<li><a onclick="clickLog('from=pc_hy_detail_shangwu_danbaobaoxiantouzi_N_tuijian_daquan_nonglin');" href="//bj.58.com/nonglinmy.shtml" target="_blank">农林牧副渔<i></i></a></li>
<li><a onclick="clickLog('from=pc_hy_detail_shangwu_danbaobaoxiantouzi_N_tuijian_daquan_pifacaigou');" href="//bj.58.com/shop.shtml" target="_blank">批发采购<i></i></a></li>
</ul>
</div>
<!--=E 本地生活服务大全-->
</div>
<!-- =E 服务一条龙&本地生活服务大全 -->
<!--=S 便民内容块-->
<div id="bianminbzznId">
</div>
<!--=E 便民内容块-->
<!-- =S 热门推荐 -->
<div class="col detailBottomAd">
<!-- =S 底部广告 -->
<div id="googlead_list"></div>
<!-- =E 底部广告 -->
<!-- =S 热门推荐 -->
<div class="hottui">
<dl>
<dt>热门推荐:</dt>
<dd>
<a href="http://bj.58.com/danbaobaoxiantouzi/31152739191879x.shtml" target="_blank" title="个人14年西城创业投资公司转让不带管理">个人14年西城创业投资公司转让不带管理</a>
<a href="http://bj.58.com/danbaobaoxiantouzi/31152678871624x.shtml" target="_blank" title="转让投资担保公司经济合同担保、融资性担保">转让投资担保公司经济合同担保、融资性担保</a>
<a href="http://bj.58.com/danbaobaoxiantouzi/29914570624580x.shtml" target="_blank" title="微粒贷网贷无抵押信用贷款">微粒贷网贷无抵押信用贷款</a>
<a href="http://bj.58.com/danbaobaoxiantouzi/31151821404078x.shtml" target="_blank" title="转让稀缺金融牌照证券投资咨询公司收购要快">转让稀缺金融牌照证券投资咨询公司收购要快</a>
<a href="http://bj.58.com/danbaobaoxiantouzi/31151650358453x.shtml" target="_blank" title="北京投资担保公司低价转让">北京投资担保公司低价转让</a>
<a href="http://bj.58.com/danbaobaoxiantouzi/31151566057153x.shtml" target="_blank" title="转让幕墙工程公司 门头沟 带2级幕墙资质和2级装饰">转让幕墙工程公司 门头沟 带2级幕墙资质和2级装饰</a>
<a href="http://bj.58.com/danbaobaoxiantouzi/31151486028616x.shtml" target="_blank" title="北京人放款 北京人信贷 疑难房产 可直接下户">北京人放款 北京人信贷 疑难房产 可直接下户</a>
<a href="http://bj.58.com/danbaobaoxiantouzi/31151475534928x.shtml" target="_blank" title="转让东城投资基金公司,地址长期使用,价格合理">转让东城投资基金公司,地址长期使用,价格合理</a>
<a href="http://bj.58.com/danbaobaoxiantouzi/31151406509628x.shtml" target="_blank" title="转让房山16年1000万投资管理公司">转让房山16年1000万投资管理公司</a>
</dd>
</dl>
</div>
<!-- =E 热门推荐 -->
<!-- =S 热门搜索 -->
<div id="a125" class="w zhaoshangad">
<div class="topSearch clearfix">
<h3 class="topSearch_t">热门搜索:</h3>
<div class="topSearch_c">
<a href="/danbaobaoxiantouzi/jh_%E5%8C%97%E4%BA%AC%E4%B8%93%E4%B8%9A%E6%97%A0%E6%8A%B5%E6%8A%BC%E8%B4%B7%E6%AC%BE/">北京专业无抵押贷款</a>
<a href="/danbaobaoxiantouzi/jh_%E5%8C%97%E4%BA%AC%E6%8C%89%E6%8F%AD%E8%B4%B7%E6%AC%BE%E6%96%B9%E4%BE%BF/">北京按揭贷款方便</a>
<a href="/danbaobaoxiantouzi/jh_%E5%8C%97%E4%BA%AC%E6%8A%95%E8%B5%84%E6%8B%85%E4%BF%9D%E8%A1%8C%E4%B8%9A/">北京投资担保行业</a>
<a href="/danbaobaoxiantouzi/jh_%E5%8C%97%E4%BA%AC%E4%BC%81%E4%B8%9A%E8%B4%B7%E6%AC%BE%E4%B8%9A%E5%8A%A1/">北京企业贷款业务</a>
<a href="/danbaobaoxiantouzi/jh_%E5%8C%97%E4%BA%AC%E4%B8%AD%E5%B0%8F%E4%BC%81%E4%B8%9A%E7%BB%8F%E8%90%A5%E6%80%A7%E8%B4%B7%E6%AC%BE/">北京中小企业经营性贷款</a>
<a href="/danbaobaoxiantouzi/jh_%E5%8C%97%E4%BA%AC%E6%AD%A3%E8%A7%84%E9%93%B6%E8%A1%8C%E8%B4%B7%E6%AC%BE/">北京正规银行贷款</a>
<a href="/danbaobaoxiantouzi/jh_%E5%8C%97%E4%BA%AC%E9%91%AB%E8%9E%8D%E5%9F%BA%E6%8A%95%E8%B5%84%E6%8B%85%E4%BF%9D%E5%85%AC%E5%8F%B8/">北京鑫融基投资担保公司</a>
<a href="/danbaobaoxiantouzi/jh_%E5%8C%97%E4%BA%AC%E4%BF%9D%E6%9C%AC%E5%9E%8B%E6%8A%95%E8%B5%84/">北京保本型投资</a>
<a href="/danbaobaoxiantouzi/jh_%E5%8C%97%E4%BA%AC%E4%B8%93%E4%B8%9A%E8%B4%B7%E6%AC%BE%E4%B8%9A%E5%8A%A1/">北京专业贷款业务</a>
<a href="/danbaobaoxiantouzi/jh_%E5%8C%97%E4%BA%AC%E5%A4%A7%E9%A2%9D%E4%BF%A1%E7%94%A8%E5%8D%A1%E5%8A%9E%E7%90%86/">北京大额信用卡办理</a>
</div>
</div>
</div>
<!-- =E 热门搜索 -->
<!-- =S 相关推荐 -->
<div id="a126" class="tuijianui">
<div class="topSearch clearfix">
<h3 class="topSearch_t">相关推荐:</h3>
<div class="topSearch_c">
<a href="http://bj.58.com/danban/" target="_blank" title="北京担保">北京担保</a>
<a href="http://bj.58.com/touzichan/" target="_blank" title="北京投资">北京投资</a>
<a href="http://bj.58.com/diandanghang/" target="_blank" title="北京典当行">北京典当行</a>
<a href="http://bj.58.com/daikuan/" target="_blank" title="北京贷款">北京贷款</a>
<a href="http://bj.58.com/yinhangzhitou/" target="_blank" title="北京银行直投">北京银行直投</a>
</div>
</div>
</div>
<!-- =E 相关推荐 -->
<!-- =S 周边城市 -->
<div id="a127" class="tuijianui">
<div class="topSearch clearfix">
<h3 class="topSearch_t">周边城市:</h3>
<div class="topSearch_c">
<a href="http://tj.58.com/daikuan/" target="_blank" title="天津贷款">天津贷款</a>
<a href="http://sjz.58.com/daikuan/" target="_blank" title="石家庄贷款">石家庄贷款</a>
<a href="http://zjk.58.com/daikuan/" target="_blank" title="张家口贷款">张家口贷款</a>
<a href="http://bd.58.com/daikuan/" target="_blank" title="保定贷款">保定贷款</a>
<a href="http://lf.58.com/daikuan/" target="_blank" title="廊坊贷款">廊坊贷款</a>
<a href="http://ts.58.com/daikuan/" target="_blank" title="唐山贷款">唐山贷款</a>
<a href="http://chengde.58.com/daikuan/" target="_blank" title="承德贷款">承德贷款</a>
<a href="http://qhd.58.com/daikuan/" target="_blank" title="秦皇岛贷款">秦皇岛贷款</a>
<a href="http://cangzhou.58.com/daikuan/" target="_blank" title="沧州贷款">沧州贷款</a>
<a href="http://hs.58.com/daikuan/" target="_blank" title="衡水贷款">衡水贷款</a>
</div>
</div>
</div>
<!-- =E 周边城市-->
<!-- =S 重点城市 -->
<div id="a128" class="tuijianui">
<div class="topSearch clearfix">
<h3 class="topSearch_t">重点城市:</h3>
<div class="topSearch_c">
<a href="http://bj.58.com/danbaobaoxiantouzi/" target="_blank" title="北京投资担保">北京投资担保</a>
<a href="http://sh.58.com/danbaobaoxiantouzi/" target="_blank" title="上海投资担保">上海投资担保</a>
<a href="http://sz.58.com/danbaobaoxiantouzi/" target="_blank" title="深圳投资担保">深圳投资担保</a>
<a href="http://gz.58.com/danbaobaoxiantouzi/" target="_blank" title="广州投资担保">广州投资担保</a>
<a href="http://tj.58.com/danbaobaoxiantouzi/" target="_blank" title="天津投资担保">天津投资担保</a>
<a href="http://hz.58.com/danbaobaoxiantouzi/" target="_blank" title="杭州投资担保">杭州投资担保</a>
<a href="http://cq.58.com/danbaobaoxiantouzi/" target="_blank" title="重庆投资担保">重庆投资担保</a>
<a href="http://wh.58.com/danbaobaoxiantouzi/" target="_blank" title="武汉投资担保">武汉投资担保</a>
<a href="http://xa.58.com/danbaobaoxiantouzi/" target="_blank" title="西安投资担保">西安投资担保</a>
<a href="http://cd.58.com/danbaobaoxiantouzi/" target="_blank" title="成都投资担保">成都投资担保</a>
</div>
</div>
</div>
<!-- =E 重点城市-->
<!-- 类别推荐-->
<div id="a129" class="w zhaoshangad">
<div class="topSearch clearfix">
<h3 class="topSearch_t">类别推荐:</h3>
<div class="topSearch_c">
<!-- 汽车服务 -->
<!-- 餐饮美食 -->
<!-- 商务服务-->
<a href="http://bj.58.com/daikuan/" target="_blank" title="北京贷款公司">北京贷款公司</a>
<a href="http://bj.58.com/touzichan/" target="_blank" title="北京投资公司">北京投资公司</a>
<a href="http://bj.58.com/kuaidi/" target="_blank" title="北京快递公司">北京快递公司</a>
<a href="http://bj.58.com/jianzhusheji/" target="_blank" title="北京建筑设计">北京建筑设计</a>
<a href="http://bj.58.com/wzjianshe/" target="_blank" title="北京网站建设">北京网站建设</a>
<a href="http://bj.58.com/wangzhantg/" target="_blank" title="北京网络推广">北京网络推广</a>
<a href="http://bj.58.com/huoyundaili/" target="_blank" title="北京货运代理">北京货运代理</a>
<a href="http://bj.58.com/jkmenjinwx/" target="_blank" title="北京门禁维修">北京门禁维修</a>
<a href="http://bj.58.com/dayinjiweixiu/" target="_blank" title="北京打印机维修">北京打印机维修</a>
<a href="http://bj.58.com/zulin/" target="_blank" title="北京租赁公司">北京租赁公司</a>
<!-- 农林牧副渔-->
<!-- 丽人-->
<!-- 休闲娱乐-->
<!-- 教育培训-->
<!-- 旅游酒店-->
<!-- 婚庆摄影-->
<!--家政服务-->
<!--批发采购-->
<!--装修建材-->
<!--回收-->
<!--二手-->
<!--宠物-->
</div>
</div>
</div>
</div>
<!-- =E 热门推荐 -->
</div> <!-- =E contentleft -->
<!-- =S contentright -->
<div class="contentright" id="contentright">
<!-- =S 您可能感兴趣&其他人还在看 -->
<div id="sideAD"></div>
<!-- =E 您可能感兴趣&其他人还在看 -->
<!-- =S 其他人浏览 -->
<div class="ad_k">
<a class="tit" href="javascript:void(0);" title="其他人还浏览">其他人还浏览</a>
<ul class="relate">
<li><span class="nob">1</span><a href="/danbaobaoxiantouzi/31164988988874x.shtml" target="_blank" title="转让超级稀缺投资公司收购从速" onClick="clickLog('from=pc_hy_detail_shangwu_danbaobaoxiantouzi_N_right_qitarenliulan_1');">转让超级稀缺投资公司收购...</a></li>
<li><span class="nob">2</span><a href="/danbaobaoxiantouzi/31164857746252x.shtml" target="_blank" title="典当行牌照转让,私募牌照转让" onClick="clickLog('from=pc_hy_detail_shangwu_danbaobaoxiantouzi_N_right_qitarenliulan_2');">典当行牌照转让,私募牌照...</a></li>
<li><span class="nob">3</span><a href="/danbaobaoxiantouzi/31164702079156x.shtml" target="_blank" title="转让15年朝阳投资管理,私募备案首选" onClick="clickLog('from=pc_hy_detail_shangwu_danbaobaoxiantouzi_N_right_qitarenliulan_3');">转让15年朝阳投资管理,...</a></li>
<li><span class="nob">4</span><a href="/danbaobaoxiantouzi/31164482191281x.shtml" target="_blank" title="北京各区个人小额贷款 无抵押贷款 零用贷 当天放款" onClick="clickLog('from=pc_hy_detail_shangwu_danbaobaoxiantouzi_N_right_qitarenliulan_4');">北京各区个人小额贷款 无...</a></li>
<li><span class="nob">5</span><a href="/danbaobaoxiantouzi/31164482203698x.shtml" target="_blank" title="北京个人小额贷款 无抵押贷款 零用贷 当天放款" onClick="clickLog('from=pc_hy_detail_shangwu_danbaobaoxiantouzi_N_right_qitarenliulan_5');">北京个人小额贷款 无抵押...</a></li>
<li><span class="nob">6</span><a href="/danbaobaoxiantouzi/31164482180163x.shtml" target="_blank" title="北京各区个人小额贷款 无抵押贷款 零用贷 当天放款" onClick="clickLog('from=pc_hy_detail_shangwu_danbaobaoxiantouzi_N_right_qitarenliulan_6');">北京各区个人小额贷款 无...</a></li>
<li><span class="nob">7</span><a href="/danbaobaoxiantouzi/26992473933502x.shtml" target="_blank" title="外汇、期货开户,多金融领域合作,寻优秀代理机构合作" onClick="clickLog('from=pc_hy_detail_shangwu_danbaobaoxiantouzi_N_right_qitarenliulan_7');">外汇、期货开户,多金融领...</a></li>
<li><span class="nob">8</span><a href="/danbaobaoxiantouzi/31164353940406x.shtml" target="_blank" title="转让投资管理、融资租赁公司,催收公司,资产管理公司" onClick="clickLog('from=pc_hy_detail_shangwu_danbaobaoxiantouzi_N_right_qitarenliulan_8');">转让投资管理、融资租赁公...</a></li>
<li><span class="nob">9</span><a href="/danbaobaoxiantouzi/31164208698031x.shtml" target="_blank" title="转让西城区附近中字头资产管理公司、价格便宜" onClick="clickLog('from=pc_hy_detail_shangwu_danbaobaoxiantouzi_N_right_qitarenliulan_9');">转让西城区附近中字头资产...</a></li>
<li><span class="nob">10</span><a href="/danbaobaoxiantouzi/31164078682817x.shtml" target="_blank" title="北京投资担保公司转让" onClick="clickLog('from=pc_hy_detail_shangwu_danbaobaoxiantouzi_N_right_qitarenliulan_10');">北京投资担保公司转让</a></li>
</ul>
</div>
<!-- =E 其他人浏览 -->
</div>
<!-- =E contentright -->
</div>
<!--详细信息 end-->
<!--底部 start-->
<!-- =S footer -->
<div id="footer" class="footer">
<div id="upWrap"> <a target="_blank" href="http://about.58.com/help.html" rel="nofollow">常见问题</a><span>|</span> <a target="_blank" href="http://about.58.com/" rel="nofollow">帮助中心</a><span>|</span> <a target="_blank" href="http://about.58.com/feedback.html" rel="nofollow">意见反馈</a><span>|</span> <a target="_blank" href="http://about.58.com/home/" rel="nofollow">了解58同城</a><span>|</span> <a target="_blank" href="http://about.58.com/hr/" rel="nofollow">加入58同城</a><span>|</span> <a target="_blank" href="http://fanqizha.58.com/" rel="nofollow">反欺诈联盟</a><span>|</span> <a target="_blank" href="http://110.58.com" rel="nofollow">报案平台</a><span>|</span> <a target="_blank" href="http://e.58.com" rel="nofollow">推广服务</a><span>|</span> <a target="_blank" href="http://biz.58.com" rel="nofollow">渠道招商</a><span>|</span> <a target="_blank" href="http://baozhang.58.com" rel="nofollow">先行赔付</a><span>|</span> <a target="_blank" href="http://ir.58.com?PGTID=0d100000-0000-13da-8041-51f9a3a739fa&ClickID=2" rel="nofollow">Investor Relations</a> </div>
<div id="downWrap"> <em>2005-2016 58.com版权所有</em><span>|</span> <em>京公网备案信息110105000809</em><span>|</span> <a target="_blank" href="http://www.miibeian.gov.cn/" rel="nofollow">京ICP证060405</a><span>|</span><em>乙测资字11018014</em><span>|</span> <a target="_blank" href="http://ir.58.com" rel="nofollow">Investor Relations</a><span>|</span><em>违法信息举报:4008135858 jubao@58ganji.com</em> </div>
<div class="fotBtmIcon">
<a target="_blank" id="fotBm_1" href="http://fanqizha.58.com"></a>
<a target="_blank" id="fotBm_2" href="http://www.12377.cn/"></a>
<a target="_blank" id="fotBm_3" href="http://www.12377.cn/node_548446.htm"></a>
<a target="_blank" id="fotBm_4" href="https://credit.szfw.org/CX20120918001650001720.html"></a>
<a target="_blank" id="fotBm_5" href="http://img.58cdn.com.cn/ui6/index/qyxinyong.jpg?v=3"></a>
<a target="_blank" id="fotBm_6" href="http://about.58.com/fqz/index.html"></a>
</div>
</div><!-- =E footer -->
<!--底部 end-->
<!--尾部js start-->
<script type="text/javascript" src='http://j1.58cdn.com.cn/componentsLoader/dist/ComponentsLoader_v20170713170536.js'></script>
<!-- HYE-3066 三端qq帮帮展现及点击埋点需求 -->
<script type="text/javascript">
var bb_wtl = 1;
</script>
<script type="text/javascript" src='http://j1.58cdn.com.cn/ds/detail/tongjiMIQQ_v20161101201914.js'></script>
<script type="text/javascript">
document.domain = "58.com";
var userid = GetCookieValue("UserID");
var username = GetCookieValue("UserName");
Counter58.userid = userid;
Counter58.uname = username;
Counter58.infoid =30843376389197;
Counter58.listControl = "userlist";
Counter58.totalControl = "totalcount";
Counter58.create();
</script>
<!--divOwner和divContacter顺序不能颠倒-->
<noscript>
<div style="display:inline;">
<img height="1" width="1" style="border-style:none;" alt=""
src="http://www.googleadservices.com/pagead/conversion/1020842622/?label=eGavCIK0jgIQ_qTj5gM&guid=ON&script=0"/>
</div>
</noscript>
<div style="display:none">
<script type="text/javascript" dec="商家保证展示">
$(document).ready(function () {
var baozheng = '';
if ("" != baozheng) {
var baozhenghtml = "";
var baozhengList = baozheng.split(",");
for (var i = 0; i < baozhengList.length; i++) {
baozhenghtml += "<li>" + baozhengList[i] + "</li>";
}
if ("" != baozhenghtml) {
$("#baozheng").html(baozhenghtml);
}
}
})
</script>
<!-- 显示微站js -->
<style>.hovercode_con {
width: auto;
}</style>
<!-- 显示微站js -->
<script>
boot.require('business.bang', function (Frame, bang) {
bang.init();
});
var catentry_name = '';
if (____json4fe.catentry && ____json4fe.catentry.name) {
catentry_name = ____json4fe.catentry.name;
}
var qiujianding_config = {
category: catentry_name,
title: '$info.getTitle()',
url: 'http://bj58.com/danbaobaoxiantouzi/' + '30843376389197' + 'x.shtml',
ui_version: '7'
};
var bds_config = {};
//投诉中心信息
var conpConfig = {
infoid:'30843376389197',
sname:'北京盛世博远贷款服务',
sid:'43412961916182',
sphone:'15901413319'
}
</script>
</div>
<div style="display:none">
<script type="text/javascript" src='http://j1.58cdn.com.cn/ds/detail/tongjiMIQQ_v20161101201914.js'></script>
<script type="text/javascript">
boot.require('business.hidekey', function (Frame, hk) {
hk.init();
});
</script>
</div>
<div class="foot-b30 clearfix"></div>
<script type="text/javascript" src='http://j1.58cdn.com.cn/ecom/js/abl_yuyu_pingfen_v20170313190836.js'></script>
<script type="text/javascript">
var _shopid = '110502542668756277';
var _cateid = 718;
var _infoid = 30843376389197;
var _title = "北京信用贷款 快速简便【低利息】额度高当天放款";
var tab_height = $("#detail_tab").offset().top;
var _pingjiastate = 0;
var _yuyuestate = 0;
var tss = (new Date()).valueOf();
$(document).ready(function() {
$("#detail_2").click(function() {
if(_pingjiastate == 0 ){
setIframeSrc();
}
_pingjiastate = 1;
showpingjia(1, 0);
});
$("#detail_3").click(function() {
if(_yuyuestate == 0 ){
setIframeSrc();
}
_yuyuestate = 1;
showyuyue(1, 0, "", "")
});
});
function setIframeSrc(){
var time = (new Date()).valueOf();
var yuyueurl = "http://order.58.com/post/index?refer=20&infoid=30843376389197&cityid=1&price=-1.00&cateid=718&shopid=110502542668756277&amount=1&pingjia=0&yuyue=0&ts=1503478269363&sn=94c26698ad3b7bc27c647f26d52d6e0d&PPGTID=157663858197063785000913709";
if(_pingjiastate!=0 && _yuyuestate!=0){
yuyueurl = "http://order.58.com/post/index?refer=20&infoid=30843376389197&cityid=1&price=-1.00&cateid=718&shopid=110502542668756277&amount=1&pingjia=1&yuyue=1&ts=1503478269363&sn=a7671defb857e3c2192cc79d627655f7&PPGTID=157663858197063785000913709";
}else if(_pingjiastate!=0){
yuyueurl = "http://order.58.com/post/index?refer=20&infoid=30843376389197&cityid=1&price=-1.00&cateid=718&shopid=110502542668756277&amount=1&pingjia=1&yuyue=0&ts=1503478269363&sn=cacc1ba922638737b5b08edd61be4e28&PPGTID=157663858197063785000913709";
}else if( _yuyuestate!=0){
yuyueurl = "http://order.58.com/post/index?refer=20&infoid=30843376389197&cityid=1&price=-1.00&cateid=718&shopid=110502542668756277&amount=1&pingjia=0&yuyue=1&ts=1503478269363&sn=8bfcf2f3d57c146012fd66bcf59956a6&PPGTID=157663858197063785000913709";
}
yuyueurl += "&tse=" + time + "&tss=" + tss +"&orderin=2";
$('#yuyueurl,#mianfei').attr('href',yuyueurl)
}
function createIframe(url){
var iframe = document.createElement("iframe");
iframe.src = url;
iframe.id="reserve";
iframe.className="class";
iframe.width=638
iframe.scrolling="no";
$('#iframe_id').append(iframe);
}
function popDialog() {
var infoid = 30843376389197;
$('.fe_window_mask1').removeClass("none");
$('#iframe_id').removeClass("none");
var url = "http://order.58.com/RandomCodeValidate/setUserValidateCode/" +infoid;
$.ajax({
url:url,
type:'GET',
dataType:'jsonp',
data: '',
complete: function(data){}
});
}
function GetCookieValue(name) {
var arr = document.cookie.match(new RegExp(name + "=([^&;]+)"));
if (arr != null) return decodeURI(arr[1]);
else return "";
}
function setLogoutLink(){
$("#logoutBtn").attr("href", "https://passport.58.com/logout?path=&back=now");
}
function setLoginLink(){
$("#loginBtn").attr("href", "https://passport.58.com/login/?path="+encodeURIComponent(location.href));
}
function initLoginUser() {
if(""!=GetCookieValue("UserID")){
$("#login").empty();
var html='<span>';
var userName = GetCookieValue("UserName");
var userId = GetCookieValue("UserID");
html += userName ;
html += '</span>';
html += '<span class="gap">|</span><a id="logoutBtn" href="javascript:void(0)" target="_self">退出</a>';
$("#login").append(html);
}
var userId = GetCookieValue("UserID");
var url = "http://message.58.com/api/msgcount/?userid=";
var url =url + userId;
url += "&type=3";
$.post(url, function (sysmsg){$("#sysmsgCount").empty();$("#sysmsgCount").append("(" + sysmsg.count + ")");}, "jsonp");
setLogoutLink();
}
function closebtn(id) {
initLoginUser();
$("#reserve").remove();
$("#" + id).addClass("none");
$(".fe_window_mask1").addClass("none");
}
</script>
<div id="iframe_id" class="iframediv none"></div>
<div class="fe_window_mask1 none">
<iframe scrolling="no" frameborder="0" width="100%" height="100%"
style="background-color:#000;filter:Alpha(Opacity=0);opacity:0" src="about:blank"></iframe>
</div>
<script type="text/javascript">
var infoTel400Sign = "870bb2e021744a7ecb6222cf5198c745263bfb39";
</script> <script>
document.write('<script src="http://track.58.com/adsys/postpageads"></' + 'script>');
</script>
<script type="text/javascript" src="http://j1.58cdn.com.cn/jiaoyou/js/cities_v1.js?v=120625"></script>
<script type="text/javascript">
var xxfwConfig = {namespace:'huangyedetailpc'};
</script>
<script type="text/javascript" src='http://j1.58cdn.com.cn/resource/xxzl/xxfw/xxfw.min_v20161021110430.js'></script>
<script type="text/javascript" src='http://j1.58cdn.com.cn/js/v8/boot/boot_huangye_v20170821182103.js'></script>
<script>
require(['_pkg/huangye/huangye_shenghuo_final_dom'],function(){});
</script>
<script src="//wechat.58.com/google-analytics"></script>
<script type="text/javascript">
var infoTel400Sign = "870bb2e021744a7ecb6222cf5198c745263bfb39";
</script><!--尾部js end-->
<script type="text/javascript">
var infoTel400Sign = "870bb2e021744a7ecb6222cf5198c745263bfb39";
</script></body>
</html>
"""
html = etree.HTML(content)
contact = html.xpath('//div[@class="userinfo"]/div[1]/h2/text()')[0].strip()
flag = html.xpath('//div[@class="userinfo"]/div[2]/div[2]/a[1]/text()')[0]
print(flag)
store_url = html.xpath('//div[@class="userinfomain"]/div[2]/a[1]')[0]
print(etree.tostring(store_url).decode())
| 52.839114
| 2,599
| 0.602622
|
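The tail of the scraper above parses the saved 58.com detail page with lxml and pulls out the contact name, a credit flag, and the store link via XPath. A minimal sketch of the same extraction pattern, run against a trimmed-down stand-in for the page (the markup below is hypothetical, not the real fixture):

# Minimal sketch of the XPath extraction used above, applied to a hypothetical
# stand-in snippet rather than the full 58.com page.
from lxml import etree

snippet = """
<div class="userinfo">
  <div><h2> Example Seller </h2></div>
  <div><div></div><div><a>credit badge</a></div></div>
</div>
<div class="userinfomain">
  <div></div>
  <div><a href="http://example.58.com/shop/123">store</a></div>
</div>
"""

html = etree.HTML(snippet)
contact = html.xpath('//div[@class="userinfo"]/div[1]/h2/text()')[0].strip()
flag = html.xpath('//div[@class="userinfo"]/div[2]/div[2]/a[1]/text()')[0]
store_url = html.xpath('//div[@class="userinfomain"]/div[2]/a[1]')[0]
print(contact)                             # "Example Seller"
print(flag)                                # "credit badge"
print(etree.tostring(store_url).decode())  # serialized <a> element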
7a568feb46a7051d274396209b37a564030c7ba7
| 14,082
|
py
|
Python
|
sdk/python/pulumi_azure_native/network/v20181201/get_security_rule.py
|
sebtelko/pulumi-azure-native
|
711ec021b5c73da05611c56c8a35adb0ce3244e4
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/network/v20181201/get_security_rule.py
|
sebtelko/pulumi-azure-native
|
711ec021b5c73da05611c56c8a35adb0ce3244e4
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/network/v20181201/get_security_rule.py
|
sebtelko/pulumi-azure-native
|
711ec021b5c73da05611c56c8a35adb0ce3244e4
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetSecurityRuleResult',
'AwaitableGetSecurityRuleResult',
'get_security_rule',
]
@pulumi.output_type
class GetSecurityRuleResult:
"""
Network security rule.
"""
def __init__(__self__, access=None, description=None, destination_address_prefix=None, destination_address_prefixes=None, destination_application_security_groups=None, destination_port_range=None, destination_port_ranges=None, direction=None, etag=None, id=None, name=None, priority=None, protocol=None, provisioning_state=None, source_address_prefix=None, source_address_prefixes=None, source_application_security_groups=None, source_port_range=None, source_port_ranges=None):
if access and not isinstance(access, str):
raise TypeError("Expected argument 'access' to be a str")
pulumi.set(__self__, "access", access)
if description and not isinstance(description, str):
raise TypeError("Expected argument 'description' to be a str")
pulumi.set(__self__, "description", description)
if destination_address_prefix and not isinstance(destination_address_prefix, str):
raise TypeError("Expected argument 'destination_address_prefix' to be a str")
pulumi.set(__self__, "destination_address_prefix", destination_address_prefix)
if destination_address_prefixes and not isinstance(destination_address_prefixes, list):
raise TypeError("Expected argument 'destination_address_prefixes' to be a list")
pulumi.set(__self__, "destination_address_prefixes", destination_address_prefixes)
if destination_application_security_groups and not isinstance(destination_application_security_groups, list):
raise TypeError("Expected argument 'destination_application_security_groups' to be a list")
pulumi.set(__self__, "destination_application_security_groups", destination_application_security_groups)
if destination_port_range and not isinstance(destination_port_range, str):
raise TypeError("Expected argument 'destination_port_range' to be a str")
pulumi.set(__self__, "destination_port_range", destination_port_range)
if destination_port_ranges and not isinstance(destination_port_ranges, list):
raise TypeError("Expected argument 'destination_port_ranges' to be a list")
pulumi.set(__self__, "destination_port_ranges", destination_port_ranges)
if direction and not isinstance(direction, str):
raise TypeError("Expected argument 'direction' to be a str")
pulumi.set(__self__, "direction", direction)
if etag and not isinstance(etag, str):
raise TypeError("Expected argument 'etag' to be a str")
pulumi.set(__self__, "etag", etag)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if priority and not isinstance(priority, int):
raise TypeError("Expected argument 'priority' to be a int")
pulumi.set(__self__, "priority", priority)
if protocol and not isinstance(protocol, str):
raise TypeError("Expected argument 'protocol' to be a str")
pulumi.set(__self__, "protocol", protocol)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if source_address_prefix and not isinstance(source_address_prefix, str):
raise TypeError("Expected argument 'source_address_prefix' to be a str")
pulumi.set(__self__, "source_address_prefix", source_address_prefix)
if source_address_prefixes and not isinstance(source_address_prefixes, list):
raise TypeError("Expected argument 'source_address_prefixes' to be a list")
pulumi.set(__self__, "source_address_prefixes", source_address_prefixes)
if source_application_security_groups and not isinstance(source_application_security_groups, list):
raise TypeError("Expected argument 'source_application_security_groups' to be a list")
pulumi.set(__self__, "source_application_security_groups", source_application_security_groups)
if source_port_range and not isinstance(source_port_range, str):
raise TypeError("Expected argument 'source_port_range' to be a str")
pulumi.set(__self__, "source_port_range", source_port_range)
if source_port_ranges and not isinstance(source_port_ranges, list):
raise TypeError("Expected argument 'source_port_ranges' to be a list")
pulumi.set(__self__, "source_port_ranges", source_port_ranges)
@property
@pulumi.getter
def access(self) -> str:
"""
The network traffic is allowed or denied. Possible values are: 'Allow' and 'Deny'.
"""
return pulumi.get(self, "access")
@property
@pulumi.getter
def description(self) -> Optional[str]:
"""
A description for this rule. Restricted to 140 chars.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter(name="destinationAddressPrefix")
def destination_address_prefix(self) -> Optional[str]:
"""
The destination address prefix. CIDR or destination IP range. Asterisk '*' can also be used to match all source IPs. Default tags such as 'VirtualNetwork', 'AzureLoadBalancer' and 'Internet' can also be used.
"""
return pulumi.get(self, "destination_address_prefix")
@property
@pulumi.getter(name="destinationAddressPrefixes")
def destination_address_prefixes(self) -> Optional[Sequence[str]]:
"""
The destination address prefixes. CIDR or destination IP ranges.
"""
return pulumi.get(self, "destination_address_prefixes")
@property
@pulumi.getter(name="destinationApplicationSecurityGroups")
def destination_application_security_groups(self) -> Optional[Sequence['outputs.ApplicationSecurityGroupResponse']]:
"""
The application security group specified as destination.
"""
return pulumi.get(self, "destination_application_security_groups")
@property
@pulumi.getter(name="destinationPortRange")
def destination_port_range(self) -> Optional[str]:
"""
The destination port or range. Integer or range between 0 and 65535. Asterisk '*' can also be used to match all ports.
"""
return pulumi.get(self, "destination_port_range")
@property
@pulumi.getter(name="destinationPortRanges")
def destination_port_ranges(self) -> Optional[Sequence[str]]:
"""
The destination port ranges.
"""
return pulumi.get(self, "destination_port_ranges")
@property
@pulumi.getter
def direction(self) -> str:
"""
The direction of the rule. The direction specifies if rule will be evaluated on incoming or outgoing traffic. Possible values are: 'Inbound' and 'Outbound'.
"""
return pulumi.get(self, "direction")
@property
@pulumi.getter
def etag(self) -> Optional[str]:
"""
A unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def id(self) -> Optional[str]:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def name(self) -> Optional[str]:
"""
The name of the resource that is unique within a resource group. This name can be used to access the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def priority(self) -> Optional[int]:
"""
The priority of the rule. The value can be between 100 and 4096. The priority number must be unique for each rule in the collection. The lower the priority number, the higher the priority of the rule.
"""
return pulumi.get(self, "priority")
@property
@pulumi.getter
def protocol(self) -> str:
"""
Network protocol this rule applies to. Possible values are 'Tcp', 'Udp', and '*'.
"""
return pulumi.get(self, "protocol")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> Optional[str]:
"""
The provisioning state of the public IP resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="sourceAddressPrefix")
def source_address_prefix(self) -> Optional[str]:
"""
The CIDR or source IP range. Asterisk '*' can also be used to match all source IPs. Default tags such as 'VirtualNetwork', 'AzureLoadBalancer' and 'Internet' can also be used. If this is an ingress rule, specifies where network traffic originates from.
"""
return pulumi.get(self, "source_address_prefix")
@property
@pulumi.getter(name="sourceAddressPrefixes")
def source_address_prefixes(self) -> Optional[Sequence[str]]:
"""
The CIDR or source IP ranges.
"""
return pulumi.get(self, "source_address_prefixes")
@property
@pulumi.getter(name="sourceApplicationSecurityGroups")
def source_application_security_groups(self) -> Optional[Sequence['outputs.ApplicationSecurityGroupResponse']]:
"""
The application security group specified as source.
"""
return pulumi.get(self, "source_application_security_groups")
@property
@pulumi.getter(name="sourcePortRange")
def source_port_range(self) -> Optional[str]:
"""
The source port or range. Integer or range between 0 and 65535. Asterisk '*' can also be used to match all ports.
"""
return pulumi.get(self, "source_port_range")
@property
@pulumi.getter(name="sourcePortRanges")
def source_port_ranges(self) -> Optional[Sequence[str]]:
"""
The source port ranges.
"""
return pulumi.get(self, "source_port_ranges")
class AwaitableGetSecurityRuleResult(GetSecurityRuleResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetSecurityRuleResult(
access=self.access,
description=self.description,
destination_address_prefix=self.destination_address_prefix,
destination_address_prefixes=self.destination_address_prefixes,
destination_application_security_groups=self.destination_application_security_groups,
destination_port_range=self.destination_port_range,
destination_port_ranges=self.destination_port_ranges,
direction=self.direction,
etag=self.etag,
id=self.id,
name=self.name,
priority=self.priority,
protocol=self.protocol,
provisioning_state=self.provisioning_state,
source_address_prefix=self.source_address_prefix,
source_address_prefixes=self.source_address_prefixes,
source_application_security_groups=self.source_application_security_groups,
source_port_range=self.source_port_range,
source_port_ranges=self.source_port_ranges)
def get_security_rule(network_security_group_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
security_rule_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetSecurityRuleResult:
"""
Network security rule.
:param str network_security_group_name: The name of the network security group.
:param str resource_group_name: The name of the resource group.
:param str security_rule_name: The name of the security rule.
"""
__args__ = dict()
__args__['networkSecurityGroupName'] = network_security_group_name
__args__['resourceGroupName'] = resource_group_name
__args__['securityRuleName'] = security_rule_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:network/v20181201:getSecurityRule', __args__, opts=opts, typ=GetSecurityRuleResult).value
return AwaitableGetSecurityRuleResult(
access=__ret__.access,
description=__ret__.description,
destination_address_prefix=__ret__.destination_address_prefix,
destination_address_prefixes=__ret__.destination_address_prefixes,
destination_application_security_groups=__ret__.destination_application_security_groups,
destination_port_range=__ret__.destination_port_range,
destination_port_ranges=__ret__.destination_port_ranges,
direction=__ret__.direction,
etag=__ret__.etag,
id=__ret__.id,
name=__ret__.name,
priority=__ret__.priority,
protocol=__ret__.protocol,
provisioning_state=__ret__.provisioning_state,
source_address_prefix=__ret__.source_address_prefix,
source_address_prefixes=__ret__.source_address_prefixes,
source_application_security_groups=__ret__.source_application_security_groups,
source_port_range=__ret__.source_port_range,
source_port_ranges=__ret__.source_port_ranges)
| 46.322368
| 481
| 0.697912
|
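The generated module above exposes get_security_rule() as a plain data-source call. A hedged usage sketch, assuming it runs inside a Pulumi program; the resource group, NSG, and rule names below are placeholders:

# Hypothetical usage of the generated get_security_rule() data source.
# The group/NSG/rule names are placeholders; this must run inside `pulumi up`.
import pulumi
from pulumi_azure_native.network.v20181201 import get_security_rule

rule = get_security_rule(
    network_security_group_name="example-nsg",
    resource_group_name="example-rg",
    security_rule_name="allow-ssh",
)

# The result mirrors GetSecurityRuleResult's read-only properties.
pulumi.export("ruleAccess", rule.access)      # "Allow" or "Deny"
pulumi.export("rulePriority", rule.priority)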
d6e67b0ed8ce521fcb8e74c3461dd7ab936e4f0d
| 4,403
|
py
|
Python
|
dailyfresh/dailyfresh/settings.py
|
sunxiao01/dailyfresh_01
|
844e87027a9c0ac82da74e515508eb0a2e66b74d
|
[
"MIT"
] | 1
|
2018-03-10T02:27:59.000Z
|
2018-03-10T02:27:59.000Z
|
dailyfresh/dailyfresh/settings.py
|
sunxiao01/dailyfresh_01
|
844e87027a9c0ac82da74e515508eb0a2e66b74d
|
[
"MIT"
] | null | null | null |
dailyfresh/dailyfresh/settings.py
|
sunxiao01/dailyfresh_01
|
844e87027a9c0ac82da74e515508eb0a2e66b74d
|
[
"MIT"
] | null | null | null |
"""
Django settings for dailyfresh project.
Generated by 'django-admin startproject' using Django 1.8.2.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
import sys
sys.path.insert(1, os.path.join(BASE_DIR, 'apps'))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '9pg8x-v5d72tzwi=+e2t7(20or)^n=qvn9!z4po!^on-s!&zsu'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'tinymce',
'haystack',
'cart',
'goods',
'orders',
'users',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'dailyfresh.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'dailyfresh.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'dailyfresh_01',
'HOST': '192.168.102.128',
'PORT': '3306',
'USER': 'root',
'PASSWORD': 'mysql',
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'zh-Hans'
TIME_ZONE = 'Asia/Shanghai'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'static')
]
AUTH_USER_MODEL = 'users.User'
# Configure Django to send mail through a third-party SMTP server
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend' # SMTP email backend
EMAIL_HOST = 'smtp.yeah.net' # outgoing mail host
EMAIL_PORT = 25 # outgoing mail port
EMAIL_HOST_USER = 'dailyfreshzxc@yeah.net' # authorized mailbox account
EMAIL_HOST_PASSWORD = 'dailyfresh123' # authorization password issued by the mail provider, not the account login password
EMAIL_FROM = '天天生鲜<dailyfreshzxc@yeah.net>' # sender display name
CACHES = {
"default": {
"BACKEND": "django_redis.cache.RedisCache",
"LOCATION": "redis://192.168.102.128:6379/5",
"OPTIONS": {
"CLIENT_CLASS": "django_redis.client.DefaultClient",
}
},
}
# Session
# http://django-redis-chs.readthedocs.io/zh_CN/latest/#session-backend
SESSION_ENGINE = "django.contrib.sessions.backends.cache"
SESSION_CACHE_ALIAS = "default"
# Used together with the login_required decorator
LOGIN_URL = '/users/login'
DEFAULT_FILE_STORAGE = 'utils.fastdfs.storage.FastDFSStorage'
CLIENT_CONF = os.path.join(BASE_DIR, 'utils/fastdfs/client.conf')
SERVER_IP = 'http://192.168.102.128:8888/'
TINYMCE_DEFAULT_CONFIG = {
    'theme': 'advanced', # rich editing theme
'width': 600,
'height': 400,
}
HAYSTACK_CONNECTIONS = {
'default': {
        # Use the Whoosh engine; if jieba-based Chinese word segmentation is not needed, use whoosh_backend instead
'ENGINE': 'haystack.backends.whoosh_cn_backend.WhooshEngine',
        # Path of the index files
'PATH': os.path.join(BASE_DIR, 'whoosh_index'),
}
}
# Automatically regenerate the search index when data is added, modified, or deleted
HAYSTACK_SIGNAL_PROCESSOR = 'haystack.signals.RealtimeSignalProcessor'
| 26.053254
| 71
| 0.693164
|
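With CACHES pointing at Redis and the cache-backed session engine configured above, application code goes through the standard Django cache API. A small sketch; the key name and timeout are illustrative only:

# Sketch of using the Redis-backed default cache configured above.
# The key name and one-hour timeout are illustrative, not project values.
from django.core.cache import cache

def cache_index_page(html_fragment):
    cache.set('index_page_data', html_fragment, 3600)

def get_cached_index_page():
    # Returns None on a cache miss.
    return cache.get('index_page_data')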
ebd16685fbf4b075d43635cee62fb573745c74ad
| 8,613
|
py
|
Python
|
reproman/resource/ssh.py
|
chaselgrove/reproman
|
7af2e407fb60d782dc049e62082744600eff0574
|
[
"MIT"
] | 13
|
2017-03-02T16:33:08.000Z
|
2019-01-12T19:19:08.000Z
|
reproman/resource/ssh.py
|
chaselgrove/reproman
|
7af2e407fb60d782dc049e62082744600eff0574
|
[
"MIT"
] | 313
|
2017-01-13T03:36:30.000Z
|
2019-01-24T19:16:08.000Z
|
reproman/resource/ssh.py
|
chaselgrove/reproman
|
7af2e407fb60d782dc049e62082744600eff0574
|
[
"MIT"
] | 6
|
2017-01-12T19:44:01.000Z
|
2019-01-12T19:19:18.000Z
|
# ex: set sts=4 ts=4 sw=4 noet:
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the reproman package for the
# copyright and license terms.
#
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
"""Resource sub-class to provide management of a SSH connection."""
import attr
import os
import stat
import getpass
import uuid
from ..log import LoggerHelper
# OPT: invoke, fabric and paramiko are imported at the point of use
import logging
lgr = logging.getLogger('reproman.resource.ssh')
# Add Paramiko logging for log levels below DEBUG
if lgr.getEffectiveLevel() < logging.DEBUG:
LoggerHelper("paramiko").get_initialized_logger()
from .base import Resource
from ..utils import attrib
from ..utils import command_as_string
from reproman.dochelpers import borrowdoc
from reproman.resource.session import Session
from ..support.exceptions import CommandError
# Silence CryptographyDeprecationWarning's.
# TODO: We should bump the required paramiko version and drop the code below
# once paramiko cuts a release that includes
# <https://github.com/paramiko/paramiko/pull/1379>.
import warnings
warnings.filterwarnings(action="ignore", module=".*paramiko.*")
@attr.s
class SSH(Resource):
# Generic properties of any Resource
name = attrib(default=attr.NOTHING)
# Configurable options for each "instance"
host = attrib(default=attr.NOTHING,
doc="DNS or IP address of server or ssh_config nickname")
port = attrib(
doc="Port to connect to on remote host")
key_filename = attrib(
doc="Path to SSH private key file")
user = attrib(
doc="Username to use to log into remote environment")
id = attrib()
type = attrib(default='ssh') # Resource type
# Current instance properties, to be set by us, not augmented by user
status = attrib()
_connection = attrib()
def _connection_open(self):
try:
self.status = "CONNECTING"
self._connection.open()
self.status = "ONLINE"
except:
self.status = "CONNECTION ERROR"
raise
def connect(self, password=None):
"""Open a connection to the environment resource.
Parameters
----------
password : string
We don't allow users to pass in a password via the command line
but do allow tests to authenticate by passing a password as
a parameter to this method.
"""
# Convert key_filename to a list
# See: https://github.com/ReproNim/reproman/commit/3807f1287c39ea2393bae26803e6da8122ac5cff
from fabric import Connection
from paramiko import AuthenticationException
connect_kwargs = {}
if self.key_filename:
connect_kwargs["key_filename"] = [self.key_filename]
if password:
connect_kwargs["password"] = password
self._connection = Connection(
self.host,
user=self.user,
port=self.port,
connect_kwargs=connect_kwargs
)
if self.key_filename:
auth = self.key_filename
elif password is None:
auth = "SSH config"
else:
auth = "password"
lgr.debug("SSH connecting to %s@%s:%s, authenticating with %s",
self._connection.user, self._connection.host,
self._connection.port, # Fabric defaults to 22.
auth)
try:
self._connection_open()
except AuthenticationException:
password = getpass.getpass(
prompt="Password for {}: ".format(self.name))
self._connection = Connection(
self.host,
user=self.user,
port=self.port,
connect_kwargs={'password': password}
)
self._connection_open()
def create(self):
"""
Register the SSH connection to the reproman inventory registry.
Yields
-------
dict : config parameters to capture in the inventory file
"""
if not self.id:
self.id = str(uuid.uuid4())
self.status = 'N/A'
yield {
'id': self.id,
'status': self.status,
'host': self.host,
'user': self.user,
'port': self.port,
'key_filename': self.key_filename,
}
def delete(self):
self._connection = None
return
def start(self):
# Not a SSH feature
raise NotImplementedError
def stop(self):
# Not a SSH feature
raise NotImplementedError
def get_session(self, pty=False, shared=None):
"""
Log into remote environment and get the command line
"""
if not self._connection:
self.connect()
return (PTYSSHSession if pty else SSHSession)(
connection=self._connection
)
# Alias SSH class so that it can be discovered by the ResourceManager.
@attr.s
class Ssh(SSH):
pass
from reproman.resource.session import POSIXSession
@attr.s
class SSHSession(POSIXSession):
connection = attrib(default=attr.NOTHING)
@borrowdoc(Session)
def _execute_command(self, command, env=None, cwd=None, with_shell=False,
handle_permission_denied=True):
# TODO -- command_env is not used etc...
# command_env = self.get_updated_env(env)
from invoke.exceptions import UnexpectedExit
command = self._prefix_command(command_as_string(command), env=env,
cwd=cwd, with_shell=with_shell)
try:
result = self.connection.run(command, hide=True)
except UnexpectedExit as e:
if 'permission denied' in e.result.stderr.lower() and handle_permission_denied:
# Issue warning once
if not getattr(self, '_use_sudo_warning', False):
lgr.warning(
"Permission is denied for %s. From now on will use 'sudo' "
"in such cases",
command
)
self._use_sudo_warning = True
return self._execute_command(
"sudo " + command, # there was command_as_string
env=env,
cwd=cwd,
handle_permission_denied=False
)
else:
result = e.result
if result.return_code not in [0, None]:
msg = "Failed to run %r. Exit code=%d. out=%s err=%s" \
% (command, result.return_code, result.stdout, result.stderr)
raise CommandError(str(command), msg, result.return_code,
result.stdout, result.stderr)
else:
lgr.log(8, "Finished running %r with status %s", command,
result.return_code)
return (result.stdout, result.stderr)
@borrowdoc(Session)
def put(self, src_path, dest_path, uid=-1, gid=-1):
dest_path = self._prepare_dest_path(src_path, dest_path, local=False)
sftp = self.connection.sftp()
self.transfer_recursive(
src_path,
dest_path,
os.path.isdir,
os.listdir,
sftp.mkdir,
self.connection.put
)
if uid > -1 or gid > -1:
self.chown(dest_path, uid, gid, recursive=True)
@borrowdoc(Session)
def get(self, src_path, dest_path=None, uid=-1, gid=-1):
dest_path = self._prepare_dest_path(src_path, dest_path)
sftp = self.connection.sftp()
self.transfer_recursive(
src_path,
dest_path,
lambda f: stat.S_ISDIR(sftp.stat(f).st_mode),
sftp.listdir,
os.mkdir,
self.connection.get
)
if uid > -1 or gid > -1:
self.chown(dest_path, uid, gid, remote=False, recursive=True)
@attr.s
class PTYSSHSession(SSHSession):
"""Interactive SSH Session"""
@borrowdoc(Session)
def open(self):
lgr.debug("Opening TTY connection via SSH.")
self.interactive_shell()
@borrowdoc(Session)
def close(self):
# XXX ?
pass
def interactive_shell(self):
"""Open an interactive TTY shell.
"""
self.connection.run('/bin/bash', pty=True)
print('Exited terminal session.')
| 31.665441
| 99
| 0.577964
|
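Going only by the classes shown above, a hedged sketch of driving the SSH resource; the host, user, and key path are placeholders, and the attrib defaults are assumed to let the remaining fields be omitted:

# Hedged sketch of driving the SSH resource defined above. Host, user and key
# path are placeholders; error handling and inventory registration are omitted.
from reproman.resource.ssh import SSH

res = SSH(name="demo", host="203.0.113.10", user="ubuntu",
          key_filename="/home/me/.ssh/id_rsa")
res.connect()                 # opens the underlying fabric Connection
session = res.get_session()   # SSHSession wrapping that connection

# The session delegates to fabric; Connection.run() is fabric's documented API.
result = session.connection.run("uname -a", hide=True)
print(result.stdout)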
c34dcd0c3d2f0ab4cf5bfbb3deb994debdff732d
| 8,661
|
py
|
Python
|
tests/tests_domain/test_domain2D_shapely.py
|
uwe-iben/torchphysics
|
f0a56539cff331d49caaa90bc2fdd0d238b298f8
|
[
"Apache-2.0"
] | null | null | null |
tests/tests_domain/test_domain2D_shapely.py
|
uwe-iben/torchphysics
|
f0a56539cff331d49caaa90bc2fdd0d238b298f8
|
[
"Apache-2.0"
] | null | null | null |
tests/tests_domain/test_domain2D_shapely.py
|
uwe-iben/torchphysics
|
f0a56539cff331d49caaa90bc2fdd0d238b298f8
|
[
"Apache-2.0"
] | null | null | null |
import pytest
import torch
import shapely.geometry as s_geo
from shapely.ops import triangulate
from torchphysics.problem.domains.domain2D.shapely_polygon import ShapelyPolygon
from torchphysics.problem.spaces import R2, R1
from torchphysics.problem.spaces.points import Points
# Test ShapelyPolygon
def test_create_poly2D():
P = ShapelyPolygon(R2('x'), vertices=[[0, 10], [10, 5], [10, 2], [0, 0]])
def test_dim_poly2D():
P = ShapelyPolygon(R2('x'), [[0, 10], [10, 5], [10, 2], [0, 0]])
assert P.dim == 2
def test_get_volume_poly2D():
P = ShapelyPolygon(R2('x'), [[0, 1], [1, 0], [1, 1]])
assert P._get_volume() == 0.5
def test_get_volume_poly2D_boundary():
P = ShapelyPolygon(R2('x'), [[0, 1], [0, 0], [1, 0], [1, 1]])
assert P.boundary._get_volume() == 4.0
def test_call_poly2D():
P = ShapelyPolygon(R2('x'), [[0, 1], [1, 0], [1, 1]])
assert P(t=3) == P
def test_call_poly2D_boundary():
P = ShapelyPolygon(R2('x'), [[0, 1], [1, 0], [1, 1]]).boundary
assert P(t=3) == P
def test_check_no_input_poly2D():
with pytest.raises(ValueError):
_ = ShapelyPolygon(R2('x'))
def test_cant_create_variable_poly2D():
with pytest.raises(TypeError):
_ = ShapelyPolygon(R2('x'), vertices=lambda t : t)
def test_ordering_of_corners_poly2D():
P = ShapelyPolygon(R2('x'), [[0, 10], [0, 0], [10, 2], [10.0, 5]])
order = torch.tensor([[0, 10.0], [0, 0], [10, 2], [10, 5], [0, 10]])
assert torch.equal(torch.tensor(P.polygon.exterior.coords), order)
P = ShapelyPolygon(R2('x'), [[0, 10.0], [10, 5], [10, 2], [0, 0]])
assert torch.equal(torch.tensor(P.polygon.exterior.coords), order)
def test_volume_of_poly2D():
P = ShapelyPolygon(R2('x'), [[0, 10], [0, 0], [10, 2], [10, 8]])
assert torch.isclose(P._get_volume(), torch.tensor(80.0))
def test_inside_poly2D():
P = ShapelyPolygon(R2('x'), [[0, 10], [0, 0], [10, 2], [10, 8]])
points = torch.tensor([[5, 5], [0, 0], [10, 2.0], [-3, 4]])
points = Points(points, R2('x'))
inside = P._contains(points)
assert inside[0]
assert not inside[1]
assert not inside[2]
assert not inside[3]
def test_on_boundary_poly2D():
P = ShapelyPolygon(R2('x'), [[0, 10], [0, 0], [10, 2], [10, 8]])
points = torch.tensor([[5, 5.0], [0, 0], [10, 2], [-3, 4], [0, 8]])
points = Points(points, R2('x'))
on_bound = P.boundary._contains(points)
assert not on_bound[0]
assert on_bound[1]
assert on_bound[2]
assert not on_bound[3]
assert on_bound[4]
def test_random_sampling_on_boundary_poly2D():
P = ShapelyPolygon(R2('x'), [[0, 10], [0, 0], [10, 2], [10, 8]]).boundary
points = P.sample_random_uniform(n=500)
assert points.as_tensor.shape == (500, 2)
assert all(P._contains(points))
def test_grid_sampling_on_boundary_poly2D():
P = ShapelyPolygon(R2('x'), [[0, 10], [0, 0], [10, 0], [10, 10]]).boundary
points = P.sample_grid(15)
assert points.as_tensor.shape == (15, 2)
assert all(P._contains(points))
def test_random_sampling_on_boundary_for_hole_in_poly2D():
h = s_geo.Polygon(shell=[[0.20, 0.15], [0.5, 0.25], [0.25, 0.5]])
p = s_geo.Polygon(shell=[[0, 0], [1, 0], [0, 1]], holes=[h.exterior.coords])
P = ShapelyPolygon(R2('x'), shapely_polygon=p)
H = ShapelyPolygon(R2('x'), shapely_polygon=h)
points = P.boundary.sample_random_uniform(500)
assert points.as_tensor.shape == (500, 2)
assert any(H.boundary._contains(points))
assert all(P.boundary._contains(points))
def test_grid_sampling_on_boundary_for_hole_in_poly2D():
h = s_geo.Polygon(shell=[[0.20, 0.15], [0.5, 0.25], [0.25, 0.5]])
p = s_geo.Polygon(shell=[[0, 0], [1, 0], [0, 1]], holes=[h.exterior.coords])
P = ShapelyPolygon(R2('x'), shapely_polygon=p)
H = ShapelyPolygon(R2('x'), shapely_polygon=h)
points = P.boundary.sample_grid(500)
assert points.as_tensor.shape == (500, 2)
assert any(H.boundary._contains(points))
assert all(P.boundary._contains(points))
def test_random_sampling_inside_poly2D():
P = ShapelyPolygon(R2('x'), [[0, 10], [0, 0], [10, 2], [10, 8]])
time = Points(torch.tensor([[2.0], [1.1]]), R1('t'))
points = P.sample_random_uniform(10, params=time)
assert points.as_tensor.shape == (20, 2)
assert all(P._contains(points))
def test_random_sampling_inside_poly2D_with_density():
P = ShapelyPolygon(R2('x'), [[0, 10], [0, 0], [10, 2], [10, 8]])
points = P.sample_random_uniform(d=1)
assert all(P._contains(points))
def test_random_sampling_inside_poly2D_2():
P = ShapelyPolygon(R2('x'), [[0, 10], [0, 0], [10, 0], [10, 10]])
points = P.sample_random_uniform(50)
assert points.as_tensor.shape == (50, 2)
assert all(P._contains(points))
P = ShapelyPolygon(R2('x'), [[0, 0], [0.3, 0], [0.3, 0.9], [0.5, 0.9], [0.5, 0.85],
[1, 0.85], [1, 0.1], [0.4, 0.1], [0.4, 0], [2, 0],
[2, 1], [0, 1]])
points = P.sample_random_uniform(50)
assert points.as_tensor.shape == (50, 2)
assert all(P._contains(points))
def test_add_additional_points_if_some_missing_poly2D():
P = ShapelyPolygon(R2('x'), [[0, 10], [0, 0], [10, 0], [10, 10]])
T = triangulate(P.polygon)[0]
points = torch.ones((4, 2))
n = 4
points = P._check_enough_points_sampled(n, points, T, 'cpu')
assert points.shape == (4, 2)
n = 8
points = P._check_enough_points_sampled(n, points, T, 'cpu')
assert points.shape == (8, 2)
assert all(P._contains(points))
def test_bounds_for_poly2D():
P = ShapelyPolygon(R2('x'), [[0, 10], [0, 0], [10, 2], [10, 8]])
bounds = P.bounding_box()
assert bounds == [0, 10, 0, 10]
P = ShapelyPolygon(R2('x'), [[0, 10], [0, 0], [10, 2], [10, 8], [5, 20]])
bounds = P.bounding_box()
assert bounds == [0, 10, 0, 20]
def test_grid_sampling_inside_poly2D():
P = ShapelyPolygon(R2('x'), [[0, 10], [0, 0], [10, 2], [10, 8]])
points = P.sample_grid(250)
assert points.as_tensor.shape == (250, 2)
assert all(P._contains(points))
def test_grid_sampling_inside_poly2D_no_extra_points():
P = ShapelyPolygon(R2('x'), [[0, 1], [1, 1], [1, 0], [0, 0]])
points = P.sample_grid(100)
assert points.as_tensor.shape == (100, 2)
assert all(P._contains(points))
def test_grid_sampling_inside_poly2D_with_density():
P = ShapelyPolygon(R2('x'), [[0, 10], [0, 0], [10, 2], [10, 8]])
points = P.sample_grid(d=11)
assert all(P._contains(points))
def test_random_sampling_inside_concav_poly2D():
P = ShapelyPolygon(R2('x'), [[0, 0], [0, -5], [-10, -5], [-10, -10], [10, -10],
[10, 10], [-10, 10], [-10, 0]])
points = P.sample_grid(263)
assert points.as_tensor.shape == (263, 2)
assert all(P._contains(points))
def test_boundary_normal_for_concav_poly2D():
P = ShapelyPolygon(R2('x'), [[0, 0], [0, -10], [10, -10], [10, 10], [-10, 10], [-10, 0]])
points = torch.tensor([[0, -5], [5, -10], [5, 10], [-10, 7], [-4, 0], [10, 0]])
points = Points(points, R2('x'))
normals = P.boundary.normal(points)
assert torch.allclose(normals[0], torch.tensor([-1.0, 0]))
assert torch.allclose(normals[1], torch.tensor([0.0, -1]))
assert torch.allclose(normals[2], torch.tensor([0.0, 1]))
assert torch.allclose(normals[3], torch.tensor([-1.0, 0]))
assert torch.allclose(normals[4], torch.tensor([0.0, -1]))
assert torch.allclose(normals[5], torch.tensor([1.0, 0]))
def test_boundary_normal_poly2D():
P = ShapelyPolygon(R2('x'), [[0, 10], [0, 0], [10, 2], [10, 8]])
points = torch.tensor([[0, 5], [10, 5], [1, 2.0/10], [9, 8+2.0/10]])
points = Points(points, R2('x'))
normals = P.boundary.normal(points)
assert torch.allclose(normals[0], torch.tensor([-1.0, 0]))
assert torch.allclose(normals[1], torch.tensor([1.0, 0]))
norm = torch.sqrt(torch.tensor(2**2+10**2))
assert torch.allclose(normals[3], torch.tensor([2.0/norm, 10/norm]))
assert torch.allclose(normals[2], torch.tensor([2.0/norm, -10/norm]))
def test_boundary_normal_poly2D_with_hole():
h = s_geo.Polygon(shell=[[0.15, 0.15], [0.25, 0.15], [0.15, 0.25]])
p = s_geo.Polygon(shell=[[0, 0], [1, 0], [0, 1]], holes=[h.exterior.coords])
P = ShapelyPolygon(R2('x'), shapely_polygon=p)
points = torch.tensor([[0.5, 0], [0, 0.5], [0.2, 0.15]])
points = Points(points, R2('x'))
normals = P.boundary.normal(points)
assert normals.shape == (3, 2)
assert torch.allclose(normals[0], torch.tensor([0.0, -1]))
assert torch.allclose(normals[1], torch.tensor([-1.0, 0]))
assert torch.allclose(normals[2], torch.tensor([0.0, 1]))
| 36.855319
| 93
| 0.609745
|
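The tests above cover the ShapelyPolygon surface end to end; a condensed sketch of the typical call pattern they exercise, with arbitrary triangle vertices:

# Condensed sketch of the ShapelyPolygon calls exercised by the tests above;
# the triangle vertices are arbitrary example values.
import torch
from torchphysics.problem.domains.domain2D.shapely_polygon import ShapelyPolygon
from torchphysics.problem.spaces import R2
from torchphysics.problem.spaces.points import Points

P = ShapelyPolygon(R2('x'), [[0, 0], [4, 0], [0, 3]])
inner = P.sample_random_uniform(n=100)   # random points inside the triangle
edge = P.boundary.sample_grid(50)        # grid points on the boundary

query = Points(torch.tensor([[1.0, 1.0], [5.0, 5.0]]), R2('x'))
print(P._contains(query))                # boolean mask: first point inside, second outside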
4bf5aa4b204fd6ae028a22b9eb191a9d3f74d87c
| 1,376
|
py
|
Python
|
src/abp_sim/scripts/record_distance.py
|
koverman47/ABP-Simulation-Platform
|
ccaf5591fbf1967ae8df5cf3efd4a7e8e64ddd10
|
[
"MIT"
] | 3
|
2020-11-14T02:21:34.000Z
|
2020-11-14T02:23:30.000Z
|
src/abp_sim/scripts/record_distance.py
|
koverman47/ABP-Simulation-Platform
|
ccaf5591fbf1967ae8df5cf3efd4a7e8e64ddd10
|
[
"MIT"
] | null | null | null |
src/abp_sim/scripts/record_distance.py
|
koverman47/ABP-Simulation-Platform
|
ccaf5591fbf1967ae8df5cf3efd4a7e8e64ddd10
|
[
"MIT"
] | 2
|
2020-12-03T01:50:22.000Z
|
2022-03-22T22:18:49.000Z
|
#!/usr/bin/env python
import sys
import rospy
import rospkg
from tf.transformations import euler_from_quaternion
from gazebo_msgs.srv import GetModelState, GetModelStateRequest
s1 = [None, None, None]
s2= [None, None, None]
def record():
rospy.init_node('abp_recorder_dist')
rospy.wait_for_service('/gazebo/get_model_state')
get_model_srv = rospy.ServiceProxy('/gazebo/get_model_state', GetModelState)
m1 = GetModelStateRequest()
m2 = GetModelStateRequest()
m1.model_name = 'satlet0'
m2.model_name = 'satlet1'
f = open('logs/states.log', 'w')
global truth, estimate, update
rate = rospy.Rate(1)
counter = 0
while not rospy.is_shutdown():
req = get_model_srv(m1)
rp = req.pose.position
ra = req.pose.orientation
ang = euler_from_quaternion([ra.x, ra.y, ra.z, ra.w])
s1 = [rp.x, rp.y, ang[2]]
req = get_model_srv(m2)
rp = req.pose.position
ra = req.pose.orientation
ang = euler_from_quaternion([ra.x, ra.y, ra.z, ra.w])
s2 = [rp.x, rp.y, ang[2]]
f.write("%d,%s,t\n" % (counter, ",".join(map(str, s1))))
f.write("%d,%s,e\n" % (counter, ",".join(map(str, s2))))
counter += 1
rate.sleep()
f.close()
if __name__ == "__main__":
try:
record()
except rospy.ROSInterruptException:
pass
| 25.962264
| 80
| 0.616279
|
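The recorder above writes one CSV line per satlet per tick into logs/states.log. A hedged post-processing sketch that pairs the 't' and 'e' rows of each tick and prints the planar separation; the line format is inferred from the f.write calls above:

# Hedged post-processing sketch for logs/states.log as written above.
# Each line is "<tick>,<x>,<y>,<yaw>,<tag>", tag 't' for satlet0 and 'e' for satlet1.
import math
from collections import defaultdict

def print_distances(path='logs/states.log'):
    ticks = defaultdict(dict)
    with open(path) as f:
        for line in f:
            tick, x, y, _yaw, tag = line.strip().split(',')
            ticks[int(tick)][tag] = (float(x), float(y))
    for tick in sorted(ticks):
        pair = ticks[tick]
        if 't' in pair and 'e' in pair:
            (x1, y1), (x2, y2) = pair['t'], pair['e']
            print(tick, math.hypot(x2 - x1, y2 - y1))

if __name__ == '__main__':
    print_distances()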