Dataset schema (one row per source file; ⌀ marks a nullable column):

| column | type | observed range |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 3 to 1.03M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 3 to 972 |
| max_stars_repo_name | string | length 6 to 130 |
| max_stars_repo_head_hexsha | string | length 40 to 78 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 ⌀ | 1 to 191k |
| max_stars_repo_stars_event_min_datetime | string ⌀ | length 24 |
| max_stars_repo_stars_event_max_datetime | string ⌀ | length 24 |
| max_issues_repo_path | string | length 3 to 972 |
| max_issues_repo_name | string | length 6 to 130 |
| max_issues_repo_head_hexsha | string | length 40 to 78 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 ⌀ | 1 to 116k |
| max_issues_repo_issues_event_min_datetime | string ⌀ | length 24 |
| max_issues_repo_issues_event_max_datetime | string ⌀ | length 24 |
| max_forks_repo_path | string | length 3 to 972 |
| max_forks_repo_name | string | length 6 to 130 |
| max_forks_repo_head_hexsha | string | length 40 to 78 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 ⌀ | 1 to 105k |
| max_forks_repo_forks_event_min_datetime | string ⌀ | length 24 |
| max_forks_repo_forks_event_max_datetime | string ⌀ | length 24 |
| content | string | length 3 to 1.03M |
| avg_line_length | float64 | 1.13 to 941k |
| max_line_length | int64 | 2 to 941k |
| alphanum_fraction | float64 | 0 to 1 |
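
Each row appears to pair a file's full text (content) with metadata for the copies of that file found in the most-starred, most-issue-active, and most-forked repositories. A minimal sketch of filtering rows of this shape; the `rows` iterable and the license whitelist are illustrative assumptions, not part of the dataset:

```python
# Sketch only: assumes each row is a dict keyed by the schema columns above;
# `rows` stands in for however the dataset happens to be loaded.
def small_permissive_python_files(rows, max_size=50_000):
    """Yield (path, content) for small Python files under permissive licenses."""
    allowed = {"MIT", "Apache-2.0", "BSD-3-Clause"}
    for row in rows:
        if row["lang"] != "Python" or row["size"] > max_size:
            continue
        if not allowed.intersection(row["max_stars_repo_licenses"]):
            continue
        yield row["max_stars_repo_path"], row["content"]
```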

=== tests/replication/slave/storage/test_events.py ===
repo: rvleij/synapse @ 77d9357226687a177c865bcdeaa0e750612fc078 | licenses: ["Apache-2.0"] | blob: b1b037006db3a060579d4c109cbdf688ca3beca1
ext: py | lang: Python | size: 12,930 bytes
stars: 2 (2020-04-30T18:38:02.000Z to 2020-07-08T21:38:28.000Z) | issues: null | forks: 1 (2020-01-27T23:33:39.000Z)

# Copyright 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from canonicaljson import encode_canonical_json
from synapse.events import FrozenEvent, _EventInternalMetadata
from synapse.events.snapshot import EventContext
from synapse.handlers.room import RoomEventSource
from synapse.replication.slave.storage.events import SlavedEventStore
from synapse.storage.roommember import RoomsForUser
from ._base import BaseSlavedStoreTestCase
USER_ID = "@feeling:blue"
USER_ID_2 = "@bright:blue"
OUTLIER = {"outlier": True}
ROOM_ID = "!room:blue"
logger = logging.getLogger(__name__)
def dict_equals(self, other):
me = encode_canonical_json(self.get_pdu_json())
them = encode_canonical_json(other.get_pdu_json())
return me == them
def patch__eq__(cls):
eq = getattr(cls, "__eq__", None)
cls.__eq__ = dict_equals
def unpatch():
if eq is not None:
cls.__eq__ = eq
return unpatch
class SlavedEventStoreTestCase(BaseSlavedStoreTestCase):
STORE_TYPE = SlavedEventStore
def setUp(self):
# Patch up the equality operator for events so that we can check
# whether lists of events match using assertEquals
self.unpatches = [patch__eq__(_EventInternalMetadata), patch__eq__(FrozenEvent)]
return super(SlavedEventStoreTestCase, self).setUp()
def tearDown(self):
        for unpatch in self.unpatches:
            unpatch()
def test_get_latest_event_ids_in_room(self):
create = self.persist(type="m.room.create", key="", creator=USER_ID)
self.replicate()
self.check("get_latest_event_ids_in_room", (ROOM_ID,), [create.event_id])
join = self.persist(
type="m.room.member",
key=USER_ID,
membership="join",
prev_events=[(create.event_id, {})],
)
self.replicate()
self.check("get_latest_event_ids_in_room", (ROOM_ID,), [join.event_id])
def test_redactions(self):
self.persist(type="m.room.create", key="", creator=USER_ID)
self.persist(type="m.room.member", key=USER_ID, membership="join")
msg = self.persist(type="m.room.message", msgtype="m.text", body="Hello")
self.replicate()
self.check("get_event", [msg.event_id], msg)
redaction = self.persist(type="m.room.redaction", redacts=msg.event_id)
self.replicate()
msg_dict = msg.get_dict()
msg_dict["content"] = {}
msg_dict["unsigned"]["redacted_by"] = redaction.event_id
msg_dict["unsigned"]["redacted_because"] = redaction
redacted = FrozenEvent(msg_dict, msg.internal_metadata.get_dict())
self.check("get_event", [msg.event_id], redacted)
def test_backfilled_redactions(self):
self.persist(type="m.room.create", key="", creator=USER_ID)
self.persist(type="m.room.member", key=USER_ID, membership="join")
msg = self.persist(type="m.room.message", msgtype="m.text", body="Hello")
self.replicate()
self.check("get_event", [msg.event_id], msg)
redaction = self.persist(
type="m.room.redaction", redacts=msg.event_id, backfill=True
)
self.replicate()
msg_dict = msg.get_dict()
msg_dict["content"] = {}
msg_dict["unsigned"]["redacted_by"] = redaction.event_id
msg_dict["unsigned"]["redacted_because"] = redaction
redacted = FrozenEvent(msg_dict, msg.internal_metadata.get_dict())
self.check("get_event", [msg.event_id], redacted)
def test_invites(self):
self.persist(type="m.room.create", key="", creator=USER_ID)
self.check("get_invited_rooms_for_local_user", [USER_ID_2], [])
event = self.persist(type="m.room.member", key=USER_ID_2, membership="invite")
self.replicate()
self.check(
"get_invited_rooms_for_local_user",
[USER_ID_2],
[
RoomsForUser(
ROOM_ID,
USER_ID,
"invite",
event.event_id,
event.internal_metadata.stream_ordering,
)
],
)
def test_push_actions_for_user(self):
self.persist(type="m.room.create", key="", creator=USER_ID)
self.persist(type="m.room.join", key=USER_ID, membership="join")
self.persist(
type="m.room.join", sender=USER_ID, key=USER_ID_2, membership="join"
)
event1 = self.persist(type="m.room.message", msgtype="m.text", body="hello")
self.replicate()
self.check(
"get_unread_event_push_actions_by_room_for_user",
[ROOM_ID, USER_ID_2, event1.event_id],
{"highlight_count": 0, "notify_count": 0},
)
self.persist(
type="m.room.message",
msgtype="m.text",
body="world",
push_actions=[(USER_ID_2, ["notify"])],
)
self.replicate()
self.check(
"get_unread_event_push_actions_by_room_for_user",
[ROOM_ID, USER_ID_2, event1.event_id],
{"highlight_count": 0, "notify_count": 1},
)
self.persist(
type="m.room.message",
msgtype="m.text",
body="world",
push_actions=[
(USER_ID_2, ["notify", {"set_tweak": "highlight", "value": True}])
],
)
self.replicate()
self.check(
"get_unread_event_push_actions_by_room_for_user",
[ROOM_ID, USER_ID_2, event1.event_id],
{"highlight_count": 1, "notify_count": 2},
)
def test_get_rooms_for_user_with_stream_ordering(self):
"""Check that the cache on get_rooms_for_user_with_stream_ordering is invalidated
by rows in the events stream
"""
self.persist(type="m.room.create", key="", creator=USER_ID)
self.persist(type="m.room.member", key=USER_ID, membership="join")
self.replicate()
self.check("get_rooms_for_user_with_stream_ordering", (USER_ID_2,), set())
j2 = self.persist(
type="m.room.member", sender=USER_ID_2, key=USER_ID_2, membership="join"
)
self.replicate()
self.check(
"get_rooms_for_user_with_stream_ordering",
(USER_ID_2,),
{(ROOM_ID, j2.internal_metadata.stream_ordering)},
)
def test_get_rooms_for_user_with_stream_ordering_with_multi_event_persist(self):
"""Check that current_state invalidation happens correctly with multiple events
in the persistence batch.
This test attempts to reproduce a race condition between the event persistence
loop and a worker-based Sync handler.
The problem occurred when the master persisted several events in one batch. It
only updates the current_state at the end of each batch, so the obvious thing
to do is then to issue a current_state_delta stream update corresponding to the
last stream_id in the batch.
However, that raises the possibility that a worker will see the replication
notification for a join event before the current_state caches are invalidated.
The test involves:
* creating a join and a message event for a user, and persisting them in the
same batch
* controlling the replication stream so that updates are sent gradually
* between each bunch of replication updates, check that we see a consistent
snapshot of the state.
"""
self.persist(type="m.room.create", key="", creator=USER_ID)
self.persist(type="m.room.member", key=USER_ID, membership="join")
self.replicate()
self.check("get_rooms_for_user_with_stream_ordering", (USER_ID_2,), set())
# limit the replication rate
repl_transport = self.server_to_client_transport
repl_transport.autoflush = False
# build the join and message events and persist them in the same batch.
logger.info("----- build test events ------")
j2, j2ctx = self.build_event(
type="m.room.member", sender=USER_ID_2, key=USER_ID_2, membership="join"
)
msg, msgctx = self.build_event()
self.get_success(
self.storage.persistence.persist_events([(j2, j2ctx), (msg, msgctx)])
)
self.replicate()
event_source = RoomEventSource(self.hs)
event_source.store = self.slaved_store
current_token = self.get_success(event_source.get_current_key())
# gradually stream out the replication
while repl_transport.buffer:
logger.info("------ flush ------")
repl_transport.flush(30)
self.pump(0)
prev_token = current_token
current_token = self.get_success(event_source.get_current_key())
# attempt to replicate the behaviour of the sync handler.
#
# First, we get a list of the rooms we are joined to
joined_rooms = self.get_success(
self.slaved_store.get_rooms_for_user_with_stream_ordering(USER_ID_2)
)
# Then, we get a list of the events since the last sync
membership_changes = self.get_success(
self.slaved_store.get_membership_changes_for_user(
USER_ID_2, prev_token, current_token
)
)
logger.info(
"%s->%s: joined_rooms=%r membership_changes=%r",
prev_token,
current_token,
joined_rooms,
membership_changes,
)
            # the membership change is only of any use to us if the room is in the
            # joined_rooms list.
if membership_changes:
self.assertEqual(
joined_rooms, {(ROOM_ID, j2.internal_metadata.stream_ordering)}
)
event_id = 0
def persist(self, backfill=False, **kwargs):
"""
Returns:
synapse.events.FrozenEvent: The event that was persisted.
"""
event, context = self.build_event(**kwargs)
if backfill:
self.get_success(
self.storage.persistence.persist_events(
[(event, context)], backfilled=True
)
)
else:
self.get_success(self.storage.persistence.persist_event(event, context))
return event
def build_event(
self,
sender=USER_ID,
room_id=ROOM_ID,
type="m.room.message",
key=None,
internal={},
state=None,
depth=None,
prev_events=[],
auth_events=[],
prev_state=[],
redacts=None,
push_actions=[],
**content
):
if depth is None:
depth = self.event_id
if not prev_events:
latest_event_ids = self.get_success(
self.master_store.get_latest_event_ids_in_room(room_id)
)
prev_events = [(ev_id, {}) for ev_id in latest_event_ids]
event_dict = {
"sender": sender,
"type": type,
"content": content,
"event_id": "$%d:blue" % (self.event_id,),
"room_id": room_id,
"depth": depth,
"origin_server_ts": self.event_id,
"prev_events": prev_events,
"auth_events": auth_events,
}
if key is not None:
event_dict["state_key"] = key
event_dict["prev_state"] = prev_state
if redacts is not None:
event_dict["redacts"] = redacts
event = FrozenEvent(event_dict, internal_metadata_dict=internal)
self.event_id += 1
if state is not None:
state_ids = {key: e.event_id for key, e in state.items()}
context = EventContext.with_state(
state_group=None, current_state_ids=state_ids, prev_state_ids=state_ids
)
else:
state_handler = self.hs.get_state_handler()
context = self.get_success(state_handler.compute_event_context(event))
self.master_store.add_push_actions_to_staging(
event.event_id, {user_id: actions for user_id, actions in push_actions}
)
return event, context

stats: avg_line_length 35.424658 | max_line_length 89 | alphanum_fraction 0.613998
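
The patch__eq__/unpatch pair in the file above is a reusable monkeypatching idiom: temporarily install a value-based __eq__ so assertEqual can compare event objects, then restore the original in tearDown. A standalone sketch of the same pattern (Point is a made-up class, not from the test file):

```python
# Illustrative only: temporarily patch a class's __eq__, then restore it.
class Point:
    def __init__(self, x, y):
        self.x, self.y = x, y

def patch__eq__(cls):
    eq = getattr(cls, "__eq__", None)
    cls.__eq__ = lambda self, other: vars(self) == vars(other)
    def unpatch():
        if eq is not None:
            cls.__eq__ = eq  # put the original comparison back
    return unpatch

unpatch = patch__eq__(Point)
assert Point(1, 2) == Point(1, 2)        # value comparison while patched
unpatch()
assert not (Point(1, 2) == Point(1, 2))  # identity comparison again
```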

=== Python 3 - Fundamentos/Desafio 31.py ===
repo: Paimonz/Python-Estudos @ bbc4d7abb2eebff7f160eac54a290fd9ea7a1cff | licenses: ["MIT"] | blob: cf4a1f58bc3f0a53992c6ff9033e1b6edee71fe6
ext: py | lang: Python | size: 219 bytes
stars: null | issues: null | forks: null

v = int(input('\nWhat is the trip distance in km?: '))
if v >= 200:
    pv = 0.50 * v  # trips of 200 km or more: R$0.50 per km
else:
    pv = 0.45 * v  # shorter trips: R$0.45 per km
print(f'The price for this trip will be R$:{pv}')

stats: avg_line_length 31.285714 | max_line_length 56 | alphanum_fraction 0.575342
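
A quick worked check of the fare rule as written: a 150 km trip costs 0.45 × 150 = R$67.50, while a 250 km trip costs 0.50 × 250 = R$125.00.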

=== hospital/models.py ===
repo: djdays/hms @ ef82611497c5503c37ea1077fd6c843e49eafe2b | licenses: ["MIT"] | blob: 81928363da052a13b9cf7bdde677113f1e551b82
ext: py | lang: Python | size: 3,172 bytes
stars: null | issues: null | forks: null

from django.db import models
from django.contrib.auth.models import User
departments=[('Cardiologist','Cardiologist'),
('Dermatologists','Dermatologists'),
('Emergency Medicine Specialists','Emergency Medicine Specialists'),
('Allergists/Immunologists','Allergists/Immunologists'),
('Anesthesiologists','Anesthesiologists'),
('Colon and Rectal Surgeons','Colon and Rectal Surgeons')
]
class Doctor(models.Model):
user=models.OneToOneField(User,on_delete=models.CASCADE)
profile_pic= models.ImageField(upload_to='profile_pic/DoctorProfilePic/',null=True,blank=True)
address = models.CharField(max_length=40)
mobile = models.CharField(max_length=20,null=True)
department= models.CharField(max_length=50,choices=departments,default='Cardiologist')
status=models.BooleanField(default=False)
@property
def get_name(self):
return self.user.first_name+" "+self.user.last_name
@property
def get_id(self):
return self.user.id
def __str__(self):
return "{} ({})".format(self.user.first_name,self.department)
class Patient(models.Model):
user=models.OneToOneField(User,on_delete=models.CASCADE)
profile_pic= models.ImageField(upload_to='profile_pic/PatientProfilePic/',null=True,blank=True)
address = models.CharField(max_length=40)
mobile = models.CharField(max_length=20,null=False)
symptoms = models.CharField(max_length=100,null=False)
assignedDoctorId = models.PositiveIntegerField(null=True)
admitDate=models.DateField(auto_now=True)
status=models.BooleanField(default=False)
@property
def get_name(self):
return self.user.first_name+" "+self.user.last_name
@property
def get_id(self):
return self.user.id
def __str__(self):
return self.user.first_name+" ("+self.symptoms+")"
class Appointment(models.Model):
patientId=models.PositiveIntegerField(null=True)
doctorId=models.PositiveIntegerField(null=True)
patientName=models.CharField(max_length=40,null=True)
doctorName=models.CharField(max_length=40,null=True)
appointmentDate=models.DateField(auto_now=True)
description=models.TextField(max_length=500)
status=models.BooleanField(default=False)
class PatientDischargeDetails(models.Model):
patientId=models.PositiveIntegerField(null=True)
patientName=models.CharField(max_length=40)
assignedDoctorName=models.CharField(max_length=40)
address = models.CharField(max_length=40)
mobile = models.CharField(max_length=20,null=True)
symptoms = models.CharField(max_length=100,null=True)
admitDate=models.DateField(null=False)
releaseDate=models.DateField(null=False)
daySpent=models.PositiveIntegerField(null=False)
roomCharge=models.PositiveIntegerField(null=False)
medicineCost=models.PositiveIntegerField(null=False)
doctorFee=models.PositiveIntegerField(null=False)
OtherCharge=models.PositiveIntegerField(null=False)
total=models.PositiveIntegerField(null=False)
#Developed By : J Shridhanajeyan
#instagram.com/iamDJDays
#Youtube :youtube.com/DJDays

stats: avg_line_length 38.216867 | max_line_length 100 | alphanum_fraction 0.741803
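
A minimal sketch of how these models fit together, with hypothetical data and a standard Django setup assumed. Note that Patient.assignedDoctorId stores the doctor's user id as a plain integer rather than a ForeignKey, so referential integrity is left to the application:

```python
# Illustrative only: every Doctor/Patient wraps a django.contrib.auth User.
from django.contrib.auth.models import User
from hospital.models import Doctor, Patient

doc_user = User.objects.create_user("asmith", first_name="Ann", last_name="Smith")
doctor = Doctor.objects.create(user=doc_user, address="12 Main St",
                               mobile="555-0100", department="Cardiologist",
                               status=True)

pat_user = User.objects.create_user("jdoe", first_name="John", last_name="Doe")
patient = Patient.objects.create(user=pat_user, address="34 Oak Ave",
                                 mobile="555-0101", symptoms="fever",
                                 assignedDoctorId=doctor.get_id, status=True)

print(doctor)            # Ann (Cardiologist)
print(patient.get_name)  # John Doe
```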

=== ophyd/utils/epics_pvs.py ===
repo: mattclarke/ophyd @ ffa67739f850f70f3bff9be9df30e9046f29534e | licenses: ["BSD-3-Clause"] | blob: a9940fda977d1746f3783ecb44b4a036f2869095
ext: py | lang: Python | size: 9,177 bytes
stars: null | issues: 4 (2015-08-13T21:45:43.000Z to 2016-01-17T03:56:10.000Z) | forks: 1 (2015-06-11T17:29:02.000Z)

# vi: ts=4 sw=4 sts=4 expandtab
from enum import IntEnum
import time as ttime
import logging
import functools
import numpy as np
import typing
from .errors import DisconnectedError, OpException
__all__ = ['split_record_field',
'strip_field',
'record_field',
'set_and_wait',
'AlarmStatus',
'AlarmSeverity',
'fmt_time'
]
logger = logging.getLogger(__name__)
class BadPVName(ValueError, OpException):
...
class AlarmSeverity(IntEnum):
NO_ALARM = 0
MINOR = 1
MAJOR = 2
INVALID = 3
class AlarmStatus(IntEnum):
NO_ALARM = 0
READ = 1
WRITE = 2
HIHI = 3
HIGH = 4
LOLO = 5
LOW = 6
STATE = 7
COS = 8
COMM = 9
TIMEOUT = 10
HWLIMIT = 11
CALC = 12
SCAN = 13
LINK = 14
SOFT = 15
BAD_SUB = 16
UDF = 17
DISABLE = 18
SIMM = 19
READ_ACCESS = 20
WRITE_ACCESS = 21
def validate_pv_name(pv):
'''Validates that there is not more than 1 '.' in pv
Parameters
----------
pv : str
The pv to check
Raises
------
BadPVName
'''
if pv.count('.') > 1:
raise BadPVName(pv)
def split_record_field(pv):
'''Splits a pv into (record, field)
Parameters
----------
pv : str
the pv to split
Returns
-------
record : str
field : str
'''
if '.' in pv:
record, field = pv.rsplit('.', 1)
else:
record, field = pv, ''
return record, field
def strip_field(pv):
'''Strip off the field from a record'''
return split_record_field(pv)[0]
def record_field(record, field):
'''Given a record and a field, combine them into
a pv of the form: record.FIELD
'''
record = strip_field(record)
return '%s.%s' % (record, field.upper())
def waveform_to_string(value, type_=str, delim=''):
'''Convert a waveform that represents a string into an actual Python string
Parameters
----------
value
The value to convert
type_ : type, optional
Python type to convert to
delim : str, optional
delimiter to use when joining string
'''
try:
value = delim.join(chr(c) for c in value)
except TypeError:
value = type_(value)
try:
value = value[:value.index('\0')]
except (IndexError, ValueError):
pass
return value
def records_from_db(fn):
'''Naively parse db/template files looking for record names
Returns
-------
records : list
[(record type, record name), ...]
'''
ret = []
for line in open(fn, 'rt').readlines():
line = line.strip()
if line.startswith('#'):
continue
if not (line.startswith('record') or line.startswith('grecord')):
continue
if '(' not in line:
continue
line = line[line.index('(') + 1:]
if ',' not in line:
continue
rtype, record = line.split(',', 1)
rtype = rtype.strip()
record = record.strip()
if record.startswith('"'):
# Surrounded by quotes, easy to parse
record = record[1:]
record = record[:record.index('"')]
else:
# No quotes, and macros may contain parentheses
# Find the first non-matching parenthesis and
# that should denote the end of the record name
#
# $(P)$(R)Record)
# ^
in_paren = 0
for i, c in enumerate(record):
if c == '(':
in_paren += 1
elif c == ')':
in_paren -= 1
if in_paren < 0:
record = record[:i]
break
ret.append((rtype, record))
return ret
def raise_if_disconnected(fcn):
'''Decorator to catch attempted access to disconnected EPICS channels.'''
@functools.wraps(fcn)
def wrapper(self, *args, **kwargs):
if self.connected:
return fcn(self, *args, **kwargs)
else:
raise DisconnectedError('{} is not connected'.format(self.name))
return wrapper
def set_and_wait(signal, val, poll_time=0.01, timeout=10, rtol=None,
atol=None):
"""Set a signal to a value and wait until it reads correctly.
For floating point values, it is strongly recommended to set a tolerance.
If tolerances are unset, the values will be compared exactly.
Parameters
----------
signal : EpicsSignal (or any object with `get` and `put`)
val : object
value to set signal to
poll_time : float, optional
how soon to check whether the value has been successfully set
timeout : float, optional
maximum time to wait for value to be successfully set
    rtol : float, optional
        allowed relative tolerance between the readback and setpoint values
    atol : float, optional
        allowed absolute tolerance between the readback and setpoint values
Raises
------
TimeoutError if timeout is exceeded
"""
signal.put(val)
expiration_time = ttime.time() + timeout if timeout is not None else None
current_value = signal.get()
if atol is None and hasattr(signal, 'tolerance'):
atol = signal.tolerance
if rtol is None and hasattr(signal, 'rtolerance'):
rtol = signal.rtolerance
try:
enum_strings = signal.enum_strs
except AttributeError:
enum_strings = ()
if atol is not None:
within_str = ['within {!r}'.format(atol)]
else:
within_str = []
if rtol is not None:
within_str.append('(relative tolerance of {!r})'.format(rtol))
if within_str:
within_str = ' '.join([''] + within_str)
else:
within_str = ''
while not _compare_maybe_enum(val, current_value, enum_strings, atol,
rtol):
logger.debug("Waiting for %s to be set from %r to %r%s...",
signal.name, current_value, val, within_str)
ttime.sleep(poll_time)
if poll_time < 0.1:
            poll_time *= 2  # exponential back-off, capped by the check above
current_value = signal.get()
if expiration_time is not None and ttime.time() > expiration_time:
raise TimeoutError("Attempted to set %r to value %r and timed "
"out after %r seconds. Current value is %r." %
(signal, val, timeout, current_value))
def _compare_maybe_enum(a, b, enums, atol, rtol):
if enums:
# convert enum values to strings if necessary first:
if not isinstance(a, str):
a = enums[a]
if not isinstance(b, str):
b = enums[b]
# then compare the strings
return a == b
# if either relative/absolute tolerance is used, use numpy
# to compare:
if atol is not None or rtol is not None:
return np.allclose(a, b,
rtol=rtol if rtol is not None else 1e-5,
atol=atol if atol is not None else 1e-8,
)
ret = (a == b)
try:
return bool(ret)
except ValueError:
return np.all(ret)
_type_map = {'number': (float, np.floating),
'array': (np.ndarray, list, tuple),
'string': (str, ),
'integer': (int, np.integer),
}
def data_type(val):
'''Determine the JSON-friendly type name given a value
Returns
-------
str
One of {'number', 'integer', 'array', 'string'}
Raises
------
ValueError if the type is not recognized
'''
bad_iterables = (str, bytes, dict)
if isinstance(val, typing.Iterable) and not isinstance(val, bad_iterables):
return 'array'
for json_type, py_types in _type_map.items():
if isinstance(val, py_types):
return json_type
raise ValueError(
f'Cannot determine the appropriate bluesky-friendly data type for '
f'value {val} of Python type {type(val)}. '
f'Supported types include: int, float, str, and iterables such as '
f'list, tuple, np.ndarray, and so on.'
)
def data_shape(val):
'''Determine data-shape (dimensions)
Returns
-------
list
Empty list if val is number or string, otherwise
``list(np.ndarray.shape)``
'''
if data_type(val) != 'array':
return []
try:
return list(val.shape)
except AttributeError:
return [len(val)]
# Vendored from pyepics v3.3.0
# e33b9290282c93f8dfe0fbe81ced55cbcab99564
# Copyright 2010 Matthew Newville, The University of Chicago.
# All rights reserved.
# Epics Open License
# see other_licenses folder for full license
def fmt_time(tstamp=None):
"simple formatter for time values"
if tstamp is None:
tstamp = ttime.time()
tstamp, frac = divmod(tstamp, 1)
return "%s.%5.5i" % (ttime.strftime("%Y-%m-%d %H:%M:%S",
ttime.localtime(tstamp)),
round(1.e5*frac))

stats: avg_line_length 25.350829 | max_line_length 79 | alphanum_fraction 0.564455
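
A short usage sketch for the PV-name helpers and introspection utilities in the file above; the PV string itself is made up for illustration:

```python
from ophyd.utils.epics_pvs import (split_record_field, record_field,
                                   data_type, data_shape)

pv = "XF:31IDA-OP{Tbl-Ax:X1}Mtr.RBV"   # hypothetical record.FIELD name
print(split_record_field(pv))          # ('XF:31IDA-OP{Tbl-Ax:X1}Mtr', 'RBV')
print(record_field(pv, "val"))         # 'XF:31IDA-OP{Tbl-Ax:X1}Mtr.VAL'
print(data_type([1.0, 2.0]))           # 'array'
print(data_shape([1.0, 2.0]))          # [2]
```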

=== setup.py ===
repo: pfmoore/placeholder @ d70e1d43a477f0864e8861bc300109eab7c0eb59 | licenses: ["Apache-2.0"] | blob: 9edf3061caa64bc1a203c768065d2a06a1e95b49
ext: py | lang: Python | size: 1,378 bytes
stars: null | issues: null | forks: null

from setuptools import setup, Extension
setup(
name='placeholder',
version='1.2',
description='Operator overloading for fast anonymous functions.',
long_description=open('README.md').read(),
long_description_content_type='text/markdown',
author='Aric Coady',
author_email='aric.coady@gmail.com',
url='https://github.com/coady/placeholder',
project_urls={'Documentation': 'https://coady.github.io/placeholder'},
license='Apache Software License',
packages=['placeholder'],
package_data={'placeholder': ['py.typed']},
zip_safe=False,
ext_modules=[Extension('placeholder.partials', ['placeholder/partials.c'])],
python_requires='>=3.6',
tests_require=['pytest-cov', 'pytest-parametrized'],
keywords='functional lambda scala underscore',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Topic :: Software Development :: Libraries :: Python Modules',
'Typing :: Typed',
],
)

stats: avg_line_length 39.371429 | max_line_length 80 | alphanum_fraction 0.650218

=== config.py ===
repo: philblower/jupyterlab_flask_sqlalchemy_pattern @ 81c99d5a53a2b0abac8f28c71fab190b32ad5d78 | licenses: ["MIT"] | blob: 661dcffdffc446ff9d233b32adde819453b213b1
ext: py | lang: Python | size: 1,762 bytes
stars: null | issues: null | forks: null

import os
basedir = os.path.abspath(os.path.dirname(__file__))
class Config:
WTF_CSRF_ENABLED = False
SQLALCHEMY_COMMIT_ON_TEARDOWN = True
SQLALCHEMY_ECHO = False
SQLALCHEMY_TRACK_MODIFICATIONS = False
# This allows the application to customize the configuration.
    # Add appropriate code here if you want to implement app-level customization.
@staticmethod
def init_app(app):
pass
class DevelopmentConfig(Config):
DEBUG = True
EXPLAIN_TEMPLATE_LOADING = False
DB1 = 'chinook.sqlite'
DB2 = 'jfs1.sqlite'
DB3 = 'jfs2.sqlite'
# SQLALCHEMY_DATABASE_URI is the default connection used if bind key = None (or no bind key is specified in the model)
SQLALCHEMY_DATABASE_URI = "sqlite:///" + os.path.join(basedir, DB1)
# see http://flask-sqlalchemy.pocoo.org/2.3/binds/
SQLALCHEMY_BINDS = {
"db2":"sqlite:///" + os.path.join(basedir, DB2),
"db3":"sqlite:///" + os.path.join(basedir, DB3)
}
class ProductionConfig(Config):
DEBUG = False
EXPLAIN_TEMPLATE_LOADING = False
DB1 = 'chinook.sqlite'
DB2 = 'jfs1.sqlite'
DB3 = 'jfs2.sqlite'
# SQLALCHEMY_DATABASE_URI is the default connection used if bind key = None (or no bind key is specified in the model)
SQLALCHEMY_DATABASE_URI = "sqlite:///" + os.path.join(basedir, DB1)
# see http://flask-sqlalchemy.pocoo.org/2.3/binds/
SQLALCHEMY_BINDS = {
"db2":"sqlite:///" + os.path.join(basedir, DB2),
"db3":"sqlite:///" + os.path.join(basedir, DB3)
}
config = {
"development": DevelopmentConfig,
"production": ProductionConfig,
"default": DevelopmentConfig
}
# Set this to config["key"]. It sets the configuration in app/__init__.py
conf = config["development"]

stats: avg_line_length 30.912281 | max_line_length 122 | alphanum_fraction 0.677639
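
A minimal sketch of how the SQLALCHEMY_BINDS above are consumed, assuming Flask-SQLAlchemy's standard __bind_key__ mechanism (the model names are hypothetical):

```python
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from config import conf

app = Flask(__name__)
app.config.from_object(conf)
db = SQLAlchemy(app)

class Track(db.Model):        # no bind key: uses SQLALCHEMY_DATABASE_URI (DB1)
    id = db.Column(db.Integer, primary_key=True)

class Journal(db.Model):      # routed to the "db2" bind (jfs1.sqlite)
    __bind_key__ = "db2"
    id = db.Column(db.Integer, primary_key=True)
```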

=== Examples/AppKit/CurrencyConverter/setup.py ===
repo: linuxfood/pyobjc-framework-Cocoa-test @ 3475890f165ab26a740f13d5afe4c62b4423a140 | licenses: ["MIT"] | blob: 6bfaf97bc788c4b6e14458bbd2ca8171fda987b8
ext: py | lang: Python | size: 271 bytes
stars: null | issues: null | forks: null

"""
Script for building the example.
Usage:
python3 setup.py py2app
"""
from setuptools import setup
setup(
name="CurrencyConverter",
app=["CurrencyConverter.py"],
data_files=["English.lproj"],
setup_requires=["py2app", "pyobjc-framework-Cocoa"],
)

stats: avg_line_length 18.066667 | max_line_length 56 | alphanum_fraction 0.690037

=== gym_miniworld/miniworld.py ===
repo: bdsaglam/gym-westworld @ e588b7a394d1cb49ab5e24e806ebc8c136608492 | licenses: ["Apache-2.0"] | blob: 66a1ffc9987725f9cf49163122cb485d49bc2d81
ext: py | lang: Python | size: 29,110 bytes
stars: null | issues: null | forks: null

from enum import IntEnum
import gym
from gym import spaces
from gym_miniworld.entity import *
from gym_miniworld.math_utils import *
from gym_miniworld.params import *
from gym_miniworld.random_utils import *
from gym_miniworld.room import Room
class MiniWorldEnv(gym.Env):
"""
Base class for MiniWorld environments. Implements the procedural
world generation and simulation logic.
"""
metadata = {
'render.modes': ['human', 'rgb_array'],
'video.frames_per_second': 30
}
# Enumeration of possible actions
class Actions(IntEnum):
# Turn left or right by a small amount
turn_left = 0
turn_right = 1
# Move forward or back by a small amount
move_forward = 2
move_back = 3
# Pick up or drop an object being carried
pickup = 4
drop = 5
# Toggle/activate an object
toggle = 6
# Done completing task
done = 7
def __init__(
self,
seed=None,
max_episode_steps=1500,
obs_width=128,
obs_height=128,
window_width=512,
window_height=512,
params=DEFAULT_PARAMS,
domain_rand=False,
):
# Action enumeration for this environment
self.actions = MiniWorldEnv.Actions
# Actions are discrete integer values
self.action_space = spaces.Discrete(len(self.actions))
# Observations are RGB images with pixels in [0, 255]
self.observation_space = spaces.Box(
low=0,
high=255,
shape=(obs_height, obs_width, 3),
dtype=np.uint8
)
self.reward_range = (-math.inf, math.inf)
# Maximum number of steps per episode
self.max_episode_steps = max_episode_steps
# Simulation parameters, used for domain randomization
self.params = params
# Domain randomization enable/disable flag
self.domain_rand = domain_rand
# Window for displaying the environment to humans
self.window = None
# Invisible window to render into (shadow OpenGL context)
self.shadow_window = pyglet.window.Window(width=1, height=1, visible=False)
# Enable depth testing and backface culling
glEnable(GL_DEPTH_TEST)
glEnable(GL_CULL_FACE)
# Frame buffer used to render observations
self.obs_fb = FrameBuffer(obs_width, obs_height, 8)
# Frame buffer used for human visualization
self.vis_fb = FrameBuffer(window_width, window_height, 16)
# Compute the observation display size
self.obs_disp_width = 256
self.obs_disp_height = obs_height * (self.obs_disp_width / obs_width)
# For displaying text
self.text_label = pyglet.text.Label(
font_name="Arial",
font_size=14,
multiline=True,
width=400,
x=window_width + 5,
y=window_height - (self.obs_disp_height + 19)
)
# Initialize the state
self.seed(seed)
self.construct()
# self.reset()
def close(self):
pass
def seed(self, seed=None):
self.rand = RandGen(seed)
return [seed]
def _construct(self):
raise NotImplementedError
def construct(self):
# List of entities contained
self.entities = []
# List of rooms in the world
self.rooms = []
# Wall segments for collision detection
# Shape is (N, 2, 3)
self.wall_segs = []
# Construct the world
self._construct()
# Compute the min and max x, z extents of the whole floorplan
self.min_x = min([r.min_x for r in self.rooms])
self.max_x = max([r.max_x for r in self.rooms])
self.min_z = min([r.min_z for r in self.rooms])
self.max_z = max([r.max_z for r in self.rooms])
# Generate static data
if len(self.wall_segs) == 0:
self._gen_static_data()
# Create the agent
self.agent = Agent()
# Step count since episode start
self.step_count = 0
self._reset()
# Check if domain randomization is enabled or not
rand = self.rand if self.domain_rand else None
# Randomize elements of the world (domain randomization)
self.params.sample_many(rand, self, [
'sky_color',
'light_pos',
'light_color',
'light_ambient'
])
# Get the max forward step distance
self.max_forward_step = self.params.get_max('forward_step')
# Randomize parameters of the entities
for ent in self.entities:
ent.randomize(self.params, rand)
# Pre-compile static parts of the environment into a display list
self._render_static()
def _reset(self):
raise NotImplementedError
def reset(self):
"""
Reset the simulation at the start of a new episode
This also randomizes many environment parameters (domain randomization)
"""
# Step count since episode start
self.step_count = 0
self._reset()
# Generate the first camera image
obs = self.render_obs()
# Return first observation
return obs
def _get_carry_pos(self, agent_pos, ent):
"""
Compute the position at which to place an object being carried
"""
dist = self.agent.radius + ent.radius + self.max_forward_step
pos = agent_pos + self.agent.dir_vec * 1.05 * dist
# Adjust the Y-position so the object is visible while being carried
y_pos = max(self.agent.cam_height - ent.height - 0.3, 0)
pos = pos + Y_VEC * y_pos
return pos
def move_agent(self, fwd_dist, fwd_drift):
"""
Move the agent forward
"""
next_pos = (
self.agent.pos +
self.agent.dir_vec * fwd_dist +
self.agent.right_vec * fwd_drift
)
if self.intersect(self.agent, next_pos, self.agent.radius):
return False
carrying = self.agent.carrying
if carrying:
next_carrying_pos = self._get_carry_pos(next_pos, carrying)
if self.intersect(carrying, next_carrying_pos, carrying.radius):
return False
carrying.pos = next_carrying_pos
self.agent.pos = next_pos
return True
def turn_agent(self, turn_angle):
"""
Turn the agent left or right
"""
turn_angle *= (math.pi / 180)
orig_dir = self.agent.dir
self.agent.dir += turn_angle
carrying = self.agent.carrying
if carrying:
pos = self._get_carry_pos(self.agent.pos, carrying)
if self.intersect(carrying, pos, carrying.radius):
self.agent.dir = orig_dir
return False
carrying.pos = pos
carrying.dir = self.agent.dir
return True
def step(self, action):
"""
Perform one action and update the simulation
"""
self.step_count += 1
rand = self.rand if self.domain_rand else None
fwd_step = self.params.sample(rand, 'forward_step')
fwd_drift = self.params.sample(rand, 'forward_drift')
turn_step = self.params.sample(rand, 'turn_step')
if action == self.actions.move_forward:
self.move_agent(fwd_step, fwd_drift)
elif action == self.actions.move_back:
self.move_agent(-fwd_step, fwd_drift)
elif action == self.actions.turn_left:
self.turn_agent(turn_step)
elif action == self.actions.turn_right:
self.turn_agent(-turn_step)
# Pick up an object
elif action == self.actions.pickup:
# Position at which we will test for an intersection
test_pos = self.agent.pos + self.agent.dir_vec * 1.5 * self.agent.radius
ent = self.intersect(self.agent, test_pos, 1.2 * self.agent.radius)
if not self.agent.carrying:
if isinstance(ent, Entity):
if not ent.is_static:
self.agent.carrying = ent
# Drop an object being carried
elif action == self.actions.drop:
if self.agent.carrying:
self.agent.carrying.pos[1] = 0
self.agent.carrying = None
# If we are carrying an object, update its position as we move
if self.agent.carrying:
ent_pos = self._get_carry_pos(self.agent.pos, self.agent.carrying)
self.agent.carrying.pos = ent_pos
self.agent.carrying.dir = self.agent.dir
# Generate the current camera image
obs = self.render_obs()
# If the maximum time step count is reached
if self.step_count >= self.max_episode_steps:
done = True
reward = 0
return obs, reward, done, {}
reward = 0
done = False
return obs, reward, done, {}
def add_rect_room(
self,
min_x,
max_x,
min_z,
max_z,
**kwargs
):
"""
Create a rectangular room
"""
# 2D outline coordinates of the room,
# listed in counter-clockwise order when viewed from the top
outline = np.array([
# East wall
[max_x, max_z],
# North wall
[max_x, min_z],
# West wall
[min_x, min_z],
# South wall
[min_x, max_z],
])
return self.add_room(outline=outline, **kwargs)
def add_room(self, **kwargs):
"""
Create a new room
"""
assert len(self.wall_segs) == 0, "cannot add rooms after static data is generated"
room = Room(**kwargs)
self.rooms.append(room)
return room
def connect_rooms(
self,
room_a,
room_b,
min_x=None,
max_x=None,
min_z=None,
max_z=None,
max_y=None
):
"""
Connect two rooms along facing edges
"""
def find_facing_edges():
for idx_a in range(room_a.num_walls):
norm_a = room_a.edge_norms[idx_a]
for idx_b in range(room_b.num_walls):
norm_b = room_b.edge_norms[idx_b]
# Reject edges that are not facing each other
if np.dot(norm_a, norm_b) > -0.9:
continue
dir = room_b.outline[idx_b] - room_a.outline[idx_a]
# Reject edges that are not touching
if np.dot(norm_a, dir) > 0.05:
continue
return idx_a, idx_b
return None, None
idx_a, idx_b = find_facing_edges()
        assert idx_a is not None, "matching edges not found in connect_rooms"
start_a, end_a = room_a.add_portal(
edge=idx_a,
min_x=min_x,
max_x=max_x,
min_z=min_z,
max_z=max_z,
max_y=max_y
)
start_b, end_b = room_b.add_portal(
edge=idx_b,
min_x=min_x,
max_x=max_x,
min_z=min_z,
max_z=max_z,
max_y=max_y
)
a = room_a.outline[idx_a] + room_a.edge_dirs[idx_a] * start_a
b = room_a.outline[idx_a] + room_a.edge_dirs[idx_a] * end_a
c = room_b.outline[idx_b] + room_b.edge_dirs[idx_b] * start_b
d = room_b.outline[idx_b] + room_b.edge_dirs[idx_b] * end_b
# If the portals are directly connected, stop
if np.linalg.norm(a - d) < 0.001:
return
len_a = np.linalg.norm(b - a)
len_b = np.linalg.norm(d - c)
# Room outline points must be specified in counter-clockwise order
outline = np.stack([c, b, a, d])
outline = np.stack([outline[:, 0], outline[:, 2]], axis=1)
        max_y = max_y if max_y is not None else room_a.wall_height
room = Room(
outline,
wall_height=max_y,
wall_tex=room_a.wall_tex_name,
floor_tex=room_a.floor_tex_name,
ceil_tex=room_a.ceil_tex_name,
no_ceiling=room_a.no_ceiling,
)
self.rooms.append(room)
room.add_portal(1, start_pos=0, end_pos=len_a)
room.add_portal(3, start_pos=0, end_pos=len_b)
def place_entity(
self,
ent,
room=None,
pos=None,
dir=None,
min_x=None,
max_x=None,
min_z=None,
max_z=None
):
"""
Place an entity/object in the world.
Find a position that doesn't intersect with any other object.
"""
assert len(self.rooms) > 0, "create rooms before calling place_entity"
assert ent.radius is not None, "entity must have physical size defined"
# Generate collision detection data
if len(self.wall_segs) == 0:
self._gen_static_data()
        # If an exact position is specified
if pos is not None:
ent.dir = dir if dir is not None else self.rand.float(-math.pi, math.pi)
ent.pos = pos
self.entities.append(ent)
return ent
# Keep retrying until we find a suitable position
while True:
# Pick a room, sample rooms proportionally to floor surface area
r = room if room else self.rand.choice(self.rooms, probs=self.room_probs)
# Choose a random point within the square bounding box of the room
lx = r.min_x if min_x is None else min_x
hx = r.max_x if max_x is None else max_x
lz = r.min_z if min_z is None else min_z
hz = r.max_z if max_z is None else max_z
pos = self.rand.float(
low=[lx + ent.radius, 0, lz + ent.radius],
high=[hx - ent.radius, 0, hz - ent.radius]
)
# Make sure the position is within the room's outline
if not r.point_inside(pos):
continue
# Make sure the position doesn't intersect with any walls
if self.intersect(ent, pos, ent.radius):
continue
# Pick a direction
d = dir if dir is not None else self.rand.float(-math.pi, math.pi)
ent.pos = pos
ent.dir = d
break
self.entities.append(ent)
return ent
def place_agent(
self,
room=None,
dir=None,
min_x=None,
max_x=None,
min_z=None,
max_z=None
):
"""
Place the agent in the environment at a random position
and orientation
"""
return self.place_entity(
self.agent,
room=room,
dir=dir,
min_x=min_x,
max_x=max_x,
min_z=min_z,
max_z=max_z
)
def intersect(self, ent, pos, radius):
"""
Check if an entity intersects with the world
"""
# Ignore the Y position
px, _, pz = pos
pos = np.array([px, 0, pz])
# Check for intersection with walls
if intersect_circle_segs(pos, radius, self.wall_segs):
return True
# Check for entity intersection
for ent2 in self.entities:
# Entities can't intersect with themselves
if ent2 is ent:
continue
px, _, pz = ent2.pos
pos2 = np.array([px, 0, pz])
d = np.linalg.norm(pos2 - pos)
if d < radius + ent2.radius:
return ent2
return None
def near(self, ent0, ent1=None):
"""
Test if the two entities are near each other.
Used for "go to" or "put next" type tasks
"""
if ent1 is None:
ent1 = self.agent
dist = np.linalg.norm(ent0.pos - ent1.pos)
return dist < ent0.radius + ent1.radius + 1.1 * self.max_forward_step
def _load_tex(self, tex_name):
"""
Load a texture, with or without domain randomization
"""
rand = self.rand if self.params.sample(self.rand, 'tex_rand') else None
return Texture.get(tex_name, rand)
def _gen_static_data(self):
"""
Generate static data needed for rendering and collision detection
"""
# Generate the static data for each room
for room in self.rooms:
room._gen_static_data(
self.params,
self.rand if self.domain_rand else None
)
# Concatenate the wall segments
self.wall_segs = np.concatenate([r.wall_segs for r in self.rooms])
# Room selection probabilities
self.room_probs = np.array([r.area for r in self.rooms], dtype=float)
self.room_probs /= np.sum(self.room_probs)
def _reward(self):
"""
Default sparse reward computation
"""
return 1.0 - 0.2 * (self.step_count / self.max_episode_steps)
def _render_static(self):
"""
Render the static elements of the scene into a display list.
Called once at the beginning of each episode.
"""
# TODO: manage this automatically
# glIsList
        glDeleteLists(1, 1)
        glNewList(1, GL_COMPILE)
# Light position
glLightfv(GL_LIGHT0, GL_POSITION, (GLfloat * 4)(*self.light_pos + [1]))
# Background/minimum light level
glLightfv(GL_LIGHT0, GL_AMBIENT, (GLfloat * 4)(*self.light_ambient))
# Diffuse light color
glLightfv(GL_LIGHT0, GL_DIFFUSE, (GLfloat * 4)(*self.light_color))
# glLightf(GL_LIGHT0, GL_SPOT_CUTOFF, 180)
# glLightf(GL_LIGHT0, GL_SPOT_EXPONENT, 0)
# glLightf(GL_LIGHT0, GL_CONSTANT_ATTENUATION, 0)
# glLightf(GL_LIGHT0, GL_LINEAR_ATTENUATION, 0)
# glLightf(GL_LIGHT0, GL_QUADRATIC_ATTENUATION, 0)
glEnable(GL_LIGHTING)
glEnable(GL_LIGHT0)
glShadeModel(GL_SMOOTH)
glEnable(GL_COLOR_MATERIAL)
glColorMaterial(GL_FRONT_AND_BACK, GL_AMBIENT_AND_DIFFUSE)
# Render the rooms
glEnable(GL_TEXTURE_2D)
for room in self.rooms:
room._render()
# Render the static entities
for ent in self.entities:
if ent.is_static:
ent.render()
glEndList()
def _render_world(
self,
frame_buffer,
render_agent
):
"""
Render the world from a given camera position into a frame buffer,
and produce a numpy image array as output.
"""
# Call the display list for the static parts of the environment
glCallList(1)
# TODO: keep the non-static entities in a different list for efficiency?
# Render the non-static entities
for ent in self.entities:
if not ent.is_static and ent is not self.agent:
ent.render()
# ent.draw_bound()
if render_agent:
self.agent.render()
# Resolve the rendered image into a numpy array
img = frame_buffer.resolve()
return img
def render_top_view(self, frame_buffer=None):
"""
Render a top view of the whole map (from above)
"""
if frame_buffer is None:
frame_buffer = self.obs_fb
# Switch to the default OpenGL context
# This is necessary on Linux Nvidia drivers
self.shadow_window.switch_to()
# Bind the frame buffer before rendering into it
frame_buffer.bind()
# Clear the color and depth buffers
glClearColor(*self.sky_color, 1.0)
glClearDepth(1.0)
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
# Scene extents to render
min_x = self.min_x - 1
max_x = self.max_x + 1
min_z = self.min_z - 1
max_z = self.max_z + 1
width = max_x - min_x
height = max_z - min_z
aspect = width / height
fb_aspect = frame_buffer.width / frame_buffer.height
# Adjust the aspect extents to match the frame buffer aspect
if aspect > fb_aspect:
# Want to add to denom, add to height
new_h = width / fb_aspect
h_diff = new_h - height
min_z -= h_diff / 2
max_z += h_diff / 2
elif aspect < fb_aspect:
# Want to add to num, add to width
new_w = height * fb_aspect
w_diff = new_w - width
min_x -= w_diff / 2
max_x += w_diff / 2
# Set the projection matrix
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
glOrtho(
min_x,
max_x,
-max_z,
-min_z,
-100, 100.0
)
# Setup the camera
# Y maps to +Z, Z maps to +Y
glMatrixMode(GL_MODELVIEW)
glLoadIdentity()
m = [
1, 0, 0, 0,
0, 0, 1, 0,
0, -1, 0, 0,
0, 0, 0, 1,
]
glLoadMatrixf((GLfloat * len(m))(*m))
return self._render_world(
frame_buffer,
render_agent=True
)
def render_obs(self, frame_buffer=None):
"""
Render an observation from the point of view of the agent
"""
if frame_buffer is None:
frame_buffer = self.obs_fb
# Switch to the default OpenGL context
# This is necessary on Linux Nvidia drivers
self.shadow_window.switch_to()
# Bind the frame buffer before rendering into it
frame_buffer.bind()
# Clear the color and depth buffers
glClearColor(*self.sky_color, 1.0)
glClearDepth(1.0)
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
# Set the projection matrix
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
gluPerspective(
self.agent.cam_fov_y,
frame_buffer.width / float(frame_buffer.height),
0.04,
100.0
)
# Setup the camera
glMatrixMode(GL_MODELVIEW)
glLoadIdentity()
gluLookAt(
# Eye position
*self.agent.cam_pos,
# Target
*(self.agent.cam_pos + self.agent.cam_dir),
# Up vector
0, 1.0, 0.0
)
return self._render_world(
frame_buffer,
render_agent=False
)
def render_depth(self, frame_buffer=None):
"""
Produce a depth map
Values are floating-point, map shape is (H,W,1)
Distances are in meters from the observer
"""
if frame_buffer is None:
frame_buffer = self.obs_fb
# Render the world
self.render_obs(frame_buffer)
return frame_buffer.get_depth_map(0.04, 100.0)
def get_visible_ents(self):
"""
Get a list of visible entities.
Uses OpenGL occlusion queries to approximate visibility.
:return: set of objects visible to the agent
"""
# Allocate the occlusion query ids
num_ents = len(self.entities)
query_ids = (GLuint * num_ents)()
glGenQueries(num_ents, query_ids)
# Switch to the default OpenGL context
# This is necessary on Linux Nvidia drivers
self.shadow_window.switch_to()
# Use the small observation frame buffer
frame_buffer = self.obs_fb
# Bind the frame buffer before rendering into it
frame_buffer.bind()
# Clear the color and depth buffers
glClearColor(*self.sky_color, 1.0)
glClearDepth(1.0)
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
# Set the projection matrix
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
gluPerspective(
self.agent.cam_fov_y,
frame_buffer.width / float(frame_buffer.height),
0.04,
100.0
)
        # Set up the camera
glMatrixMode(GL_MODELVIEW)
glLoadIdentity()
gluLookAt(
# Eye position
*self.agent.cam_pos,
# Target
*(self.agent.cam_pos + self.agent.cam_dir),
# Up vector
0, 1.0, 0.0
)
# Render the rooms, without texturing
glDisable(GL_TEXTURE_2D)
for room in self.rooms:
room._render()
# For each entity
for ent_idx, ent in enumerate(self.entities):
if ent is self.agent:
continue
glBeginQuery(GL_ANY_SAMPLES_PASSED, query_ids[ent_idx])
pos = ent.pos
# glColor3f(1, 0, 0)
drawBox(
x_min=pos[0] - 0.1,
x_max=pos[0] + 0.1,
y_min=pos[1],
y_max=pos[1] + 0.2,
z_min=pos[2] - 0.1,
z_max=pos[2] + 0.1
)
glEndQuery(GL_ANY_SAMPLES_PASSED)
vis_objs = set()
# Get query results
for ent_idx, ent in enumerate(self.entities):
if ent is self.agent:
continue
visible = (GLuint * 1)(1)
            glGetQueryObjectuiv(query_ids[ent_idx], GL_QUERY_RESULT, visible)
if visible[0] != 0:
vis_objs.add(ent)
# Free the occlusion query ids
glDeleteQueries(1, query_ids)
# img = frame_buffer.resolve()
# return img
return vis_objs
def render(self, mode='human', close=False, view='agent'):
"""
Render the environment for human viewing
"""
if close:
if self.window:
self.window.close()
return
# Render the human-view image
assert view in ['agent', 'top']
if view == 'agent':
img = self.render_obs(self.vis_fb)
else:
img = self.render_top_view(self.vis_fb)
img_width = img.shape[1]
img_height = img.shape[0]
if mode == 'rgb_array':
return img
# Render the agent's view
obs = self.render_obs()
obs_width = obs.shape[1]
obs_height = obs.shape[0]
window_width = img_width + self.obs_disp_width
window_height = img_height
if self.window is None:
config = pyglet.gl.Config(double_buffer=True)
self.window = pyglet.window.Window(
width=window_width,
height=window_height,
resizable=False,
config=config
)
self.window.clear()
self.window.switch_to()
# Bind the default frame buffer
        glBindFramebuffer(GL_FRAMEBUFFER, 0)
        # Clear the color and depth buffers
        glClearColor(0, 0, 0, 1.0)
        glClearDepth(1.0)
        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
        # Set up the orthogonal projection
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
glMatrixMode(GL_MODELVIEW)
glLoadIdentity()
glOrtho(0, window_width, 0, window_height, 0, 10)
# Draw the human render to the rendering window
img_flip = np.ascontiguousarray(np.flip(img, axis=0))
img_data = pyglet.image.ImageData(
img_width,
img_height,
'RGB',
img_flip.ctypes.data_as(POINTER(GLubyte)),
pitch=img_width * 3,
)
img_data.blit(
0,
0,
0,
width=img_width,
height=img_height
)
# Draw the observation
obs = np.ascontiguousarray(np.flip(obs, axis=0))
obs_data = pyglet.image.ImageData(
obs_width,
obs_height,
'RGB',
obs.ctypes.data_as(POINTER(GLubyte)),
pitch=obs_width * 3,
)
obs_data.blit(
img_width,
img_height - self.obs_disp_height,
0,
width=self.obs_disp_width,
height=self.obs_disp_height
)
# Draw the text label in the window
self.text_label.text = "pos: (%.2f, %.2f, %.2f)\nangle: %d\nsteps: %d" % (
*self.agent.pos,
int(self.agent.dir * 180 / math.pi) % 360,
self.step_count
)
self.text_label.draw()
# Force execution of queued commands
glFlush()
# If we are not running the Pyglet event loop,
# we have to manually flip the buffers and dispatch events
if mode == 'human':
self.window.flip()
self.window.dispatch_events()
return img

stats: avg_line_length 28.539216 | max_line_length 90 | alphanum_fraction 0.555857
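
A sketch of driving a MiniWorld environment through the classic four-tuple gym loop that step() above implements. The environment id is an assumption (upstream gym-miniworld registers ids such as MiniWorld-OneRoom-v0), and the pre-gymnasium gym API is assumed throughout:

```python
import gym
import gym_miniworld  # noqa: F401  # importing registers the MiniWorld envs

env = gym.make("MiniWorld-OneRoom-v0")  # assumed registered env id
obs = env.reset()                       # RGB image, shape (obs_height, obs_width, 3)
for _ in range(100):
    obs, reward, done, info = env.step(env.actions.move_forward)
    if done:
        obs = env.reset()
env.close()
```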

=== tests/urls.py ===
repo: benjo456/django-admin-honeypot @ 38eac724c42cd40ffd2628f1f74c5e30cf286dd6 | licenses: ["MIT"] | blob: e0033de323bfbb6e11303c2e9b72b24a9a353a63
ext: py | lang: Python | size: 288 bytes
stars: 758 (2015-01-08T07:46:44.000Z to 2022-03-31T03:51:19.000Z) | issues: 61 (2015-01-03T23:28:24.000Z to 2022-03-28T10:39:51.000Z) | forks: 132 (2015-02-07T13:19:27.000Z to 2022-03-13T13:57:30.000Z)

from django.conf.urls import include, url
# Enable the admin:
from django.contrib import admin
admin.autodiscover()
urlpatterns = [
url(r'^admin/', include('admin_honeypot.urls', namespace='admin_honeypot')),
url(r'^secret/', admin.site.urls),
]

stats: avg_line_length 26.181818 | max_line_length 80 | alphanum_fraction 0.729167

=== API/API/urls.py ===
repo: snguyen64/StuddyBuddy @ c11a8daec3827847a1b350434e2ea62eaed78f7e | licenses: ["MIT"] | blob: 35f8dfd35be4a239dcb2463b469f2e065abcd63d
ext: py | lang: Python | size: 1,428 bytes
stars: null | issues: null | forks: null

"""API URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
from studdybuddy_api import views
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^auth/register/$', views.register),
url(r'^auth/login/$', views.login),
url(r'^courses/(?P<id>[0-9]+)/$', views.get_courses),
url(r'^courses/(?P<subject>\w+)/$', views.courses),
url(r'^course/(?P<id>[0-9]+)/store/$', views.store_course),
url(r'^course/(?P<id>[0-9]+)/delete/(?P<subject>\w+)/(?P<number>[0-9]+)/$', views.delete_course),
url(r'^chatroom/create/(?P<id>[0-9]+)/$', views.create_chatroom),
url(r'^chatroom/(?P<name>\w+)/join/(?P<id>[0-9]+)/$', views.join_chatroom),
url(r'^chatroom/list/$', views.get_chatrooms),
url(r'^chatroom/messages/(?P<name>\w+)/$', views.get_messages)
]

stats: avg_line_length 43.272727 | max_line_length 101 | alphanum_fraction 0.657563

=== sklearn/tests/test_random_projection.py (vendored copies) ===
blob: bac639bb199f95ee57da7d3b6bb7ae10802938f1 | ext: py | lang: Python | size: 14,002 bytes
stars: 8 (2019-05-29T09:38:30.000Z to 2021-01-20T03:36:59.000Z) via env/lib/python3.5/site-packages/sklearn/tests/test_random_projection.py in Udolf15/recommedMeMovies @ be5ae74acd98e3f93beaaa5bb55623974fb24247, licenses: ["MIT"]
issues: 12 (2021-03-09T03:01:16.000Z to 2022-03-11T23:59:36.000Z) via venv/lib/python3.7/site-packages/sklearn/tests/test_random_projection.py in John1001Song/Big-Data-Robo-Adviser @ 9444dce96954c546333d5aecc92a06c3bfd19aa5, licenses: ["MIT"]
forks: 3 (2019-06-13T07:10:54.000Z to 2020-09-11T06:01:40.000Z) via the same path and repo as the issues entry

from __future__ import division
import functools
import numpy as np
import scipy.sparse as sp
import pytest
from sklearn.metrics import euclidean_distances
from sklearn.random_projection import johnson_lindenstrauss_min_dim
from sklearn.random_projection import gaussian_random_matrix
from sklearn.random_projection import sparse_random_matrix
from sklearn.random_projection import SparseRandomProjection
from sklearn.random_projection import GaussianRandomProjection
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns
from sklearn.exceptions import DataDimensionalityWarning
all_sparse_random_matrix = [sparse_random_matrix]
all_dense_random_matrix = [gaussian_random_matrix]
all_random_matrix = set(all_sparse_random_matrix + all_dense_random_matrix)
all_SparseRandomProjection = [SparseRandomProjection]
all_DenseRandomProjection = [GaussianRandomProjection]
all_RandomProjection = set(all_SparseRandomProjection +
all_DenseRandomProjection)
# Make some random data with uniformly located non-zero entries and
# Gaussian-distributed values
def make_sparse_random_data(n_samples, n_features, n_nonzeros):
rng = np.random.RandomState(0)
data_coo = sp.coo_matrix(
(rng.randn(n_nonzeros),
(rng.randint(n_samples, size=n_nonzeros),
rng.randint(n_features, size=n_nonzeros))),
shape=(n_samples, n_features))
return data_coo.toarray(), data_coo.tocsr()
def densify(matrix):
if not sp.issparse(matrix):
return matrix
else:
return matrix.toarray()
n_samples, n_features = (10, 1000)
n_nonzeros = int(n_samples * n_features / 100.)
data, data_csr = make_sparse_random_data(n_samples, n_features, n_nonzeros)
###############################################################################
# test on JL lemma
###############################################################################
def test_invalid_jl_domain():
assert_raises(ValueError, johnson_lindenstrauss_min_dim, 100, 1.1)
assert_raises(ValueError, johnson_lindenstrauss_min_dim, 100, 0.0)
assert_raises(ValueError, johnson_lindenstrauss_min_dim, 100, -0.1)
assert_raises(ValueError, johnson_lindenstrauss_min_dim, 0, 0.5)
def test_input_size_jl_min_dim():
assert_raises(ValueError, johnson_lindenstrauss_min_dim,
3 * [100], 2 * [0.9])
assert_raises(ValueError, johnson_lindenstrauss_min_dim, 3 * [100],
2 * [0.9])
johnson_lindenstrauss_min_dim(np.random.randint(1, 10, size=(10, 10)),
np.full((10, 10), 0.5))
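# Illustrative aside (not part of the original test file): the bound computed
# by johnson_lindenstrauss_min_dim grows like
#     4 * log(n_samples) / (eps ** 2 / 2 - eps ** 3 / 3),
# so johnson_lindenstrauss_min_dim(n_samples=1000, eps=0.1) is roughly 5920
# components, independent of the original n_features.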
###############################################################################
# tests random matrix generation
###############################################################################
def check_input_size_random_matrix(random_matrix):
assert_raises(ValueError, random_matrix, 0, 0)
assert_raises(ValueError, random_matrix, -1, 1)
assert_raises(ValueError, random_matrix, 1, -1)
assert_raises(ValueError, random_matrix, 1, 0)
assert_raises(ValueError, random_matrix, -1, 0)
def check_size_generated(random_matrix):
assert_equal(random_matrix(1, 5).shape, (1, 5))
assert_equal(random_matrix(5, 1).shape, (5, 1))
assert_equal(random_matrix(5, 5).shape, (5, 5))
assert_equal(random_matrix(1, 1).shape, (1, 1))
def check_zero_mean_and_unit_norm(random_matrix):
    # All random matrices should produce a transformation matrix
    # with zero mean and unit norm for each column
A = densify(random_matrix(10000, 1, random_state=0))
assert_array_almost_equal(0, np.mean(A), 3)
assert_array_almost_equal(1.0, np.linalg.norm(A), 1)
def check_input_with_sparse_random_matrix(random_matrix):
n_components, n_features = 5, 10
for density in [-1., 0.0, 1.1]:
assert_raises(ValueError,
random_matrix, n_components, n_features, density=density)
@pytest.mark.parametrize("random_matrix", all_random_matrix)
def test_basic_property_of_random_matrix(random_matrix):
# Check basic properties of random matrix generation
check_input_size_random_matrix(random_matrix)
check_size_generated(random_matrix)
check_zero_mean_and_unit_norm(random_matrix)
@pytest.mark.parametrize("random_matrix", all_sparse_random_matrix)
def test_basic_property_of_sparse_random_matrix(random_matrix):
check_input_with_sparse_random_matrix(random_matrix)
random_matrix_dense = functools.partial(random_matrix, density=1.0)
check_zero_mean_and_unit_norm(random_matrix_dense)
def test_gaussian_random_matrix():
    # Check some statistical properties of the Gaussian random matrix.
    # Check that the random matrix follows the proper distribution:
    # each element a_ij of A is drawn from
    # a_ij ~ N(0.0, 1 / n_components).
#
n_components = 100
n_features = 1000
A = gaussian_random_matrix(n_components, n_features, random_state=0)
assert_array_almost_equal(0.0, np.mean(A), 2)
assert_array_almost_equal(np.var(A, ddof=1), 1 / n_components, 1)
def test_sparse_random_matrix():
    # Check some statistical properties of the sparse random matrix
n_components = 100
n_features = 500
for density in [0.3, 1.]:
s = 1 / density
A = sparse_random_matrix(n_components,
n_features,
density=density,
random_state=0)
A = densify(A)
# Check possible values
values = np.unique(A)
assert_in(np.sqrt(s) / np.sqrt(n_components), values)
assert_in(- np.sqrt(s) / np.sqrt(n_components), values)
if density == 1.0:
assert_equal(np.size(values), 2)
else:
assert_in(0., values)
assert_equal(np.size(values), 3)
        # Check that the random matrix follows the proper distribution:
        # each element a_ij of A is drawn from
#
# - -sqrt(s) / sqrt(n_components) with probability 1 / 2s
# - 0 with probability 1 - 1 / s
# - +sqrt(s) / sqrt(n_components) with probability 1 / 2s
#
assert_almost_equal(np.mean(A == 0.0),
1 - 1 / s, decimal=2)
assert_almost_equal(np.mean(A == np.sqrt(s) / np.sqrt(n_components)),
1 / (2 * s), decimal=2)
assert_almost_equal(np.mean(A == - np.sqrt(s) / np.sqrt(n_components)),
1 / (2 * s), decimal=2)
assert_almost_equal(np.var(A == 0.0, ddof=1),
(1 - 1 / s) * 1 / s, decimal=2)
assert_almost_equal(np.var(A == np.sqrt(s) / np.sqrt(n_components),
ddof=1),
(1 - 1 / (2 * s)) * 1 / (2 * s), decimal=2)
assert_almost_equal(np.var(A == - np.sqrt(s) / np.sqrt(n_components),
ddof=1),
(1 - 1 / (2 * s)) * 1 / (2 * s), decimal=2)
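# Illustrative sketch (not part of the original suite; the helper name is
# hypothetical): sampling the three-valued distribution asserted above
# directly with numpy, mirroring the probabilities 1/(2s), 1 - 1/s, 1/(2s).
def _achlioptas_matrix_sketch(n_components, n_features, density,
                              random_state=0):
    rng = np.random.RandomState(random_state)
    s = 1 / density
    # choose -1, 0 or +1 for every entry with the Achlioptas probabilities,
    # then scale the nonzeros to +/- sqrt(s) / sqrt(n_components)
    signs = rng.choice([-1.0, 0.0, 1.0], size=(n_components, n_features),
                       p=[1 / (2 * s), 1 - 1 / s, 1 / (2 * s)])
    return signs * np.sqrt(s) / np.sqrt(n_components)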
###############################################################################
# tests on random projection transformer
###############################################################################
def test_sparse_random_projection_transformer_invalid_density():
for RandomProjection in all_SparseRandomProjection:
assert_raises(ValueError,
RandomProjection(density=1.1).fit, data)
assert_raises(ValueError,
RandomProjection(density=0).fit, data)
assert_raises(ValueError,
RandomProjection(density=-0.1).fit, data)
def test_random_projection_transformer_invalid_input():
for RandomProjection in all_RandomProjection:
assert_raises(ValueError,
RandomProjection(n_components='auto').fit, [[0, 1, 2]])
assert_raises(ValueError,
RandomProjection(n_components=-10).fit, data)
def test_try_to_transform_before_fit():
for RandomProjection in all_RandomProjection:
assert_raises(ValueError,
RandomProjection(n_components='auto').transform, data)
def test_too_many_samples_to_find_a_safe_embedding():
data, _ = make_sparse_random_data(1000, 100, 1000)
for RandomProjection in all_RandomProjection:
rp = RandomProjection(n_components='auto', eps=0.1)
expected_msg = (
'eps=0.100000 and n_samples=1000 lead to a target dimension'
' of 5920 which is larger than the original space with'
' n_features=100')
assert_raise_message(ValueError, expected_msg, rp.fit, data)
def test_random_projection_embedding_quality():
data, _ = make_sparse_random_data(8, 5000, 15000)
eps = 0.2
original_distances = euclidean_distances(data, squared=True)
original_distances = original_distances.ravel()
non_identical = original_distances != 0.0
# remove 0 distances to avoid division by 0
original_distances = original_distances[non_identical]
for RandomProjection in all_RandomProjection:
rp = RandomProjection(n_components='auto', eps=eps, random_state=0)
projected = rp.fit_transform(data)
projected_distances = euclidean_distances(projected, squared=True)
projected_distances = projected_distances.ravel()
# remove 0 distances to avoid division by 0
projected_distances = projected_distances[non_identical]
distances_ratio = projected_distances / original_distances
# check that the automatically tuned values for the density respect the
# contract for eps: pairwise distances are preserved according to the
# Johnson-Lindenstrauss lemma
assert_less(distances_ratio.max(), 1 + eps)
assert_less(1 - eps, distances_ratio.min())
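        # Illustrative note (not part of the original suite): the two
        # assertions above are the two sides of the JL guarantee -- every
        # pairwise squared distance is preserved within a multiplicative
        # factor lying in (1 - eps, 1 + eps).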
def test_SparseRandomProjection_output_representation():
for SparseRandomProjection in all_SparseRandomProjection:
# when using sparse input, the projected data can be forced to be a
# dense numpy array
rp = SparseRandomProjection(n_components=10, dense_output=True,
random_state=0)
rp.fit(data)
assert isinstance(rp.transform(data), np.ndarray)
sparse_data = sp.csr_matrix(data)
assert isinstance(rp.transform(sparse_data), np.ndarray)
# the output can be left to a sparse matrix instead
rp = SparseRandomProjection(n_components=10, dense_output=False,
random_state=0)
rp = rp.fit(data)
# output for dense input will stay dense:
assert isinstance(rp.transform(data), np.ndarray)
        # output for sparse input will stay sparse:
assert sp.issparse(rp.transform(sparse_data))
def test_correct_RandomProjection_dimensions_embedding():
for RandomProjection in all_RandomProjection:
rp = RandomProjection(n_components='auto',
random_state=0,
eps=0.5).fit(data)
# the number of components is adjusted from the shape of the training
# set
assert_equal(rp.n_components, 'auto')
assert_equal(rp.n_components_, 110)
if RandomProjection in all_SparseRandomProjection:
assert_equal(rp.density, 'auto')
assert_almost_equal(rp.density_, 0.03, 2)
assert_equal(rp.components_.shape, (110, n_features))
projected_1 = rp.transform(data)
assert_equal(projected_1.shape, (n_samples, 110))
# once the RP is 'fitted' the projection is always the same
projected_2 = rp.transform(data)
assert_array_equal(projected_1, projected_2)
# fit transform with same random seed will lead to the same results
rp2 = RandomProjection(random_state=0, eps=0.5)
projected_3 = rp2.fit_transform(data)
assert_array_equal(projected_1, projected_3)
# Try to transform with an input X of size different from fitted.
assert_raises(ValueError, rp.transform, data[:, 1:5])
# it is also possible to fix the number of components and the density
# level
if RandomProjection in all_SparseRandomProjection:
rp = RandomProjection(n_components=100, density=0.001,
random_state=0)
projected = rp.fit_transform(data)
assert_equal(projected.shape, (n_samples, 100))
assert_equal(rp.components_.shape, (100, n_features))
assert_less(rp.components_.nnz, 115) # close to 1% density
assert_less(85, rp.components_.nnz) # close to 1% density
def test_warning_n_components_greater_than_n_features():
n_features = 20
data, _ = make_sparse_random_data(5, n_features, int(n_features / 4))
for RandomProjection in all_RandomProjection:
assert_warns(DataDimensionalityWarning,
RandomProjection(n_components=n_features + 1).fit, data)
def test_works_with_sparse_data():
n_features = 20
data, _ = make_sparse_random_data(5, n_features, int(n_features / 4))
for RandomProjection in all_RandomProjection:
rp_dense = RandomProjection(n_components=3,
random_state=1).fit(data)
rp_sparse = RandomProjection(n_components=3,
random_state=1).fit(sp.csr_matrix(data))
assert_array_almost_equal(densify(rp_dense.components_),
densify(rp_sparse.components_))
| 39.111732
| 79
| 0.654121
|
2170fdf9ed9a2d095664f5b03d63a6cf9b3c9979
| 4,470
|
py
|
Python
|
tests/test_builder.py
|
NCAR/ecg
|
0f217ace1ac93532a5a6c5f3435db6074965dd65
|
[
"Apache-2.0"
] | null | null | null |
tests/test_builder.py
|
NCAR/ecg
|
0f217ace1ac93532a5a6c5f3435db6074965dd65
|
[
"Apache-2.0"
] | 2
|
2020-06-01T23:18:24.000Z
|
2020-06-01T23:23:56.000Z
|
tests/test_builder.py
|
ncar-xdev/ecgtools
|
007e9d11c4211d819c96797612395eacd7fb70b4
|
[
"Apache-2.0"
] | 1
|
2020-06-02T04:15:17.000Z
|
2020-06-02T04:15:17.000Z
|
import os
import pathlib
import intake
import pandas as pd
import pytest
from ecgtools import Builder, RootDirectory, glob_to_regex
from ecgtools.parsers.cesm import parse_cesm_history
sample_data_dir = pathlib.Path(os.path.dirname(__file__)).parent / 'sample_data'
@pytest.mark.parametrize(
'path, depth, storage_options,include_patterns, exclude_patterns, num_assets',
[
(str(sample_data_dir / 'cmip' / 'CMIP6'), 10, {}, ['*.nc'], [], 59),
(str(sample_data_dir / 'cmip' / 'cmip5'), 10, {}, ['*.nc'], ['*/esmControl/*'], 27),
('s3://ncar-cesm-lens/atm/monthly', 0, {'anon': True}, [], ['*cesmLE-20C*'], 75),
],
)
def test_directory(path, depth, storage_options, include_patterns, exclude_patterns, num_assets):
include_regex, exclude_regex = glob_to_regex(
include_patterns=include_patterns, exclude_patterns=exclude_patterns
)
directory = RootDirectory(
path=path,
depth=depth,
storage_options=storage_options,
include_regex=include_regex,
exclude_regex=exclude_regex,
)
assets = directory.walk()
assert len(assets) == num_assets
@pytest.mark.parametrize(
'paths, depth, storage_options, include_patterns, exclude_patterns, num_assets',
[
(
[
str(sample_data_dir / 'cmip' / 'CMIP6' / 'CMIP' / 'BCC'),
str(sample_data_dir / 'cmip' / 'CMIP6' / 'CMIP' / 'IPSL'),
],
8,
{},
['*.nc'],
[],
27,
),
(
['s3://ncar-cesm-lens/lnd/monthly', 's3://ncar-cesm-lens/ocn/monthly'],
0,
{'anon': True},
[],
['*cesmLE-20C*', '*cesmLE-RCP85*'],
78,
),
],
)
def test_builder_init(
paths, depth, storage_options, include_patterns, exclude_patterns, num_assets
):
builder = Builder(
paths=paths,
depth=depth,
storage_options=storage_options,
include_patterns=include_patterns,
exclude_patterns=exclude_patterns,
)
builder.get_assets()
assert isinstance(builder.assets, list)
assert len(builder.assets) == num_assets
def parsing_func(file):
return {'path': file, 'variable': 'placeholder'}
def post_process_func(df, times=10):
df['my_column'] = 1 * times
return df
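# Illustrative note (not part of the original tests): Builder.build feeds each
# discovered asset through parsing_func (file path -> dict of catalog columns)
# and then hands the assembled DataFrame to postprocess_func together with
# postprocess_func_kwargs, as exercised in test_builder_build below.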
@pytest.mark.parametrize(
'paths, depth, storage_options, include_patterns, exclude_patterns, num_assets',
[
(
[
str(sample_data_dir / 'cmip' / 'CMIP6' / 'CMIP' / 'BCC'),
str(sample_data_dir / 'cesm'),
],
1,
{},
['*.nc'],
[],
3,
),
(
['s3://ncar-cesm-lens/lnd/static', 's3://ncar-cesm-lens/ocn/static'],
0,
{'anon': True},
[],
['*cesmLE-20C*', '*cesmLE-RCP85*'],
4,
),
],
)
def test_builder_build(
paths, depth, storage_options, include_patterns, exclude_patterns, num_assets
):
builder = Builder(
paths=paths,
depth=depth,
storage_options=storage_options,
include_patterns=include_patterns,
exclude_patterns=exclude_patterns,
)
builder.get_assets()
assert len(builder.assets) == num_assets
builder.build(
parsing_func=parsing_func,
postprocess_func=post_process_func,
postprocess_func_kwargs={'times': 100},
)
assert isinstance(builder.df, pd.DataFrame)
assert len(builder.df) == num_assets
assert set(builder.df.columns) == {'path', 'variable', 'my_column'}
def test_builder_save(tmp_path):
builder = Builder(paths=[str(sample_data_dir / 'cesm')], depth=5, include_patterns=['*.nc'])
builder.get_assets()
builder.assets.append('cesm/nonexistent_file.nc') # Add an invalid file
with pytest.warns(UserWarning):
builder.parse(parsing_func=parse_cesm_history).clean_dataframe()
with pytest.warns(UserWarning):
builder.save(
name='test',
path_column_name='path',
directory=str(tmp_path),
data_format='netcdf',
variable_column_name='variables',
aggregations=[],
groupby_attrs=[],
)
assert not builder.invalid_assets.empty
cat = intake.open_esm_datastore(str(tmp_path / 'test.json'))
assert isinstance(cat.df, pd.DataFrame)
| 29.602649
| 97
| 0.591499
|
876d7e28ac8c8c6e8e08392f119b3c2b3a37333b
| 617
|
py
|
Python
|
packages/hagrid/hagrid/auth.py
|
vishalbelsare/PySyft
|
fb04404fcfbef82fad1fb47407b35a24e9afb599
|
[
"Apache-1.1"
] | 8,428
|
2017-08-10T09:17:49.000Z
|
2022-03-31T08:20:14.000Z
|
packages/hagrid/hagrid/auth.py
|
vishalbelsare/PySyft
|
fb04404fcfbef82fad1fb47407b35a24e9afb599
|
[
"Apache-1.1"
] | 4,779
|
2017-08-09T23:19:00.000Z
|
2022-03-29T11:49:36.000Z
|
packages/hagrid/hagrid/auth.py
|
vishalbelsare/PySyft
|
fb04404fcfbef82fad1fb47407b35a24e9afb599
|
[
"Apache-1.1"
] | 2,307
|
2017-08-10T08:52:12.000Z
|
2022-03-30T05:36:07.000Z
|
# stdlib
from typing import Optional
class AuthCredentials:
def __init__(
self,
username: str,
key_path: Optional[str] = None,
password: Optional[str] = None,
) -> None:
self.username = username
self.key_path = key_path
self.password = password
@property
def uses_key(self) -> bool:
return bool(self.username and self.key_path)
@property
def uses_password(self) -> bool:
return bool(self.username and self.password)
@property
def valid(self) -> bool:
return bool(self.uses_key or self.uses_password)
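# Illustrative usage (not part of the original module): credentials are valid
# with either a key path or a password, never with a username alone.
#
#   >>> AuthCredentials("alice", key_path="~/.ssh/id_rsa").valid
#   True
#   >>> AuthCredentials("alice").valid
#   False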
| 22.851852
| 56
| 0.622366
|
87f9a4ea2305750d53062608a0058d447379a174
| 7,473
|
py
|
Python
|
python-trunk/sfapi2/sflib/ZSI/__init__.py
|
raychorn/svn_molten-magma
|
8aa2ff2340707eecae6514943e86f5afba9cd54a
|
[
"CC0-1.0"
] | null | null | null |
python-trunk/sfapi2/sflib/ZSI/__init__.py
|
raychorn/svn_molten-magma
|
8aa2ff2340707eecae6514943e86f5afba9cd54a
|
[
"CC0-1.0"
] | null | null | null |
python-trunk/sfapi2/sflib/ZSI/__init__.py
|
raychorn/svn_molten-magma
|
8aa2ff2340707eecae6514943e86f5afba9cd54a
|
[
"CC0-1.0"
] | null | null | null |
#! /usr/bin/env python
# $Header: /CVS/sfsrc/sfapi2/sflib/ZSI/__init__.py,v 1.1 2004/07/14 16:47:38 sfscript Exp $
'''ZSI: Zolera Soap Infrastructure.
Copyright 2001, Zolera Systems, Inc. All Rights Reserved.
'''
_copyright = """ZSI: Zolera Soap Infrastructure.
Copyright 2001, Zolera Systems, Inc. All Rights Reserved.
Copyright 2002-2003, Rich Salz. All Rights Reserved.
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, and/or
sell copies of the Software, and to permit persons to whom the Software
is furnished to do so, provided that the above copyright notice(s) and
this permission notice appear in all copies of the Software and that
both the above copyright notice(s) and this permission notice appear in
supporting documentation.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT
OF THIRD PARTY RIGHTS. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS
INCLUDED IN THIS NOTICE BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT
OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
OR PERFORMANCE OF THIS SOFTWARE.
Except as contained in this notice, the name of a copyright holder
shall not be used in advertising or otherwise to promote the sale, use
or other dealings in this Software without prior written authorization
of the copyright holder.
"""
##
## Stuff imported from elsewhere.
from xml.dom import Node as _Node
import types as _types
##
## Public constants.
ZSI_SCHEMA_URI = 'http://www.zolera.com/schemas/ZSI/'
##
## Not public constants.
_inttypes = [ _types.IntType, _types.LongType ]
_floattypes = [ _types.FloatType ]
_seqtypes = [ _types.TupleType, _types.ListType ]
_stringtypes = [ _types.StringType, _types.UnicodeType ]
##
## Low-level DOM oriented utilities; useful for typecode implementors.
_attrs = lambda E: (E.attributes and E.attributes.values()) or []
_children = lambda E: E.childNodes or []
_child_elements = lambda E: [ n for n in (E.childNodes or [])
if n.nodeType == _Node.ELEMENT_NODE ]
##
## Stuff imported from elsewhere.
from ZSI.wstools.Namespaces import SOAP as _SOAP, SCHEMA as _SCHEMA
##
## Low-level DOM oriented utilities; useful for typecode implementors.
_find_arraytype = lambda E: E.getAttributeNS(_SOAP.ENC, "arrayType")
_find_encstyle = lambda E: E.getAttributeNS(_SOAP.ENV, "encodingStyle")
try:
    from xml.dom import EMPTY_NAMESPACE
    _empty_nsuri_list = [ EMPTY_NAMESPACE ]
    # make sure both legacy spellings of "no namespace" are in the single
    # list that _find_attr below iterates over
    if '' not in _empty_nsuri_list: _empty_nsuri_list.append('')
    if None not in _empty_nsuri_list: _empty_nsuri_list.append(None)
except:
    _empty_nsuri_list = [ None, '' ]
def _find_attr(E, attr):
for nsuri in _empty_nsuri_list:
try:
v = E.getAttributeNS(nsuri, attr)
if v: return v
except: pass
return None
_find_href = lambda E: _find_attr(E, "href")
_find_xsi_attr = lambda E, attr: \
E.getAttributeNS(_SCHEMA.XSI3, attr) \
or E.getAttributeNS(_SCHEMA.XSI1, attr) \
or E.getAttributeNS(_SCHEMA.XSI2, attr)
_find_type = lambda E: _find_xsi_attr(E, "type")
_textprotect = lambda s: s.replace('&', '&amp;').replace('<', '&lt;')
_textunprotect = lambda s: s.replace('&lt;', '<').replace('&amp;', '&')
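# e.g. (illustrative, not in the original source):
#   _textprotect('a < b & c')   -> 'a &lt; b &amp; c'
#   _textunprotect('a &lt; b')  -> 'a < b'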
def _valid_encoding(elt):
'''Does this node have a valid encoding?
'''
enc = _find_encstyle(elt)
if not enc or enc == _SOAP.ENC: return 1
for e in enc.split():
if e.startswith(_SOAP.ENC):
# XXX Is this correct? Once we find a Sec5 compatible
# XXX encoding, should we check that all the rest are from
# XXX that same base? Perhaps. But since the if test above
# XXX will surely get 99% of the cases, leave it for now.
return 1
return 0
def _backtrace(elt, dom):
'''Return a "backtrace" from the given element to the DOM root,
in XPath syntax.
'''
s = ''
while elt != dom:
name, parent = elt.nodeName, elt.parentNode
if parent is None: break
matches = [ c for c in _child_elements(parent)
if c.nodeName == name ]
if len(matches) == 1:
s = '/' + name + s
else:
i = matches.index(elt) + 1
s = ('/%s[%d]' % (name, i)) + s
elt = parent
return s
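# Illustrative example (not in the original source): for a document like
# <a><b/><b><c/></b></a>, _backtrace applied to the <c> element yields
# '/a/b[2]/c' -- siblings sharing a tag name get a 1-based XPath index.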
##
## Exception classes.
class ZSIException(Exception):
'''Base class for all ZSI exceptions.
'''
pass
class ParseException(ZSIException):
'''Exception raised during parsing.
'''
def __init__(self, str, inheader, elt=None, dom=None):
Exception.__init__(self)
self.str, self.inheader, self.trace = str, inheader, None
if elt and dom:
self.trace = _backtrace(elt, dom)
def __str__(self):
if self.trace:
return self.str + ': [Element trace: ' + self.trace + ']'
return self.str
def __repr__(self):
return "<%s.ParseException at 0x%x>" % (__name__, long(id(self)))
class EvaluateException(ZSIException):
'''Exception raised during data evaluation (serialization).
'''
def __init__(self, str, trace=None):
Exception.__init__(self)
self.str, self.trace = str, trace
def __str__(self):
if self.trace:
return self.str + ': [Element trace: ' + self.trace + ']'
return self.str
def __repr__(self):
return "<%s.EvaluateException at 0x%x>" % (__name__, long(id(self)))
class FaultException(ZSIException):
'''Exception raised when a fault is received.
'''
def __init__(self, fault):
self.fault = fault
self.str = fault.string
def __str__(self):
return self.str
def __repr__(self):
return "<%s.FaultException at 0x%x>" % (__name__, long(id(self)))
##
## Importing the rest of ZSI.
import version
def Version():
return version.Version
from writer import SoapWriter
from parse import ParsedSoap
from fault import Fault, \
FaultFromActor, FaultFromException, FaultFromFaultMessage, \
FaultFromNotUnderstood, FaultFromZSIException
import TC
TC.RegisterType(TC.Void)
TC.RegisterType(TC.String)
TC.RegisterType(TC.URI)
TC.RegisterType(TC.Base64String)
TC.RegisterType(TC.HexBinaryString)
TC.RegisterType(TC.Integer)
TC.RegisterType(TC.Decimal)
TC.RegisterType(TC.Boolean)
TC.RegisterType(TC.Duration)
TC.RegisterType(TC.gDateTime)
TC.RegisterType(TC.gDate)
TC.RegisterType(TC.gYearMonth)
TC.RegisterType(TC.gYear)
TC.RegisterType(TC.gMonthDay)
TC.RegisterType(TC.gDay)
TC.RegisterType(TC.gTime)
TC.RegisterType(TC.Apache.Map)
try:
from ServiceProxy import *
except:
pass
if __name__ == '__main__': print _copyright
| 32.920705
| 92
| 0.666399
|
bce43292baed8403343d093bef07d6f64fd67872
| 281
|
py
|
Python
|
main.py
|
michaelscales88/irrigation-site
|
847bfb5b874d3813b71ea2939fd83459a6a1f991
|
[
"MIT"
] | null | null | null |
main.py
|
michaelscales88/irrigation-site
|
847bfb5b874d3813b71ea2939fd83459a6a1f991
|
[
"MIT"
] | null | null | null |
main.py
|
michaelscales88/irrigation-site
|
847bfb5b874d3813b71ea2939fd83459a6a1f991
|
[
"MIT"
] | null | null | null |
if __name__ == '__main__':
"""
Start the server by running:
python main.py 8080 development.cfg
"""
import sys
from server import create_server
port = sys.argv[1]
args = sys.argv[1:]
server = create_server(*args)
server.run(port=int(port))
| 21.615385
| 39
| 0.626335
|
c578ccfaeb23a6c00ec198cec15711117f9e1983
| 5,381
|
py
|
Python
|
setup.py
|
lamhoangtung/dvc
|
291eba153637eef0925677c729c5a1a3ca8804a9
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
lamhoangtung/dvc
|
291eba153637eef0925677c729c5a1a3ca8804a9
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
lamhoangtung/dvc
|
291eba153637eef0925677c729c5a1a3ca8804a9
|
[
"Apache-2.0"
] | null | null | null |
import importlib.util
import os
from pathlib import Path
from setuptools import find_packages, setup
from setuptools.command.build_py import build_py as _build_py
# Prevents pkg_resources import in entry point script,
# see https://github.com/ninjaaron/fast-entry_points.
# This saves about 200 ms on startup time for non-wheel installs.
try:
import fastentrypoints # noqa: F401, pylint: disable=unused-import
except ImportError:
pass # not able to import when installing through pre-commit
# Read package meta-data from version.py
# see https://packaging.python.org/guides/single-sourcing-package-version/
pkg_dir = os.path.dirname(os.path.abspath(__file__))
version_path = os.path.join(pkg_dir, "dvc", "version.py")
spec = importlib.util.spec_from_file_location("dvc.version", version_path)
dvc_version = importlib.util.module_from_spec(spec)
spec.loader.exec_module(dvc_version)
version = dvc_version.__version__ # noqa: F821
# To achieve consistency between the build version and the one provided
# by your package during runtime, you need to **pin** the build version.
#
# This custom class will replace the version.py module with a **static**
# `__version__` that your package can read at runtime, assuring consistency.
#
# References:
# - https://docs.python.org/3.7/distutils/extending.html
# - https://github.com/python/mypy
class build_py(_build_py):
def pin_version(self):
path = os.path.join(self.build_lib, "dvc")
self.mkpath(path)
with open(os.path.join(path, "version.py"), "w") as fobj:
fobj.write("# AUTOGENERATED at build time by setup.py\n")
fobj.write('__version__ = "{}"\n'.format(version))
def run(self):
self.execute(self.pin_version, ())
_build_py.run(self)
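# For illustration (not part of the original file): after a build, the pinned
# module written above, build/lib/dvc/version.py, contains exactly:
#
#   # AUTOGENERATED at build time by setup.py
#   __version__ = "<the version read from dvc/version.py>"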
install_requires = [
"ply>=3.9", # See https://github.com/pyinstaller/pyinstaller/issues/1945
"colorama>=0.3.9",
"configobj>=5.0.6",
"gitpython>3",
"dulwich>=0.20.21",
"pygit2>=1.5.0",
"setuptools>=34.0.0",
"nanotime>=0.5.2",
"pyasn1>=0.4.1",
"voluptuous>=0.11.7",
"jsonpath-ng>=1.5.1",
"requests>=2.22.0",
"grandalf==0.6",
"distro>=1.3.0",
"appdirs>=1.4.3",
"ruamel.yaml>=0.16.1",
"toml>=0.10.1",
"funcy>=1.14",
"pathspec>=0.6.0",
"shortuuid>=0.5.0",
"tqdm>=4.45.0,<5",
"packaging>=19.0",
"zc.lockfile>=1.2.1",
"flufl.lock>=3.2,<4",
"win-unicode-console>=0.5; sys_platform == 'win32'",
"pywin32>=225; sys_platform == 'win32'",
"networkx>=2.1",
"psutil>=5.8.0",
"pydot>=1.2.4",
"speedcopy>=2.0.1; python_version < '3.8' and sys_platform == 'win32'",
"dataclasses==0.7; python_version < '3.7'",
"flatten_dict>=0.3.0,<1",
"tabulate>=0.8.7",
"pygtrie==2.3.2",
"dpath>=2.0.1,<3",
"shtab>=1.3.4,<2",
"rich>=10.0.0",
"dictdiffer>=0.8.1",
"python-benedict>=0.21.1",
"pyparsing==2.4.7",
"typing_extensions>=3.7.4",
"fsspec>=0.8.5",
"diskcache>=5.2.1",
]
# Extra dependencies for remote integrations
gs = ["gcsfs>=0.7.2"]
gdrive = ["pydrive2>=1.8.1", "six >= 1.13.0"]
s3 = ["boto3>=1.9.201"]
azure = ["adlfs>=0.7.0", "azure-identity>=1.4.0", "knack"]
# https://github.com/Legrandin/pycryptodome/issues/465
oss = ["oss2==2.6.1", "pycryptodome>=3.10"]
ssh = ["paramiko[invoke]>=2.7.0"]
# Remove the env marker if/when pyarrow is available for Python3.9
hdfs = ["pyarrow>=2.0.0"]
webhdfs = ["hdfs==2.5.8"]
webdav = ["webdavclient3>=3.14.5"]
# gssapi should not be included in all_remotes, because it doesn't have wheels
# for linux and mac, so it will fail to compile if user doesn't have all the
# requirements, including kerberos itself. Once all the wheels are available,
# we can start shipping it by default.
ssh_gssapi = ["paramiko[invoke,gssapi]>=2.7.0"]
all_remotes = gs + s3 + azure + ssh + oss + gdrive + hdfs + webhdfs + webdav
tests_requirements = (
Path("test_requirements.txt").read_text().strip().splitlines()
)
setup(
name="dvc",
version=version,
description="Git for data scientists - manage your code and data together",
long_description=open("README.rst", "r", encoding="UTF-8").read(),
author="Dmitry Petrov",
author_email="dmitry@dvc.org",
download_url="https://github.com/iterative/dvc",
license="Apache License 2.0",
install_requires=install_requires,
extras_require={
"all": all_remotes,
"gs": gs,
"gdrive": gdrive,
"s3": s3,
"azure": azure,
"oss": oss,
"ssh": ssh,
"ssh_gssapi": ssh_gssapi,
"hdfs": hdfs,
"webhdfs": webhdfs,
"webdav": webdav,
"tests": tests_requirements,
},
keywords="data-science data-version-control machine-learning git"
" developer-tools reproducibility collaboration ai",
python_requires=">=3.6",
classifiers=[
"Development Status :: 4 - Beta",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
],
packages=find_packages(exclude=["tests"]),
include_package_data=True,
url="http://dvc.org",
entry_points={"console_scripts": ["dvc = dvc.main:main"]},
cmdclass={"build_py": build_py},
zip_safe=False,
)
| 33.01227
| 79
| 0.642817
|
f9752dd2d7c4d11c21146838c03b4b641767ddd4
| 410
|
py
|
Python
|
tests/urls.py
|
andytwoods/django-gitabix
|
c72867675364072d32be94d31c197430c1d46ae4
|
[
"MIT"
] | null | null | null |
tests/urls.py
|
andytwoods/django-gitabix
|
c72867675364072d32be94d31c197430c1d46ae4
|
[
"MIT"
] | null | null | null |
tests/urls.py
|
andytwoods/django-gitabix
|
c72867675364072d32be94d31c197430c1d46ae4
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, absolute_import
from django.conf.urls import include
from django.contrib import admin
from django.urls import path
from django.views.generic import TemplateView
urlpatterns = [
path('admin/', admin.site.urls),
path('', TemplateView.as_view(template_name="base.html"), name='home'),
path('', include('django_gitabix.urls')),
]
| 29.285714
| 75
| 0.734146
|
77ec0651a813832953f8129ca6d4e84343560f41
| 5,955
|
py
|
Python
|
eggs/ZODB-4.1.0-py2.7.egg/ZODB/scripts/fsrefs.py
|
salayhin/talkofacta
|
8b5a14245dd467bb1fda75423074c4840bd69fb7
|
[
"MIT"
] | null | null | null |
eggs/ZODB-4.1.0-py2.7.egg/ZODB/scripts/fsrefs.py
|
salayhin/talkofacta
|
8b5a14245dd467bb1fda75423074c4840bd69fb7
|
[
"MIT"
] | null | null | null |
eggs/ZODB-4.1.0-py2.7.egg/ZODB/scripts/fsrefs.py
|
salayhin/talkofacta
|
8b5a14245dd467bb1fda75423074c4840bd69fb7
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
##############################################################################
#
# Copyright (c) 2002 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
"""Check FileStorage for dangling references.
usage: fsrefs.py [-v] data.fs
fsrefs.py checks object sanity by trying to load the current revision of
every object O in the database, and also verifies that every object
directly reachable from each such O exists in the database.
It's hard to explain exactly what it does because it relies on undocumented
features in Python's cPickle module: many of the crucial steps of loading
an object are taken, but application objects aren't actually created. This
saves a lot of time, and allows fsrefs to be run even if the code
implementing the object classes isn't available.
A read-only connection to the specified FileStorage is made, but it is not
recommended to run fsrefs against a live FileStorage. Because a live
FileStorage is mutating while fsrefs runs, it's not possible for fsrefs to
get a wholly consistent view of the database across the entire time fsrefs
is running; spurious error messages may result.
fsrefs doesn't normally produce any output. If an object fails to load, the
oid of the object is given in a message saying so, and if -v was specified
then the traceback corresponding to the load failure is also displayed
(this is the only effect of the -v flag).
Three other kinds of errors are also detected, when an object O loads OK,
and directly refers to a persistent object P but there's a problem with P:
- If P doesn't exist in the database, a message saying so is displayed.
   The unsatisfiable reference to P is often called a "dangling
reference"; P is called "missing" in the error output.
- If the current state of the database is such that P's creation has
been undone, then P can't be loaded either. This is also a kind of
dangling reference, but is identified as "object creation was undone".
- If P can't be loaded (but does exist in the database), a message saying
that O refers to an object that can't be loaded is displayed.
fsrefs also (indirectly) checks that the .index file is sane, because
fsrefs uses the index to get its idea of what constitutes "all the objects
in the database".
Note these limitations: because fsrefs only looks at the current revision
of objects, it does not attempt to load objects in versions, or non-current
revisions of objects; therefore fsrefs cannot find problems in versions or
in non-current revisions.
"""
from __future__ import print_function
import traceback
from ZODB.FileStorage import FileStorage
from ZODB.TimeStamp import TimeStamp
from ZODB.utils import u64, oid_repr, get_pickle_metadata
from ZODB.serialize import get_refs
from ZODB.POSException import POSKeyError
# There's a problem with oid. 'data' is its pickle, and 'serial' its
# serial number. 'missing' is a list of (oid, class, reason) triples,
# explaining what the problem(s) is(are).
def report(oid, data, serial, missing):
from_mod, from_class = get_pickle_metadata(data)
if len(missing) > 1:
plural = "s"
else:
plural = ""
ts = TimeStamp(serial)
print("oid %s %s.%s" % (hex(u64(oid)), from_mod, from_class))
print("last updated: %s, tid=%s" % (ts, hex(u64(serial))))
print("refers to invalid object%s:" % plural)
for oid, info, reason in missing:
if isinstance(info, tuple):
description = "%s.%s" % info
else:
description = str(info)
print("\toid %s %s: %r" % (oid_repr(oid), reason, description))
print()
def main(path=None):
verbose = 0
if path is None:
import sys
import getopt
opts, args = getopt.getopt(sys.argv[1:], "v")
for k, v in opts:
if k == "-v":
verbose += 1
path, = args
fs = FileStorage(path, read_only=1)
# Set of oids in the index that failed to load due to POSKeyError.
# This is what happens if undo is applied to the transaction creating
# the object (the oid is still in the index, but its current data
# record has a backpointer of 0, and POSKeyError is raised then
# because of that backpointer).
undone = {}
# Set of oids that were present in the index but failed to load.
# This does not include oids in undone.
noload = {}
for oid in fs._index.keys():
try:
data, serial = fs.load(oid, "")
except (KeyboardInterrupt, SystemExit):
raise
except POSKeyError:
undone[oid] = 1
except:
if verbose:
traceback.print_exc()
noload[oid] = 1
inactive = noload.copy()
inactive.update(undone)
for oid in fs._index.keys():
if oid in inactive:
continue
data, serial = fs.load(oid, "")
refs = get_refs(data)
missing = [] # contains 3-tuples of oid, klass-metadata, reason
for ref, klass in refs:
if klass is None:
klass = '<unknown>'
if ref not in fs._index:
missing.append((ref, klass, "missing"))
if ref in noload:
missing.append((ref, klass, "failed to load"))
if ref in undone:
missing.append((ref, klass, "object creation was undone"))
if missing:
report(oid, data, serial, missing)
if __name__ == "__main__":
main()
| 38.419355
| 78
| 0.664819
|
862c0703f58a1565e2a0e9b088df5afd35363f31
| 2,962
|
py
|
Python
|
ouroboros/snake.py
|
RatJuggler/ouroboros
|
99471f4e9513d21b6ade59c99f939022d0e855bd
|
[
"MIT"
] | null | null | null |
ouroboros/snake.py
|
RatJuggler/ouroboros
|
99471f4e9513d21b6ade59c99f939022d0e855bd
|
[
"MIT"
] | 2
|
2020-04-05T11:45:03.000Z
|
2020-06-19T20:05:16.000Z
|
ouroboros/snake.py
|
RatJuggler/ouroboros
|
99471f4e9513d21b6ade59c99f939022d0e855bd
|
[
"MIT"
] | null | null | null |
import pygame
from typing import List, Optional
from ouroboros.cell import Cell, RIGHT
from ouroboros.display import Display
from ouroboros.sounds import Sounds
from ouroboros.sprite_images import SpriteImages
from ouroboros.utils import Point
class Head(Cell):
def __init__(self, display: Display, images: SpriteImages, at: Point) -> None:
super(Head, self).__init__(display, images, at, RIGHT)
self._prev_cell = None
self._prev_direction = None
def mark_prev(self) -> None:
self._prev_cell = self._cell
self._prev_direction = self._direction
def get_prev_direction(self) -> str:
return self._prev_direction
def grow_body(self) -> 'Body':
return Body(self._display, self._images, self._prev_cell, self._prev_direction)
class Body(Cell):
def __init__(self, display: Display, images: SpriteImages, at_cell: Point, direction: str) -> None:
super(Body, self).__init__(display, images, at_cell, direction)
class Tail(Cell):
def __init__(self, display: Display, images: SpriteImages, at_cell: Point) -> None:
super(Tail, self).__init__(display, images, at_cell, RIGHT)
class Snake:
def __init__(self, head: Head, tail: Tail, sounds: Sounds) -> None:
self._head = head
self._body = []
self._tail = tail
self._sounds = sounds
self._eating = False
@classmethod
def new_snake(cls, display: Display, images: SpriteImages, sounds: Sounds) -> 'Snake':
snake_start = display.get_center()
head = Head(display, images, snake_start)
tail = Tail(display, images, (snake_start[0] - 1, snake_start[1]))
return Snake(head, tail, sounds)
def move_head(self, new_direction: str) -> bool:
self._head.mark_prev()
return self._head.move_in(new_direction) and pygame.sprite.spritecollideany(self._head, self._body) is None
def move_body(self) -> None:
prev_segment_direction = self._head.get_prev_direction()
for segment in self._body:
curr_segment_direction = segment.get_direction()
segment.move_in(prev_segment_direction)
prev_segment_direction = curr_segment_direction
self._tail.move_in(prev_segment_direction)
def render(self) -> None:
follow_direction = self._head.render(str(self._eating))
for segment in self._body:
follow_direction = segment.render(follow_direction)
self._tail.render(follow_direction)
def eats(self, cells: List[Cell]) -> Optional[Cell]:
eats = pygame.sprite.spritecollideany(self._head, cells)
if eats:
self._eating = True
self._body.insert(0, self._head.grow_body())
else:
self._eating = False
return eats
    def is_on(self, cell: Cell) -> bool:
        return bool(pygame.sprite.collide_rect(cell, self._head) or pygame.sprite.spritecollideany(cell, self._body))
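# Illustrative note (not part of the original module): growth works because
# Head.mark_prev() remembers the cell and direction the head is about to
# vacate, so grow_body() can place the new Body segment exactly where the
# head just was -- the snake lengthens by one without overlapping itself.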
| 34.045977
| 115
| 0.674544
|
7b1ff4f41640e9a586e597c6d0ff18626a0dd82a
| 1,233
|
py
|
Python
|
tests/unit/havok_header_test.py
|
zephenryus/botw-havok
|
9665555551ba0b5b864efb168da402c7f73ff365
|
[
"MIT"
] | 1
|
2020-10-11T07:07:43.000Z
|
2020-10-11T07:07:43.000Z
|
tests/unit/havok_header_test.py
|
zephenryus/botw-havok
|
9665555551ba0b5b864efb168da402c7f73ff365
|
[
"MIT"
] | null | null | null |
tests/unit/havok_header_test.py
|
zephenryus/botw-havok
|
9665555551ba0b5b864efb168da402c7f73ff365
|
[
"MIT"
] | 1
|
2020-10-11T07:07:45.000Z
|
2020-10-11T07:07:45.000Z
|
import unittest
import havok
class TestHavokHeader(unittest.TestCase):
def test_it_can_decompile_a_header(self):
""" @test it can decompile a header
Given the file G-6-2.hksc
When the file is passed to the Havok Header class
Then the header should contain the file signature, correct header size and Havok version string
"""
with open('../assets/G-6-2.hksc', 'rb') as infile:
header = havok.Header(infile)
self.assertEqual(header.signature, b'W\xe0\xe0W\x10\xc0\xc0\x10')
self.assertEqual(header.size, 64)
self.assertEqual(header.version, 'hk_2014.2.0-r1')
def test_it_can_decompile_a_long_header(self):
""" @test it can decompile a long header
Given the file 19-13.hknm2
When the file is passed to the Header class
Then the header size should be 80 bytes
"""
with open('../assets/19-13.hknm2', 'rb') as infile:
header = havok.Header(infile)
self.assertEqual(header.signature, b'W\xe0\xe0W\x10\xc0\xc0\x10')
self.assertEqual(header.size, 80)
self.assertEqual(header.version, 'hk_2014.2.0-r1')
| 37.363636
| 103
| 0.635036
|
81dd2b7c804bb60eb7d3d49b08bebcb731f3c130
| 915
|
py
|
Python
|
test/test_constant_agent.py
|
CAVED123/Tensorforce
|
823177f77f9047b1e71eccfffc08315ed1636878
|
[
"Apache-2.0"
] | 1
|
2019-10-18T17:36:28.000Z
|
2019-10-18T17:36:28.000Z
|
test/test_constant_agent.py
|
CAVED123/Tensorforce
|
823177f77f9047b1e71eccfffc08315ed1636878
|
[
"Apache-2.0"
] | null | null | null |
test/test_constant_agent.py
|
CAVED123/Tensorforce
|
823177f77f9047b1e71eccfffc08315ed1636878
|
[
"Apache-2.0"
] | 1
|
2020-07-13T03:00:34.000Z
|
2020-07-13T03:00:34.000Z
|
# Copyright 2018 Tensorforce Team. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import unittest
from test.unittest_agent import UnittestAgent
class TestConstantAgent(UnittestAgent, unittest.TestCase):
num_episodes = 2
config = dict(type='constant')
has_experience = False
has_update = False
| 33.888889
| 80
| 0.695082
|
a81a6e5e122804f6b0826931706c6d8a3a069869
| 3,263
|
py
|
Python
|
twkit/visualization/tweetsperuser.py
|
evaperon/twAwler
|
8e9f2064cad846177ed6547b9f56f053226a2d5e
|
[
"Apache-2.0"
] | 5
|
2018-12-06T16:14:14.000Z
|
2020-05-22T07:36:45.000Z
|
twkit/visualization/tweetsperuser.py
|
evaperon/twAwler
|
8e9f2064cad846177ed6547b9f56f053226a2d5e
|
[
"Apache-2.0"
] | null | null | null |
twkit/visualization/tweetsperuser.py
|
evaperon/twAwler
|
8e9f2064cad846177ed6547b9f56f053226a2d5e
|
[
"Apache-2.0"
] | 3
|
2020-04-20T07:20:18.000Z
|
2021-08-19T17:31:38.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
###########################################
# (c) 2016-2020 Polyvios Pratikakis
# polyvios@ics.forth.gr
###########################################
'''
Tool for generating tweet distributions
Flag "--vectorized" works with uservectors only, subset of users, but faster
Otherwise, counts everything (may take a while)
'''
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
from progress.bar import Bar
from twkit.utils import *
import optparse
parser = optparse.OptionParser()
parser.add_option('-v', '--verbose', action='store_true', dest='verbose', default=False, help='List names of tracked users')
parser.add_option('--vectorized', action='store_true', dest='vectorized', default=False, help='List only vectorized users.')
parser.add_option('-g', '--greek', action='store_true', dest='greek', default=False, help='List only greek users.')
(options, args) = parser.parse_args()
db,api = init_state(use_cache=False)
twittercounts = []
crawlercounts = []
if options.vectorized:
vectors = db.uservectors.find({}, {'tweet_count': 1, 'seen_total': 1})
if options.verbose:
vectors = Bar("Processing:", max=vectors.count(), suffix = '%(index)d/%(max)d - %(eta_td)s').iter(vectors)
for v in vectors:
twittercounts.append(v['tweet_count'])
crawlercounts.append(v['seen_total'])
elif options.greek:
greeks = db.greeks.find().batch_size(1)
if options.verbose:
greeks = Bar("Processing:", max=greeks.count(), suffix = '%(index)d/%(max)d - %(eta_td)s').iter(greeks)
for g in greeks:
cursor = db.tweets.aggregate([
{ '$match': { 'user.id' : g['id'] } },
{ '$group':
{ '_id': '$user.id',
'count': {'$sum': 1}
}
}],
allowDiskUse=True
)
for c in cursor:
who = c['_id']
whou = lookup_user(db, who)
if whou is None:
print("missing user: {}".format(who))
continue
crawlercounts.append(c['count'])
twittercounts.append(whou.get('statuses_count', 0))
else:
cursor = db.tweets.aggregate([
{ '$match': { 'user.id' : {'$gt': 0} } },
{ '$group':
{ '_id': '$user.id',
'count': {'$sum': 1}
}
}],
allowDiskUse=True
)
if options.verbose:
cursor = Bar("Processing:", suffix = '%(index)d/%(max)d - %(eta_td)s').iter(cursor)
for c in cursor:
who = c['_id']
whou = lookup_user(db, who)
if whou is None:
print("missing user: {}".format(who))
continue
crawlercounts.append(c['count'])
twittercounts.append(whou.get('statuses_count', 0))
with open("twittercounts.txt", "w") as f:
for cnt in twittercounts:
f.write("{}\n".format(cnt))
with open("crawlercounts.txt", "w") as f:
for cnt in crawlercounts:
f.write("{}\n".format(cnt))
sorted_tw = np.sort(twittercounts)
sorted_cr = np.sort(crawlercounts)
plt.xscale('log')
plt.yscale('log')
twyvals=np.arange(len(sorted_tw))/float(len(sorted_tw)-1)
cryvals=np.arange(len(sorted_cr))/float(len(sorted_cr)-1)
plt.plot(sorted_tw, 1-twyvals, label='twitter')
plt.plot(sorted_cr, 1-cryvals, label='crawled')
plt.xlabel('Tweets')
plt.ylabel('CCDF')
plt.legend(loc=3)
plt.savefig('tweetsperuser.png', bbox_inches='tight', pad_inches=0)
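# Illustrative note (not part of the original script): np.sort plus
# 1 - arange(n)/(n - 1) plots the empirical survival function (CCDF); on the
# log-log axes above, a roughly straight line is the usual visual signature
# of a heavy-tailed tweets-per-user distribution.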
| 31.375
| 124
| 0.63224
|
3e36b25e169e7860b1dab4873cd6de78cc1f4471
| 484
|
py
|
Python
|
plotly/validators/scatterternary/marker/colorbar/titlefont/_size.py
|
faezs/plotly.py
|
6009b5b9c746e5d2a2849ad255a4eb234b551ed7
|
[
"MIT"
] | 2
|
2020-03-24T11:41:14.000Z
|
2021-01-14T07:59:43.000Z
|
plotly/validators/scatterternary/marker/colorbar/titlefont/_size.py
|
faezs/plotly.py
|
6009b5b9c746e5d2a2849ad255a4eb234b551ed7
|
[
"MIT"
] | null | null | null |
plotly/validators/scatterternary/marker/colorbar/titlefont/_size.py
|
faezs/plotly.py
|
6009b5b9c746e5d2a2849ad255a4eb234b551ed7
|
[
"MIT"
] | 4
|
2019-06-03T14:49:12.000Z
|
2022-01-06T01:05:12.000Z
|
import _plotly_utils.basevalidators
class SizeValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(
self,
plotly_name='size',
parent_name='scatterternary.marker.colorbar.titlefont',
**kwargs
):
super(SizeValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type='colorbars',
min=1,
role='style',
**kwargs
)
| 24.2
| 66
| 0.595041
|
f19845461f725a13ff2f4608436120e9dc301085
| 2,092
|
py
|
Python
|
fpb/base/common.py
|
cloudmercato/python-fpb
|
5da2773e3bb7068bfb9cde920c5b754eecdf342e
|
[
"BSD-3-Clause"
] | 3
|
2021-12-29T20:39:53.000Z
|
2021-12-29T23:53:02.000Z
|
fpb/base/common.py
|
cloudspectatordevelopment/python-fpb
|
5da2773e3bb7068bfb9cde920c5b754eecdf342e
|
[
"BSD-3-Clause"
] | null | null | null |
fpb/base/common.py
|
cloudspectatordevelopment/python-fpb
|
5da2773e3bb7068bfb9cde920c5b754eecdf342e
|
[
"BSD-3-Clause"
] | null | null | null |
import sys
import time
import math
import random
import logging
logger = logging.getLogger('fpb.runner')
class TypeTooSmall(Exception):
pass
class Runner:
"""Base class for all runner."""
random = random
math = math
extra_data = {}
_dtype = None
TypeTooSmall = TypeTooSmall
def __init__(self, **kwargs):
for key, value in kwargs.items():
logger.debug("Set '%s' = '%s'", key, value)
setattr(self, key, value)
def get_dtype(self):
"""Used by some framework"""
return self._dtype
def get_prepare_kwargs(self):
return {} # noqa
def get_run_kwargs(self):
return {} # noqa
def check_output(self, output):
pass
def start(self):
logger.info("Prepare test")
prepare_kwargs = self.get_prepare_kwargs()
logger.debug("Prepare kwargs: %s", prepare_kwargs)
prepare_kwargs['dtype'] = self.get_dtype()
data = self.prepare(**prepare_kwargs)
input_size = sys.getsizeof(data)
logger.debug("Prepare done")
logger.info("Run test")
run_kwargs = self.get_run_kwargs()
logger.debug("Run kwargs: %s", run_kwargs)
start_time = time.time()
output = self.run(data, **run_kwargs)
logger.debug("Output: %s", output)
end_time = time.time()
logger.debug("Run done")
self.tear_down()
self.check_output(output)
return (
(end_time - start_time) * 1000,
input_size,
)
def prepare(self, **kwargs):
msg = "Data preparation isn't set." # noqa
raise NotImplementedError(msg)
def tear_down(self):
pass
def run(self, data, **kwargs):
msg = "Run isn't set." # noqa
raise NotImplementedError(msg)
class Runner1dMixin:
def get_prepare_kwargs(self):
return {
'size': self.size,
}
class Runner2dMixin:
def get_prepare_kwargs(self):
return {
'size': self.size,
'size_y': self.size_y,
}
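# Illustrative sketch (hypothetical subclass, not part of the original
# module): the minimal contract a concrete runner has to satisfy --
# prepare() builds the payload, run() performs the timed operation.
class _SleepRunnerSketch(Runner):
    def prepare(self, dtype=None, **kwargs):
        return [0] * 10  # any payload; start() reports its sys.getsizeof
    def run(self, data, **kwargs):
        time.sleep(0.01)  # stand-in for the benchmarked operation
        return data
# _SleepRunnerSketch().start() returns (elapsed_ms, input_size_bytes).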
| 22.73913
| 58
| 0.57935
|
53cb7bbdb6b9b3ebdc82991237df334f1034184d
| 3,665
|
py
|
Python
|
run_afferent_stimulation.py
|
FormentoEmanuele/BioS
|
81219019ef47f1b6515791e8cdd4e0c1e2a81e3d
|
[
"MIT"
] | 1
|
2021-02-18T17:06:25.000Z
|
2021-02-18T17:06:25.000Z
|
run_afferent_stimulation.py
|
FormentoEmanuele/BioS
|
81219019ef47f1b6515791e8cdd4e0c1e2a81e3d
|
[
"MIT"
] | null | null | null |
run_afferent_stimulation.py
|
FormentoEmanuele/BioS
|
81219019ef47f1b6515791e8cdd4e0c1e2a81e3d
|
[
"MIT"
] | 1
|
2020-07-19T07:28:28.000Z
|
2020-07-19T07:28:28.000Z
|
import argparse
from simulations.afferent_stimulation import AfferentStimulation
from simulations.parameters import StimParameters, FiberParameters, FIBERS_LENGTH_UM, PROPAGATION_DELAY_MS, \
FASCICLE_RADIUS_UM, MEAN_DIAMETER_UM, STD_DIAMETER
def main():
""" Main script running an AfferentStimulation simulation with cli defined input parameters.
The results from this simulation are saved in the results folder (see Simulation._results_folder).
"""
parser = argparse.ArgumentParser(description="Run a AfferentStimulation simulation.")
parser.add_argument("-n", "--n-fibers", help="number of fibers", type=int, default=100)
parser.add_argument("-a", "--stim-amp", help="Simulation amplitude (mA)", type=float, default=-0.045)
parser.add_argument("--min-stim-amp", help="Simulation min amplitude (mA)", type=float, default=-0.01)
parser.add_argument("-f", "--stim-freq", help="Stimulation frequency (Hz)", type=int, default=40)
parser.add_argument("-p", "--pulse-width", help="Stimulation pulse width (us)", type=float, default=50.)
parser.add_argument("-b", "--bios", help="flag to use bios burst stimulation", action="store_true")
parser.add_argument("--burst-frequency", help="Stimulation frequency within a bios burst (Hz)", type=float,
default=8000.)
parser.add_argument("-d", "--burst-duration", help="Bios burst duration (ms)", type=float, default=20.)
parser.add_argument("-t", "--sim-time", help="Simulation time (ms)", type=int, default=500)
parser.add_argument("--sim-name", help="String to append at the end of the result files", type=str, default="")
parser.add_argument("--plot-response-stats", help="Flag to plot the stimulation response statistics",
action="store_true")
parser.add_argument("-w", "--plot-window", help="Flag to plot a specific window of data", action="store_true")
parser.add_argument("--plot-window-duration", help="Duration in ms of the window to plot", type=float, default=150.)
parser.add_argument("--non-blocking-plots", help="Flag to use non-blocking plots", action="store_true")
parser.add_argument("--results-folder", help="Path to folder where the results are saved", type=str, default=None)
args = parser.parse_args()
stim_parameters = StimParameters(
frequency=args.stim_freq,
pulse_width_ms=args.pulse_width / 1000.,
amplitude_ma=args.stim_amp,
bios=args.bios,
burst_frequency=args.burst_frequency,
burst_duration_ms=args.burst_duration,
min_amplitude_ma=args.min_stim_amp,
)
fiber_parameters = FiberParameters(
n_fibers=args.n_fibers,
length_um=FIBERS_LENGTH_UM,
mean_diameter_um=MEAN_DIAMETER_UM,
std_diameter=STD_DIAMETER,
propagation_delay_ms=PROPAGATION_DELAY_MS,
fascicle_radius_um=FASCICLE_RADIUS_UM
)
simulation = AfferentStimulation(fiber_parameters, stim_parameters, args.sim_time)
if args.results_folder is not None:
simulation.set_results_folder(args.results_folder)
simulation.run()
if args.plot_response_stats:
simulation.plot_stim_response_stats(args.sim_name, block=False)
block = not args.non_blocking_plots
simulation.plot(args.sim_name, block)
if args.plot_window:
start_from_stim_event_n = 3
start_ms = AfferentStimulation.START_STIM_TIME_MS + start_from_stim_event_n * (1000./args.stim_freq) - 1
simulation.plot(args.sim_name, block, window_ms=[start_ms, start_ms+args.plot_window_duration])
simulation.save_results(args.sim_name)
if __name__ == '__main__':
main()
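# Example invocations (illustrative, built from the flags defined above):
#   python run_afferent_stimulation.py -n 200 -f 40 -a -0.045
#   python run_afferent_stimulation.py -b --burst-frequency 8000 -w --plot-window-duration 150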
| 54.701493
| 120
| 0.718417
|
482b5921c1699087510a15dc01b315078c2fd70e
| 2,061
|
py
|
Python
|
miniad/tensor.py
|
basicv8vc/miniautodiff
|
1ce038276df45760ab3cb7875b35b46d5ee0b27f
|
[
"MIT"
] | 1
|
2021-09-01T09:05:14.000Z
|
2021-09-01T09:05:14.000Z
|
miniad/tensor.py
|
shaoyf9/miniautodiff
|
1ce038276df45760ab3cb7875b35b46d5ee0b27f
|
[
"MIT"
] | null | null | null |
miniad/tensor.py
|
shaoyf9/miniautodiff
|
1ce038276df45760ab3cb7875b35b46d5ee0b27f
|
[
"MIT"
] | 1
|
2021-08-10T09:29:35.000Z
|
2021-08-10T09:29:35.000Z
|
# encoding: utf-8
import numpy as np
import miniad.functional as F
class Tensor:
def __init__(self, data, children=None, grad_fn=None):
        if isinstance(data, np.ndarray):
            self.data = data
        elif isinstance(data, (list, tuple)):
            self.data = np.array(data)
        else:
            self.data = data  # scalar is ok
self.grad = None
self.children = children
self.grad_fn = grad_fn
self.save_for_backward = None # something for backward, like exponent in power function, or Tensor + scalar
def __add__(self, other):
out = F.add(self, other)
return out
def __radd__(self, other):
out = F.add(self, other)
return out
    def __sub__(self, other):
        out = F.minus(self, other)
        return out
    def __rsub__(self, other):
        # reflected form computes other - self (assuming F.minus(a, b) is a - b)
        out = F.minus(other, self)
        return out
    def __mul__(self, other):
        out = F.multiply(self, other)
        return out
    def __rmul__(self, other):
        out = F.multiply(self, other)
        return out
    def __truediv__(self, other):
        out = F.divison(self, other)
        return out
    def __rtruediv__(self, other):
        # reflected form computes other / self (assuming F.divison(a, b) is a / b)
        out = F.divison(other, self)
        return out
def backward(self, out_grad=None):
        '''Backpropagation
        Only the root node should call the backward function.
Parameters
----------
out_grad: float
'''
self.grad = np.ones_like(self.data) if self.grad is None else self.grad
self.grad = self.grad if out_grad is None else self.grad + np.ones_like(
self.data) * out_grad
candidates = [self]
while candidates:
curr_node = candidates.pop()
if curr_node.grad_fn is not None:
curr_node.children = curr_node.grad_fn(curr_node,
*curr_node.children)
for item in curr_node.children:
candidates.append(item)
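# Illustrative usage sketch (not part of the original module; assumes
# miniad.functional's add/multiply behave as their names suggest):
#
#   >>> a = Tensor(np.array([1.0, 2.0]))
#   >>> b = Tensor(np.array([3.0, 4.0]))
#   >>> c = a * b + a          # builds the graph via __mul__ / __add__
#   >>> c.backward()           # walks children via grad_fn, filling .grad
#   >>> a.grad                 # expected b.data + 1 under those semantics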
| 25.7625
| 116
| 0.558467
|
b7499b06c23802cc8a7671faa376cb928a221108
| 859
|
py
|
Python
|
conu/apidefs/__init__.py
|
QuirianCordova/reto-ejercicio3
|
56d006fd2f5feeb6d88891cf4d2f90ba3bc60dd5
|
[
"MIT"
] | null | null | null |
conu/apidefs/__init__.py
|
QuirianCordova/reto-ejercicio3
|
56d006fd2f5feeb6d88891cf4d2f90ba3bc60dd5
|
[
"MIT"
] | null | null | null |
conu/apidefs/__init__.py
|
QuirianCordova/reto-ejercicio3
|
56d006fd2f5feeb6d88891cf4d2f90ba3bc60dd5
|
[
"MIT"
] | 1
|
2019-11-14T20:27:38.000Z
|
2019-11-14T20:27:38.000Z
|
# -*- coding: utf-8 -*-
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
This python module contains definitions in form of abstract classes and classes with generic code
which works across all backends. Abstract definitions need to be implemented by backends.
"""
| 40.904762
| 97
| 0.76135
|
cdb74d515efd5c4af0603718e8d4c1a59c3c5601
| 1,278
|
py
|
Python
|
api/stress_test.py
|
stevenman42/architus
|
86f0c065172e27ae02603d6ef4e7d4eedfb6dbe4
|
[
"MIT"
] | null | null | null |
api/stress_test.py
|
stevenman42/architus
|
86f0c065172e27ae02603d6ef4e7d4eedfb6dbe4
|
[
"MIT"
] | null | null | null |
api/stress_test.py
|
stevenman42/architus
|
86f0c065172e27ae02603d6ef4e7d4eedfb6dbe4
|
[
"MIT"
] | null | null | null |
from threading import Thread
from collections import Counter
import requests
from datetime import datetime
import time
results = []
def get(url):
global results
now = datetime.now()
r = requests.get(url)
results.append((r, (datetime.now() - now).total_seconds(), now))
if __name__ == '__main__':
num = 150
rate_ps = 10
# url = 'https://api.archit.us:8000/stats/436189230390050826/messagecount'
url = 'https://api.archit.us:8000/guild_count'
# url = 'http://localhost:5000/guild_count'
print(f'{url} at {rate_ps} r/s')
now = datetime.now()
threads = [Thread(target=get, args=(url,)) for _ in range(num)]
for thread in threads:
thread.start()
time.sleep(1 / rate_ps)
for thread in threads:
thread.join()
total_time = 0
codes = []
for result in sorted(results, key=lambda x: x[2]):
print(f"{result[0].status_code} {result[1]:.2f} {'*' * round(result[1] * 10)}")
total_time += result[1]
codes.append(result[0].status_code)
print('----------------')
for code, count in Counter(codes).items():
print(f"{code}s: {count}")
print(f"Avg time: {total_time/num:.2f}s")
print(f"Total time: {(datetime.now() - now).total_seconds()}s")
| 27.782609
| 87
| 0.611111
|
3a64e7e96526868e21ac7c3c3955e73a877e87c2
| 774
|
py
|
Python
|
gquant/plugin_nodes/transform/sortNode.py
|
philtrade/gQuant
|
08b2a82a257c234b92f097b925f25cab16fd0926
|
[
"Apache-2.0"
] | 2
|
2021-08-13T03:02:22.000Z
|
2022-03-13T15:00:01.000Z
|
gquant/plugin_nodes/transform/sortNode.py
|
philtrade/gQuant
|
08b2a82a257c234b92f097b925f25cab16fd0926
|
[
"Apache-2.0"
] | 3
|
2020-10-06T16:07:34.000Z
|
2021-04-28T20:30:14.000Z
|
gquant/plugin_nodes/transform/sortNode.py
|
philtrade/gQuant
|
08b2a82a257c234b92f097b925f25cab16fd0926
|
[
"Apache-2.0"
] | 1
|
2021-03-22T19:54:38.000Z
|
2021-03-22T19:54:38.000Z
|
from gquant.dataframe_flow import Node
class SortNode(Node):
def process(self, inputs):
"""
Sort the input frames based on a list of columns, which are defined
in the `keys` of the node's conf
        Parameters
        ----------
inputs: list
list of input dataframes.
Returns
-------
dataframe
"""
input_df = inputs[0]
return input_df.sort_values(self.conf['keys'])
def columns_setup(self):
self.delayed_process = True
if __name__ == "__main__":
from gquant.dataloader.csvStockLoader import CsvStockLoader
loader = CsvStockLoader("id0", {}, True, False)
df = loader([])
sf = SortNode("id2", {"keys": ["asset", 'datetime']})
df2 = sf([df])
| 23.454545
| 75
| 0.578811
|
804e59e38a9e380d0c909d33251434ed181415eb
| 2,799
|
py
|
Python
|
Subscription/migrations/0001_initial.py
|
xzengCB/FreeFishMaster
|
14418e108d1a25c56ff2e9801f4256f05f154c67
|
[
"MIT"
] | null | null | null |
Subscription/migrations/0001_initial.py
|
xzengCB/FreeFishMaster
|
14418e108d1a25c56ff2e9801f4256f05f154c67
|
[
"MIT"
] | null | null | null |
Subscription/migrations/0001_initial.py
|
xzengCB/FreeFishMaster
|
14418e108d1a25c56ff2e9801f4256f05f154c67
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2017-07-20 09:06
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Analysis',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('createdDT', models.DateTimeField(auto_now_add=True)),
('modifiedDT', models.DateTimeField(auto_now_add=True)),
],
),
migrations.CreateModel(
name='AnalysisItem',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('createdDT', models.DateTimeField(auto_now_add=True)),
('modifiedDT', models.DateTimeField(auto_now_add=True)),
('analysisID', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='Subscription.Analysis')),
],
),
migrations.CreateModel(
name='Item',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=200)),
('description', models.TextField()),
('price', models.FloatField()),
('link', models.CharField(max_length=512)),
('imgLink', models.CharField(max_length=512)),
('modifiedDT', models.DateTimeField(auto_now_add=True)),
('createdDT', models.DateTimeField(auto_now_add=True)),
],
),
migrations.CreateModel(
name='Subscription',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('keywords', models.CharField(max_length=200)),
('priceLow', models.FloatField(default=0)),
('priceHigh', models.FloatField()),
('createdDT', models.DateTimeField(auto_now_add=True)),
('modifiedDT', models.DateTimeField(auto_now_add=True)),
],
),
migrations.AddField(
model_name='analysisitem',
name='itemID',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='Subscription.Item'),
),
migrations.AddField(
model_name='analysis',
name='subscriptionID',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='Subscription.Subscription'),
),
]
| 40.565217
| 123
| 0.578778
|
32d457b9418eed87899e5525b4364777248329ef
| 5,965
|
py
|
Python
|
tests/data/azure/storage.py
|
ryohare/cartography
|
3829a97289bc2b28d041e6d7860678e2375dcee1
|
[
"Apache-2.0"
] | 2,322
|
2019-03-02T01:07:20.000Z
|
2022-03-31T20:39:12.000Z
|
tests/data/azure/storage.py
|
ryohare/cartography
|
3829a97289bc2b28d041e6d7860678e2375dcee1
|
[
"Apache-2.0"
] | 462
|
2019-03-07T18:38:11.000Z
|
2022-03-31T14:55:20.000Z
|
tests/data/azure/storage.py
|
ryohare/cartography
|
3829a97289bc2b28d041e6d7860678e2375dcee1
|
[
"Apache-2.0"
] | 246
|
2019-03-03T02:39:23.000Z
|
2022-02-24T09:46:38.000Z
|
DESCRIBE_STORAGE_ACCOUNTS = [
{
"id": "/subscriptions/00-00-00-00/resourceGroups/TestRG/providers/Microsoft.Storage/storageAccounts/testSG1",
"kind": "Storage",
"location": "Central India",
"name": "testSG1",
"is_hns_enabled": True,
"creation_time": "2017-05-24T13:24:47.818801Z",
"primary_location": "Central India",
"provisioning_state": "Succeeded",
"secondary_location": "West US 2",
"status_of_primary": "available",
"status_of_secondary": "available",
"enable_https_traffic_only": False,
"type": "Microsoft.Storage/storageAccounts",
},
{
"id": "/subscriptions/00-00-00-00/resourceGroups/TestRG/providers/Microsoft.Storage/storageAccounts/testSG2",
"kind": "Storage",
"location": "Central India",
"name": "testSG2",
"is_hns_enabled": True,
"creation_time": "2017-05-24T13:24:47.818801Z",
"primary_location": "Central India",
"provisioning_state": "Succeeded",
"secondary_location": "West US 2",
"status_of_primary": "available",
"status_of_secondary": "available",
"enable_https_traffic_only": False,
"type": "Microsoft.Storage/storageAccounts",
},
]
sa1 = "/subscriptions/00-00-00-00/resourceGroups/TestRG/providers/Microsoft.Storage/storageAccounts/testSG1"
sa2 = "/subscriptions/00-00-00-00/resourceGroups/TestRG/providers/Microsoft.Storage/storageAccounts/testSG2"
DESCRIBE_QUEUE_SERVICES = [
{
"id": sa1 + "/queueServices/QS1",
"name": "QS1",
"type": "Microsoft.Storage/storageAccounts/queueServices",
"storage_account_id": sa1,
},
{
"id": sa2 + "/queueServices/QS2",
"name": "QS2",
"type": "Microsoft.Storage/storageAccounts/queueServices",
"storage_account_id": sa2,
},
]
DESCRIBE_TABLE_SERVICES = [
{
"id": sa1 + "/tableServices/TS1",
"name": "TS1",
"type": "Microsoft.Storage/storageAccounts/tableServices",
"storage_account_id": sa1,
},
{
"id": sa2 + "/tableServices/TS2",
"name": "TS2",
"type": "Microsoft.Storage/storageAccounts/tableServices",
"storage_account_id": sa2,
},
]
DESCRIBE_FILE_SERVICES = [
{
"id": sa1 + "/fileServices/FS1",
"name": "FS1",
"type": "Microsoft.Storage/storageAccounts/fileServices",
"storage_account_id": sa1,
},
{
"id": sa2 + "/fileServices/FS2",
"name": "FS2",
"type": "Microsoft.Storage/storageAccounts/fileServices",
"storage_account_id": sa2,
},
]
DESCRIBE_BLOB_SERVICES = [
{
"id": sa1 + "/blobServices/BS1",
"name": "BS1",
"type": "Microsoft.Storage/storageAccounts/blobServices",
"storage_account_id": sa1,
},
{
"id": sa2 + "/blobServices/BS2",
"name": "BS2",
"type": "Microsoft.Storage/storageAccounts/blobServices",
"storage_account_id": sa2,
},
]
DESCRIBE_QUEUE = [
{
"id": sa1 + "/queueServices/QS1/queues/queue1",
"name": "queue1",
"type": "Microsoft.Storage/storageAccounts/queueServices/queues",
"service_id": sa1 + "/queueServices/QS1",
},
{
"id": sa2 + "/queueServices/QS2/queues/queue2",
"name": "queue2",
"type": "Microsoft.Storage/storageAccounts/queueServices/queues",
"service_id": sa2 + "/queueServices/QS2",
},
]
DESCRIBE_TABLES = [
{
"id": sa1 + "/tableServices/TS1/tables/table1",
"name": "table1",
"type": "Microsoft.Storage/storageAccounts/tableServices/tables",
"service_id": sa1 + "/tableServices/TS1",
},
{
"id": sa2 + "/tableServices/TS2/tables/table2",
"name": "table2",
"type": "Microsoft.Storage/storageAccounts/tableServices/tables",
"service_id": sa2 + "/tableServices/TS2",
},
]
DESCRIBE_FILE_SHARES = [
{
"id": sa1 + "/fileServices/FS1/shares/share1",
"name": "share1",
"type": "Microsoft.Storage/storageAccounts/fileServices/shares",
"etag": "\"0x8D589847D51C7DE\"",
"last_modified_time": "2019-05-14T08:20:47Z",
"share_quota": 1024,
"version": "1234567890",
"deleted": True,
"deleted_time": "2019-12-14T08:20:47Z",
"remaining_retention_days": 30,
"service_id": sa1 + "/fileServices/FS1",
},
{
"id": sa2 + "/fileServices/FS2/shares/share2",
"name": "share2",
"type": "Microsoft.Storage/storageAccounts/fileServices/shares",
"etag": "\"0x8D589847D51C7DE\"",
"last_modified_time": "2019-05-14T08:20:47Z",
"share_quota": 1024,
"version": "1234567890",
"remaining_retention_days": 30,
"service_id": sa2 + "/fileServices/FS2",
},
]
DESCRIBE_BLOB_CONTAINERS = [
{
"id": sa1 + "/blobServices/BS1/containers/container1",
"name": "container1",
"type": "Microsoft.Storage/storageAccounts/blobServices/containers",
"etag": "\"0x8D589847D51C7DE\"",
"public_access": "Container",
"lease_status": "Unlocked",
"lease_state": "Available",
"last_modified_time": "2018-03-14T08:20:47Z",
"has_immutability_policy": False,
"has_legal_hold": False,
"service_id": sa1 + "/blobServices/BS1",
},
{
"id": sa2 + "/blobServices/BS2/containers/container2",
"name": "container2",
"type": "Microsoft.Storage/storageAccounts/blobServices/containers",
"etag": "\"0x8D589847D51C7DE\"",
"public_access": "Container",
"lease_status": "Unlocked",
"lease_state": "Available",
"last_modified_time": "2018-03-14T08:20:47Z",
"has_immutability_policy": False,
"has_legal_hold": False,
"service_id": sa2 + "/blobServices/BS2",
},
]
| 31.394737
| 117
| 0.591282
|
732d7120366c2308ced81fe74b798a6b13114103
| 1,201
|
py
|
Python
|
exercise_04/exercise_code/networks/optimizer.py
|
stanley-chang/I2DL
|
78740460e1f52ce7643358fc548281f1bbe73a42
|
[
"RSA-MD"
] | null | null | null |
exercise_04/exercise_code/networks/optimizer.py
|
stanley-chang/I2DL
|
78740460e1f52ce7643358fc548281f1bbe73a42
|
[
"RSA-MD"
] | null | null | null |
exercise_04/exercise_code/networks/optimizer.py
|
stanley-chang/I2DL
|
78740460e1f52ce7643358fc548281f1bbe73a42
|
[
"RSA-MD"
] | null | null | null |
# Naive Optimizer using full batch gradient descent
import os
import pickle
import numpy as np
from exercise_code.networks.linear_model import *
class Optimizer(object):
def __init__ (self, model, learning_rate=5e-5):
self.model = model
self.lr = learning_rate
def step(self, dw):
"""
:param dw: [D+1,1] array gradient of loss w.r.t weights of your linear model
:return weight: [D+1,1] updated weight after one step of gradient descent
"""
weight = self.model.W
#########################################################################
# TODO: #
# Implement the gradient descent for 1 step to compute the weight #
#########################################################################
weight = weight - self.lr*dw
#########################################################################
# END OF YOUR CODE #
#########################################################################
self.model.W = weight
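# Worked example (hypothetical toy model, not part of the exercise code): one
# step of w <- w - lr * dw on a [D+1, 1] weight vector.
if __name__ == "__main__":
    class _ToyModel:
        def __init__(self):
            self.W = np.array([[1.0], [2.0]])

    toy = _ToyModel()
    Optimizer(toy, learning_rate=0.1).step(np.array([[0.5], [0.5]]))
    print(toy.W)  # -> [[0.95], [1.95]]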
| 42.892857
| 85
| 0.381349
|
62de31c9c809bccc6c280d2428c51549bf10789d
| 492
|
py
|
Python
|
ex03.py
|
Rajab322/lpthw
|
bde26ca21bd1c72807c93fff15a45a1154ba59d7
|
[
"MIT"
] | 329
|
2017-02-25T15:06:58.000Z
|
2022-03-31T18:22:21.000Z
|
ex3.py
|
dkorzhevin/learn-python3-thw-code
|
bea1e954d52ed845c3ade7ed87d7bef7de1651ad
|
[
"MIT"
] | 10
|
2017-02-26T13:55:38.000Z
|
2020-02-20T06:10:26.000Z
|
ex3.py
|
dkorzhevin/learn-python3-thw-code
|
bea1e954d52ed845c3ade7ed87d7bef7de1651ad
|
[
"MIT"
] | 180
|
2017-02-25T20:42:03.000Z
|
2022-02-09T05:21:40.000Z
|
print("I will now count my chickens:")
print("Hens", 25 + 30 / 6)
print("Roosters", 100 - 25 * 3 % 4)
print("Now I will count the eggs:")
print(3 + 2 + 1 - 5 + 4 % 2 - 1 / 4 + 6)
print("Is it true that 3 + 2 < 5 - 7?")
print(3 + 2 < 5 - 7)
print("What is 3 + 2?", 3 + 2)
print("What is 5 - 7?", 5 - 7)
print("Oh, that's why it's False.")
print("How about some more.")
print("Is it greater?", 5 > -2)
print("Is it greater or equal?", 5 >= -2)
print("Is it less or equal?", 5 <= -2)
| 18.923077
| 41
| 0.54878
|
5b5e55e1c88290786e6e42263383014bfa60d5e5
| 21,798
|
py
|
Python
|
app/eval/dtree_parse.py
|
ForomePlatform/anfisa
|
c5bf9ad3200fc4e9cf7be21648b7ee5beb3eb35c
|
[
"Apache-2.0"
] | 8
|
2019-03-26T16:07:46.000Z
|
2021-12-30T13:38:06.000Z
|
app/eval/dtree_parse.py
|
ForomePlatform/anfisa
|
c5bf9ad3200fc4e9cf7be21648b7ee5beb3eb35c
|
[
"Apache-2.0"
] | 13
|
2018-11-07T19:37:20.000Z
|
2022-02-21T17:11:45.000Z
|
app/eval/dtree_parse.py
|
ForomePlatform/anfisa
|
c5bf9ad3200fc4e9cf7be21648b7ee5beb3eb35c
|
[
"Apache-2.0"
] | 15
|
2018-10-16T08:15:11.000Z
|
2022-02-21T14:07:29.000Z
|
# Copyright (c) 2019. Partners HealthCare and other members of
# Forome Association
#
# Developed by Sergey Trifonov based on contributions by Joel Krier,
# Michael Bouzinier, Shamil Sunyaev and other members of Division of
# Genetics, Brigham and Women's Hospital
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys, ast
from hashlib import md5
from forome_tools.log_err import logException
from .code_works import normalizeCode
from .code_parse import parseCodeByPortions
#===============================================
class TreeFragment:
def __init__(self, level, tp, line_diap,
base_instr = None, err_info = None, decision = None,
cond_data = None, cond_atoms = None, label = None):
self.mLevel = level
self.mType = tp
self.mLineDiap = line_diap
self.mBaseInstr = base_instr
self.mErrInfo = err_info
self.mCondData = cond_data
self.mDecision = decision
self.mCondAtoms = cond_atoms if cond_atoms is not None else []
self.mLabel = label
def setLineDiap(self, base_diap, full_diap):
self.mBaseLineDiap = base_diap
self.mFullLineDiap = full_diap
def getLevel(self):
return self.mLevel
def getInstrType(self):
return self.mType
def getBaseInstr(self):
return self.mBaseInstr
def getLineDiap(self):
return self.mLineDiap
def getErrorInfo(self):
return self.mErrInfo
def getCondData(self):
return self.mCondData
def getDecision(self):
return self.mDecision
def getLabel(self):
return self.mLabel
def getCondAtoms(self):
return self.mCondAtoms
#===============================================
class CondAtomInfo:
def __init__(self, cond_data, location, warn_msg = None):
self.mCondData = cond_data
self.mLoc = location
self.mErrorMsg = warn_msg
def setError(self, error_msg):
self.mErrorMsg = error_msg
def getLoc(self):
return self.mLoc
def getCondData(self):
return self.mCondData
def resetCondData(self, values):
self.mCondData[:] = values
def getErrorMsg(self):
return self.mErrorMsg
#===============================================
class ParsedDTree:
def __init__(self, eval_space, dtree_code):
self.mEvalSpace = eval_space
self.mFragments = []
self.mCode = normalizeCode(dtree_code)
self.mDummyLinesReg = set()
self.mLabels = dict()
self.mFirstError = None
hash_h = md5()
code_lines = self.mCode.splitlines()
for parsed_d, err_info, line_diap in parseCodeByPortions(
code_lines, self.mDummyLinesReg):
fragments = []
if err_info is None:
assert len(parsed_d.body) == 1
self.mError = None
self.mCurLineDiap = line_diap
try:
instr_d = parsed_d.body[0]
if isinstance(instr_d, ast.Return):
fragments.append(TreeFragment(0, "Return", line_diap,
decision = self.getReturnValue(instr_d)))
elif (isinstance(instr_d, ast.Expr)
and isinstance(instr_d.value, ast.Call)):
fragments.append(self.processCall(instr_d.value,
len(self.mFragments)))
elif isinstance(instr_d, ast.If):
fragments += self.processIf(instr_d)
else:
self.errorIt(instr_d,
"Instructon must be of if-type")
for frag_h in fragments:
line_from, line_to = frag_h.getLineDiap()
for line_no in range(line_from, line_to):
if line_no not in self.mDummyLinesReg:
hash_h.update(bytes(code_lines[line_no - 1],
"utf-8"))
hash_h.update(b'\n')
except Exception as err:
if self.mError is None:
logException("Exception on parse tree code")
raise err
err_info = self.mError
if err_info is not None:
fragments = [TreeFragment(0, "Error", line_diap,
err_info = err_info)]
if self.mFirstError is None:
self.mFirstError = err_info
self.mFragments += fragments
self.mHashCode = hash_h.hexdigest()
self.mCurLineDiap = None
self.mError = None
self.mCondAtoms = None
for frag_h in self.mFragments:
self.mError = frag_h.getErrorInfo()
if self.mError is not None:
break
if self.mError is None:
for idx, frag_h in enumerate(self.mFragments[:-1]):
if frag_h.getLevel() == 0 and frag_h.getDecision() is not None:
err_info = ("Final instruction not in final place",
frag_h.getLineDiap()[0], 0)
self.mFragments[idx] = TreeFragment(0, "Error",
frag_h.getLineDiap(), err_info = err_info)
self.mError = err_info
break
if self.mError is None:
last_frag_h = self.mFragments[-1]
if last_frag_h.getLevel() > 0 or last_frag_h.getDecision() is None:
err_info = ("Final instruction must return True or False",
last_frag_h.getLineDiap()[0], 0)
                self.mFragments[-1] = TreeFragment(0, "Error",
                    last_frag_h.getLineDiap(), err_info = err_info)
self.mError = err_info
if self.mFirstError is None:
self.mFirstError = self.mError
def getError(self):
return self.mFirstError
def getTreeCode(self):
return self.mCode
def getEvalSpace(self):
return self.mEvalSpace
def getFragments(self):
return self.mFragments
def getHashCode(self):
return self.mHashCode
def isLineIsDummy(self, line_no):
return line_no in self.mDummyLinesReg
def errorIt(self, it, msg_text):
self.mError = (msg_text,
it.lineno + self.mCurLineDiap[0] - 1, it.col_offset)
if self.mFirstError is None:
self.mFirstError = self.mError
raise RuntimeError()
def errorMsg(self, line_no, col_offset, msg_text):
self.mError = (msg_text,
line_no + self.mCurLineDiap[0] - 1, col_offset)
if self.mFirstError is None:
self.mFirstError = self.mError
raise RuntimeError()
def _regCondAtom(self, cond_data, it, it_name, warn_msg = None):
self.mCondAtoms.append(
CondAtomInfo(cond_data,
[it.lineno + self.mCurLineDiap[0] - 1,
it.col_offset,
it.col_offset + len(it_name)], warn_msg))
#===============================================
def processIf(self, instr_d):
self.mCondAtoms = []
cond_data = self._processCondition(instr_d.test)
if len(instr_d.orelse) > 0:
self.errorIt(instr_d.orelse[0],
"Else instruction is not supported")
line_from, line_to = self.mCurLineDiap
decision = self.getSingleReturnValue(instr_d.body)
line_decision = instr_d.body[0].lineno + line_from - 1
ret = [
TreeFragment(0, "If", (line_from, line_decision),
cond_atoms = self.mCondAtoms, cond_data = cond_data),
TreeFragment(1, "Return",
(line_decision, line_to), decision = decision)]
self.mCondAtoms = None
return ret
#===============================================
def processCall(self, instr, point_no):
assert isinstance(instr, ast.Call)
if instr.func.id != "label":
self.errorIt(instr, "Only label() function supported on top level")
if len(instr.args) != 1 or len(instr.keywords) != 0:
self.errorIt(instr, "Only one argument expected for label()")
if isinstance(instr.args[0], ast.Str):
label = instr.args[0].s
elif isinstance(instr.args[0], ast.Name):
label = instr.args[0].id
else:
self.errorIt(instr.args[0],
"String is expected as argument of label()")
if label in self.mLabels:
self.errorIt(instr, "Duplicate label %s" % label)
self.mLabels[label] = point_no
frag_h = TreeFragment(0, "Label", self.mCurLineDiap, label = label)
return frag_h
#===============================================
def getReturnValue(self, instr):
if isinstance(instr.value, ast.NameConstant):
if instr.value.value in (True, False):
return instr.value.value
self.errorIt(instr,
"Only boolean return (True/False) is expected here")
return None
#===============================================
def getSingleReturnValue(self, body):
assert len(body) >= 1
if len(body) > 1:
self.errorIt(body[1],
"Only one instruction is expected here")
instr = body[0]
if not isinstance(instr, ast.Return):
self.errorIt(instr, "Only return instruction is expected here")
return self.getReturnValue(instr)
#===============================================
def _processCondition(self, it):
if isinstance(it, ast.BoolOp):
if isinstance(it.op, ast.And):
seq = ["and"]
elif isinstance(it.op, ast.Or):
seq = ["or"]
else:
self.errorIt(it, "Logic operation not supported")
for val in it.values:
rep_el = self._processCondition(val)
if rep_el[0] == seq[0]:
seq += rep_el[1:]
else:
seq.append(rep_el)
return seq
if isinstance(it, ast.UnaryOp):
if not isinstance(it.op, ast.Not):
self.errorIt(it, "Unary operation not supported")
return ["not", self._processCondition(it.operand)]
if not isinstance(it, ast.Compare):
self.errorIt(it, "Comparison or logic operation expected")
if len(it.ops) == 1 and (isinstance(it.ops[0], ast.In)
or isinstance(it.ops[0], ast.NotIn)):
return self._processEnumInstr(it)
return self._processNumInstr(it)
#===============================================
def _processEnumInstr(self, it):
assert len(it.comparators) == 1
it_set = it.comparators[0]
if isinstance(it.ops[0], ast.NotIn):
op_mode = "NOT"
else:
assert isinstance(it.ops[0], ast.In)
op_mode = "OR"
if isinstance(it_set, ast.Call):
if (len(it_set.args) != 1 or len(it_set.keywords) > 0
or not it_set.func
or not isinstance(it_set.func, ast.Name)):
self.errorIt(it_set, "Complex call not supported")
if it_set.func.id == "all":
if op_mode == "NOT":
self.errorIt(it_set, "Complex call not supported")
op_mode = "AND"
it_set = it_set.args[0]
else:
self.errorIt(it_set,
"Only pseudo-function all is supported")
variants = self.processIdSet(it_set)
#if len(variants) == 0:
# self.errorIt(it_set, "Empty set")
if isinstance(it.left, ast.Name):
field_name = it.left.id
warn_msg = None
ret = ["enum", field_name, op_mode, variants]
if self.mEvalSpace is not None:
unit_h = self.mEvalSpace.getUnit(field_name)
if unit_h is None:
warn_msg = "Inactive enum field"
if op_mode == "NOT":
ret = []
else:
ret = [None]
else:
if not unit_h.isInDTrees():
self.errorIt(it.left,
"No support for field %s in decision trees"
% field_name)
elif unit_h.getUnitKind() == "func":
self.errorIt(it.left,
"Field %s should be used as function" % field_name)
if unit_h.getUnitKind() != "enum":
self.errorIt(it.left, "Improper enum field name: "
+ field_name)
self._regCondAtom(ret, it.left, it.left.id, warn_msg)
return ret
if isinstance(it.left, ast.Call):
field_name = it.left.func.id
func_args, warn_msg = dict(), None
ret = ["func", field_name, op_mode, variants, func_args]
if self.mEvalSpace is None:
# No parameters w/o eval space, parse only
del ret[2:]
else:
unit_h = self.mEvalSpace.getUnit(field_name)
if unit_h is None:
warn_msg = "Inactive function"
if op_mode == "NOT":
ret = []
else:
ret = [None]
elif unit_h.getUnitKind() != "func":
self.errorIt(it.left, "Improper functional field name: "
+ field_name)
else:
parameters = unit_h.getParameters()[:]
for it_arg in it.left.args:
if len(parameters) == 0:
self.errorIt(it_arg, "Extra argument of function")
func_args[parameters.pop(0)] = self.processJSonData(
it_arg)
for argval_it in it.left.keywords:
if argval_it.arg in func_args:
self.errorIt(argval_it.value,
"Argument %s duplicated" % argval_it.arg)
if argval_it.arg not in parameters:
self.errorIt(argval_it.value,
"Argument %s not expected" % argval_it.arg)
func_args[argval_it.arg] = self.processJSonData(
argval_it.value)
parameters.remove(argval_it.arg)
err_msg = unit_h.validateArgs(func_args)
if err_msg:
self.errorIt(it.left, err_msg)
self._regCondAtom(ret, it.left, it.left.func.id, warn_msg)
return ret
self.errorIt(it.left, "Name of field is expected")
return None
#===============================================
sNumOpTab = [
(ast.Lt, 1, False),
(ast.LtE, 1, True),
(ast.Eq, 0, True),
(ast.GtE, -1, True),
(ast.Gt, -1, False)]
@classmethod
def determineNumOp(cls, op):
for op_class, ord_mode, eq_mode in cls.sNumOpTab:
if isinstance(op, op_class):
return (ord_mode, eq_mode)
return None, None
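    # Example of the table above (interpretation based on how _processNumInstr
    # consumes these values): for "x <= 5" the comparison node is ast.LtE, so
    # determineNumOp returns (1, True) -- ord_mode=1 means the field sits on
    # the smaller side of the bound, eq_mode=True means the bound is inclusive.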
def _processNumInstr(self, it):
op_modes = []
for op in it.ops:
if len(op_modes) > 1:
op_modes = None
break
if len(op_modes) > 0 and op_modes[0][0] == 0:
break
op_modes.append(self.determineNumOp(op))
if op_modes[-1][0] is None:
self.errorIt(it, "Operation not supported")
if op_modes is not None:
if ((len(op_modes) == 2 and op_modes[0][0] != op_modes[1][0])
or len(op_modes) > 2):
op_modes = None
if not op_modes:
self.errorIt(it, "Unexpected complexity of numeric comparison")
operands = [it.left] + it.comparators[:]
values = []
idx_fld = None
for idx, op in enumerate(operands):
if isinstance(op, ast.Name):
if idx_fld is None:
idx_fld = idx
else:
self.errorIt(op,
"Comparison of two fields not supported")
else:
values.append(self.processFloat(op))
if idx_fld is None:
self.errorIt(it, "Where is a field fo compare?")
field_node = operands[idx_fld]
field_name = field_node.id
bounds = [None, True, None, True]
ret = ["numeric", field_name, bounds]
warn_msg = None
if self.mEvalSpace is not None:
unit_h = self.mEvalSpace.getUnit(field_name)
if unit_h is None:
warn_msg = "Inactive numeric field"
ret = [None]
elif unit_h.getUnitKind() != "numeric":
self.errorIt(operands[idx_fld],
"Improper numeric field name: " + field_name)
if len(operands) == 3 and idx_fld != 1:
self.errorIt(it, "Too complex comparison")
assert len(values) == len(op_modes)
if op_modes[0][0] == 0:
assert len(op_modes) == 1 and len(values) == 1
bounds[0] = values[0]
bounds[2] = values[0]
else:
if op_modes[0][0] < 0:
values = values[::-1]
op_modes = op_modes[::-1]
idx_fld = 0 if idx_fld > 0 else 1
if idx_fld == 0:
assert len(values) == 1
bounds[2] = values[0]
bounds[3] = op_modes[0][1]
else:
bounds[0] = values[0]
bounds[1] = op_modes[0][1]
if len(values) > 1:
bounds[2] = values[1]
bounds[3] = op_modes[1][1]
if bounds[0] is not None and bounds[2] is not None:
if ((bounds[0] == bounds[2] and not (bounds[1] and bounds[3]))
or bounds[0] > bounds[2]):
self.errorIt(it, "Condition never success")
self._regCondAtom(ret, field_node, field_name, warn_msg)
return ret
#===============================================
def processInt(self, it):
if not isinstance(it, ast.Num) or not isinstance(it.n, int):
self.errorIt(it, "Integer is expected")
return it.n
#===============================================
def processFloat(self, it):
if not isinstance(it, ast.Num) or (
not isinstance(it.n, int) and not isinstance(it.n, float)):
self.errorIt(it, "Int or float is expected: %r" % it.n)
return it.n
#===============================================
def processIdSet(self, it_set):
if isinstance(it_set, ast.Dict) and len(it_set.keys) == 0:
return []
if not (isinstance(it_set, ast.List)
or isinstance(it_set, ast.Set)):
self.errorIt(it_set, "Set (or list) expected")
variants = []
for el in it_set.elts:
if isinstance(el, ast.Str):
val = el.s
elif isinstance(el, ast.Name):
val = el.id
elif isinstance(el, ast.NameConstant):
val = str(el.value)
else:
self.errorIt(el, "Name or string is expected as variant")
if val in variants:
self.errorIt(el, "Duplicated variant")
variants.append(val)
return variants
#===============================================
def processJSonData(self, it):
if isinstance(it, ast.Num):
return it.n
if isinstance(it, ast.Str):
return it.s
if isinstance(it, ast.Name):
return it.id
if isinstance(it, ast.NameConstant):
if it.value in (True, False, None):
return it.value
self.errorIt(it,
"Constant %s not expected" % str(it.value))
if (isinstance(it, ast.List)
or isinstance(it, ast.Set)):
return [self.processJSonData(el) for el in it.elts]
if isinstance(it, ast.Dict):
ret = dict()
for idx, it_key in enumerate(it.keys):
key = self.processJSonData(it_key)
if isinstance(key, ast.List) or isinstance(key, ast.Dict):
self.errorIt(it_key,
"Combined keys for dict are not supported")
ret[key] = self.processJSonData(it.values[idx])
return ret
self.errorIt(it, "Incorrect data format")
return None
#===============================================
if __name__ == '__main__':
source = sys.stdin.read()
parser = ParsedDTree(None, source)
if parser.getError() is not None:
print("Error:", parser.getError())
if parser.getFragments() is not None:
print("Done:", len(parser.getFragments()))
| 39.13465
| 79
| 0.516194
|
9b422f0496b1ba6d0a41e99ce5c5b57543c17a7b
| 291
|
py
|
Python
|
src/main.py
|
AnnekinMeyburgh/lol-spreadsheets
|
dff24b2d99c968228f1870788fc2bb69eb36a148
|
[
"MIT"
] | null | null | null |
src/main.py
|
AnnekinMeyburgh/lol-spreadsheets
|
dff24b2d99c968228f1870788fc2bb69eb36a148
|
[
"MIT"
] | null | null | null |
src/main.py
|
AnnekinMeyburgh/lol-spreadsheets
|
dff24b2d99c968228f1870788fc2bb69eb36a148
|
[
"MIT"
] | null | null | null |
from riot_api import setup_lol_watcher
from sheets import setup_service
from dotenv import load_dotenv
import server
def main():
load_dotenv()
lol_watcher = setup_lol_watcher()
service = setup_service()
server.run(lol_watcher, service)
if __name__ == '__main__':
main()
| 22.384615
| 38
| 0.749141
|
4490425c3de5f6a7f4954b76bda0ea4925c891c6
| 3,568
|
py
|
Python
|
neutron_lib/api/definitions/base.py
|
rubasov/neutron-lib
|
8664953c5a17e8abf5c5a52832e63885108aa818
|
[
"Apache-2.0"
] | null | null | null |
neutron_lib/api/definitions/base.py
|
rubasov/neutron-lib
|
8664953c5a17e8abf5c5a52832e63885108aa818
|
[
"Apache-2.0"
] | null | null | null |
neutron_lib/api/definitions/base.py
|
rubasov/neutron-lib
|
8664953c5a17e8abf5c5a52832e63885108aa818
|
[
"Apache-2.0"
] | null | null | null |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lib import constants
KNOWN_ATTRIBUTES = (
'admin_state_up',
'allocation_pools',
'cidr',
'default_prefixlen',
'default_quota',
'description',
'device_id',
'device_owner',
'dns_nameservers',
'enable_dhcp',
'fixed_ips',
'gateway_ip',
'host_routes',
'id',
'ip_version',
'ipv6_address_mode',
'ipv6_ra_mode',
'is_default',
'mac_address',
'max_prefixlen',
'min_prefixlen',
'name',
'network_id',
'port_id',
'prefixes',
'prefixlen',
'project_id',
constants.SHARED,
'status',
'subnets',
'subnetpool_id',
'tenant_id'
)
KNOWN_RESOURCES = (
'networks',
'ports',
'routers',
'subnets',
'subnetpools'
)
KNOWN_HTTP_ACTIONS = (
'DELETE',
'GET',
'POST',
'PUT',
)
KNOWN_ACTION_STATUSES = (
200,
201,
202,
203,
204,
205,
206,
)
KNOWN_EXTENSIONS = (
'address-scope',
'agent',
'allowed-address-pairs',
'auto-allocated-topology',
'availability_zone',
'binding',
'data-plane-status',
'default-subnetpools',
'dhcp_agent_scheduler',
'dns-domain-ports',
'dns-integration',
'dvr',
'empty-string-filtering',
'ext-gw-mode',
'external-net',
'extra_dhcp_opt',
'extraroute',
'fip-port-details',
'flavors',
'floating-ip-port-forwarding',
'floatingip-pools',
'ip-substring-filtering',
'l3-ha',
'l3_agent_scheduler',
'logging',
'metering',
'multi-provider',
'net-mtu',
'network-ip-availability',
'network_availability_zone',
'pagination',
'port-security',
'project-id',
'provider',
'qos',
'quotas',
'rbac-policies',
'router',
'router_availability_zone',
'security-group',
'segment',
'service-type',
'sorting',
'standard-attr-description',
'standard-attr-revisions',
'standard-attr-timestamp',
'subnet_allocation',
'subnet_onboard',
'subnet-segmentid-writable',
'tag',
'trunk',
'trunk-details',
# Add here list of extensions with pointers to the project repo, e.g.
# 'bgp', # http://git.openstack.org/cgit/openstack/neutron-dynamic-routing
# http://git.openstack.org/cgit/openstack/neutron-fwaas
'fwaas',
'fwaasrouterinsertion',
'fwaas_v2',
'bgpvpn', # https://git.openstack.org/cgit/openstack/networking-bgpvpn
'bgpvpn-routes-control',
'bgpvpn-vni',
# git.openstack.org/cgit/openstack/neutron-vpnaas
'vpnaas',
'vpn-endpoint-groups',
'vpn-flavors',
# http://git.openstack.org/cgit/openstack/networking-sfc:
'flow_classifier',
'sfc',
)
KNOWN_KEYWORDS = (
'allow_post',
'allow_put',
'convert_to',
'convert_list_to',
'default',
'enforce_policy',
'is_filter',
'is_sort_key',
'is_visible',
'primary_key',
'required_by_policy',
'validate',
'default_overrides_none',
'dict_populate_defaults',
)
| 21.493976
| 79
| 0.621917
|
516769780da1ceb02aa6386d8e7a61f9b056bc52
| 2,561
|
py
|
Python
|
Yone/Database/reporting_sql.py
|
Kavindupramudita/YoneRobot
|
3e8ea471171b4552e373d28df85c1062cdaf1597
|
[
"BSD-2-Clause"
] | 1
|
2022-03-04T07:47:45.000Z
|
2022-03-04T07:47:45.000Z
|
Yone/Database/reporting_sql.py
|
Kavindupramudita/YoneRobot
|
3e8ea471171b4552e373d28df85c1062cdaf1597
|
[
"BSD-2-Clause"
] | 1
|
2022-02-04T13:27:52.000Z
|
2022-02-04T13:27:52.000Z
|
Yone/Database/reporting_sql.py
|
Kavindupramudita/YoneRobot
|
3e8ea471171b4552e373d28df85c1062cdaf1597
|
[
"BSD-2-Clause"
] | 2
|
2022-03-04T16:58:21.000Z
|
2022-03-09T09:38:55.000Z
|
import threading
from typing import Union
from Yone.Database import BASE, SESSION
from sqlalchemy import Boolean, Column, BigInteger, String
class ReportingUserSettings(BASE):
__tablename__ = "user_report_settings"
user_id = Column(BigInteger, primary_key=True)
should_report = Column(Boolean, default=True)
def __init__(self, user_id):
self.user_id = user_id
def __repr__(self):
return "<User report settings ({})>".format(self.user_id)
class ReportingChatSettings(BASE):
__tablename__ = "chat_report_settings"
chat_id = Column(String(14), primary_key=True)
should_report = Column(Boolean, default=True)
def __init__(self, chat_id):
self.chat_id = str(chat_id)
def __repr__(self):
return "<Chat report settings ({})>".format(self.chat_id)
ReportingUserSettings.__table__.create(checkfirst=True)
ReportingChatSettings.__table__.create(checkfirst=True)
CHAT_LOCK = threading.RLock()
USER_LOCK = threading.RLock()
def chat_should_report(chat_id: Union[str, int]) -> bool:
try:
chat_setting = SESSION.query(ReportingChatSettings).get(str(chat_id))
if chat_setting:
return chat_setting.should_report
return False
finally:
SESSION.close()
def user_should_report(user_id: int) -> bool:
try:
user_setting = SESSION.query(ReportingUserSettings).get(user_id)
if user_setting:
return user_setting.should_report
return True
finally:
SESSION.close()
def set_chat_setting(chat_id: Union[int, str], setting: bool):
with CHAT_LOCK:
chat_setting = SESSION.query(ReportingChatSettings).get(str(chat_id))
if not chat_setting:
chat_setting = ReportingChatSettings(chat_id)
chat_setting.should_report = setting
SESSION.add(chat_setting)
SESSION.commit()
def set_user_setting(user_id: int, setting: bool):
with USER_LOCK:
user_setting = SESSION.query(ReportingUserSettings).get(user_id)
if not user_setting:
user_setting = ReportingUserSettings(user_id)
user_setting.should_report = setting
SESSION.add(user_setting)
SESSION.commit()
def migrate_chat(old_chat_id, new_chat_id):
with CHAT_LOCK:
chat_notes = (
SESSION.query(ReportingChatSettings)
.filter(ReportingChatSettings.chat_id == str(old_chat_id))
.all()
)
for note in chat_notes:
note.chat_id = str(new_chat_id)
SESSION.commit()
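# Usage sketch (assumes the SESSION from Yone.Database is bound to a live
# database; kept as comments because every call below touches that database):
# set_chat_setting(-1001234, True)
# chat_should_report(-1001234)   # -> True
# set_user_setting(42, False)
# user_should_report(42)         # -> False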
| 28.455556
| 77
| 0.685279
|
8c0d21fff0e7309d4af59c35c4f20109e8d65a7d
| 1,386
|
py
|
Python
|
AntPathExample.py
|
BatteryWater/PythonDPAntExample
|
37a129655fdfbfc40382b7a22545fe094f0f7c8a
|
[
"Unlicense"
] | null | null | null |
AntPathExample.py
|
BatteryWater/PythonDPAntExample
|
37a129655fdfbfc40382b7a22545fe094f0f7c8a
|
[
"Unlicense"
] | null | null | null |
AntPathExample.py
|
BatteryWater/PythonDPAntExample
|
37a129655fdfbfc40382b7a22545fe094f0f7c8a
|
[
"Unlicense"
] | null | null | null |
'''
DP example for ant pathing problem
'''
def CalcDistance(maxX, maxY, x, y):
return abs(x/maxX - y/maxY)
def DictCombine(currentDict, otherDict, d):
for D, count in otherDict.items():
if D > d:
if D not in currentDict:
currentDict[D] = 0
currentDict[D] += count
else:
if d not in currentDict:
currentDict[d] = 0
currentDict[d] += count
def AntPathing(maxX, maxY):
    if (maxX == 0) or (maxY == 0):
return 0
current = [] #we'll store only two columns in memory
for y in range(0, maxY+1):
current.append({CalcDistance(maxX, maxY, 0, y) : 1})
for x in range(1, maxX+1):
last = current
current = []
current.append({CalcDistance(maxX, maxY, x, 0) : 1})
for y in range(1, maxY+1):
d = CalcDistance(maxX, maxY, x, y)
current.append({})
DictCombine(current[y], current[y-1], d)
DictCombine(current[y], last[y], d)
LastPointDict = current[-1]
Sum = 0.0
Count = 0
for pathValue, pathCount in LastPointDict.items():
Sum += (pathValue * pathCount)
Count += pathCount
    print('Total D: ', Sum)
print('Paths: ', Count)
print('Average D:', Sum/Count)
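# Minimal usage sketch (grid size chosen for illustration): a 2x2 grid has
# C(4, 2) = 6 monotone paths, so this reports 6 paths.
if __name__ == '__main__':
    AntPathing(2, 2)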
| 28.875
| 61
| 0.510823
|
a80a6d2246a9c5bdba199f33415ec2ca6042107e
| 4,478
|
py
|
Python
|
tests/test_objmod.py
|
csningli/MultiAgent
|
cabfeab3ea8cf19559e06c354247f8f25739d6b1
|
[
"Apache-2.0"
] | 1
|
2018-03-21T02:17:01.000Z
|
2018-03-21T02:17:01.000Z
|
tests/test_objmod.py
|
csningli/MultiAgent
|
cabfeab3ea8cf19559e06c354247f8f25739d6b1
|
[
"Apache-2.0"
] | null | null | null |
tests/test_objmod.py
|
csningli/MultiAgent
|
cabfeab3ea8cf19559e06c354247f8f25739d6b1
|
[
"Apache-2.0"
] | null | null | null |
# MultiAgent 2.0
# (c) 2017-2018, NiL, csningli@gmail.com
import sys, os, time
import doctest
sys.path.append("..")
from mas.multiagent import *
def test_objmod_basic() :
'''
>>> test_objmod_basic()
Initialization.
ObjectModule: <<multiagent.ObjectModule memory_size=0>>
'''
print("Initialization.")
mod = ObjectModule()
print("ObjectModule: %s" % mod.info())
def test_objmod_sense() :
'''
>>> test_objmod_sense()
Initialization.
ObjectModule: <<multiagent.ObjectModule memory_size=1>>
Prepare the request.
Request: <<multiagent.Request content_len=1>>
Message to '0': <<multiagent.Message src= dest=0 key=pos value=(10, 10)>>
Message to '0': <<multiagent.Message src= dest=0 key=angle value=10>>
Message to '0': <<multiagent.Message src= dest=0 key=vel value=(1, 1)>>
Message to '0': <<multiagent.Message src= dest=0 key=avel value=1>>
Message to '0': <<multiagent.Message src= dest=0 key=force value=(1, 1)>>
Message to '0': <<multiagent.Message src= dest=0 key=color value=(1, 1, 1, 255)>>
Memory after sense.
Position in memory: (10.0, 10.0)
Position in buffer: (10.0, 10.0)
Angle in memory: 10.0
Angle in buffer: 10.0
Velocity in memory: (1.0, 1.0)
Velocity in buffer: (1.0, 1.0)
Angular velocity in memory: 1.0
Angular velocity in buffer: 1.0
Force in memory: (1.0, 1.0)
Force in buffer: (1.0, 1.0)
Color in memory: (1, 1, 1, 255)
Color in buffer: (1, 1, 1, 255)
'''
print("Initialization.")
mod = ObjectModule()
mod.mem.reg(key = "name", value = "0")
print("ObjectModule: %s" % mod.info())
print("Prepare the request.")
reqt = Request()
reqt.add_msg(Message(src = "", dest = "0", key = "pos", value = (10, 10)))
reqt.add_msg(Message(src = "", dest = "0", key = "angle", value = 10))
reqt.add_msg(Message(src = "", dest = "0", key = "vel", value = (1, 1)))
reqt.add_msg(Message(src = "", dest = "0", key = "avel", value = 1))
reqt.add_msg(Message(src = "", dest = "0", key = "force", value = (1, 1)))
reqt.add_msg(Message(src = "", dest = "0", key = "color", value = (1, 1, 1, 255)))
print("Request: %s" % reqt.info())
for msg in reqt.get_msgs(dest = "0") :
print("Message to '0': %s" % msg.info())
mod.sense(reqt)
print("Memory after sense.")
pos = mod.get_pos()
print("Position in memory: (%.1f, %.1f)" % (pos[0], pos[1]))
print("Position in buffer: (%.1f, %.1f)" % (mod.buff["pos"][0], mod.buff["pos"][1]))
angle = mod.get_angle()
print("Angle in memory: %.1f" % angle)
print("Angle in buffer: %.1f" % mod.buff["angle"])
vel = mod.get_vel()
print("Velocity in memory: (%.1f, %.1f)" % (vel[0], vel[1]))
print("Velocity in buffer: (%.1f, %.1f)" % (mod.buff["vel"][0], mod.buff["vel"][1]))
avel = mod.get_avel()
print("Angular velocity in memory: %.1f" % avel)
print("Angular velocity in buffer: %.1f" % mod.buff["avel"])
force = mod.get_force()
print("Force in memory: (%.1f, %.1f)" % (force[0], force[1]))
print("Force in buffer: (%.1f, %.1f)" % (mod.buff["force"][0], mod.buff["force"][1]))
color = mod.get_color()
print("Color in memory: (%d, %d, %d, %d)" % (color[0], color[1], color[2], color[3]))
print("Color in buffer: (%d, %d, %d, %d)" % (mod.buff["color"][0], mod.buff["color"][1], mod.buff["color"][2], mod.buff["color"][3]))
def test_objmod_act() :
'''
>>> test_objmod_act()
Initialization.
ObjectModule: <<multiagent.ObjectModule memory_size=1>>
Message to '': <<multiagent.Message src= dest= key=vel value=(10, 10)>>
Message to '': <<multiagent.Message src= dest= key=avel value=1>>
Message to '': <<multiagent.Message src= dest= key=force value=(1, 1)>>
Message to '': <<multiagent.Message src= dest= key=color value=(1, 1, 1, 255)>>
'''
print("Initialization.")
mod = ObjectModule()
mod.mem.reg(key = "name", value = "0")
print("ObjectModule: %s" % mod.info())
mod.apply_vel(vel = (10, 10))
mod.apply_avel(avel = 1)
mod.apply_force(force = (1, 1))
mod.apply_color(color = (1, 1, 1, 255))
resp = Response()
mod.act(resp)
for msg in resp.get_msgs(dest = "") :
print("Message to '': %s" % msg.info())
if __name__ == '__main__' :
result = doctest.testmod()
print("-" * 50)
print("[ObjectModule Test] attempted/failed tests: %d/%d" % (result.attempted, result.failed))
| 38.93913
| 137
| 0.590889
|
7c9cb9aaa2dd68bf8de1beb80bfdf271f6ee789d
| 2,621
|
py
|
Python
|
clburlison_scripts/reset_location_services/reset_location_services.py
|
hackerman518/scripts
|
f1a234b1d12996ee278a1a3030e57df974758054
|
[
"MIT"
] | null | null | null |
clburlison_scripts/reset_location_services/reset_location_services.py
|
hackerman518/scripts
|
f1a234b1d12996ee278a1a3030e57df974758054
|
[
"MIT"
] | null | null | null |
clburlison_scripts/reset_location_services/reset_location_services.py
|
hackerman518/scripts
|
f1a234b1d12996ee278a1a3030e57df974758054
|
[
"MIT"
] | 1
|
2019-09-09T17:46:37.000Z
|
2019-09-09T17:46:37.000Z
|
#!/usr/bin/python
"""Reset location services to factory settings."""
import os
import platform
import subprocess
from distutils.version import LooseVersion
def root_check():
"""Check for root access."""
if not os.geteuid() == 0:
exit("This must be run with root access.")
def os_vers():
"""Retrieve OS version."""
return platform.mac_ver()[0]
def os_check():
"""Only supported on 10.8+."""
if not LooseVersion(os_vers()) >= LooseVersion('10.8'):
exit("This tool only tested on 10.8+")
def kill_services():
"""On 10.12, both the locationd and cfprefsd services like to not respect
preference changes so we force them to reload."""
proc = subprocess.Popen(['/usr/bin/killall', '-9', 'cfprefsd'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
proc = subprocess.Popen(['/usr/bin/killall', '-9', 'locationd'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
def service_handler(action):
"""Loads/unloads System's location services job on supported OSs."""
supported, current = LooseVersion('10.12.4'), LooseVersion(os_vers())
if current < supported:
print("LaunchD for locationd supported")
        if action == 'load':
kill_services()
launchctl = ['/bin/launchctl', action,
'/System/Library/LaunchDaemons/com.apple.locationd.plist']
subprocess.check_output(launchctl)
def sysprefs_boxchk():
"""Disable location services in sysprefs globally."""
read_cmd = ['/usr/bin/sudo', '-u', '_locationd', '/usr/bin/defaults',
'-currentHost', 'read', 'com.apple.locationd',
'LocationServicesEnabled']
status = subprocess.check_output(read_cmd)
if int(status) != 0:
write_cmd = ['/usr/bin/sudo', '-u', '_locationd', '/usr/bin/defaults',
'-currentHost', 'write', 'com.apple.locationd',
'LocationServicesEnabled', '-bool', 'FALSE']
subprocess.check_output(write_cmd)
def clear_clients():
"""Clear clients.plist in locationd settings."""
cmd = ['/usr/bin/sudo', '-u', '_locationd', '/usr/bin/defaults',
'delete', '/private/var/db/locationd/clients.plist']
proc = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
kill_services()
def main():
"""Give main"""
os_check()
root_check()
sysprefs_boxchk()
clear_clients()
if __name__ == '__main__':
main()
| 31.202381
| 79
| 0.5971
|
64eadaf51d0170225b6948faeab2a46b60d39217
| 3,280
|
py
|
Python
|
tfx/dsl/component/experimental/executor_specs.py
|
nikelite/tfx
|
76ca6d65868058f9e9b144a8a16ed88213d28d99
|
[
"Apache-2.0"
] | null | null | null |
tfx/dsl/component/experimental/executor_specs.py
|
nikelite/tfx
|
76ca6d65868058f9e9b144a8a16ed88213d28d99
|
[
"Apache-2.0"
] | null | null | null |
tfx/dsl/component/experimental/executor_specs.py
|
nikelite/tfx
|
76ca6d65868058f9e9b144a8a16ed88213d28d99
|
[
"Apache-2.0"
] | null | null | null |
# Lint as: python2, python3
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Executor specifications for components."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import List, Text, Union
from tfx.components.base import executor_spec
from tfx.dsl.component.experimental import placeholders
CommandlineArgumentType = Union[
Text,
placeholders.InputValuePlaceholder,
placeholders.InputUriPlaceholder,
placeholders.OutputUriPlaceholder,
]
class TemplatedExecutorContainerSpec(executor_spec.ExecutorSpec):
"""Experimental: Describes a command-line program inside a container.
This class is similar to ExecutorContainerSpec, but uses structured
placeholders instead of jinja templates for constructing container commands
based on input and output artifact metadata. See placeholders.py for a list of
supported placeholders.
The spec includes the container image name and the command line
(entrypoint plus arguments) for a program inside the container.
Example:
    class MyTrainer(base_component.BaseComponent):
class MyTrainerSpec(types.ComponentSpec):
INPUTS = {
'training_data':
component_spec.ChannelParameter(type=standard_artifacts.Dataset),
}
OUTPUTS = {
'model':
component_spec.ChannelParameter(type=standard_artifacts.Model),
}
PARAMETERS = {
'num_training_steps': component_spec.ExecutionParameter(type=int),
}
SPEC_CLASS = MyTrainerSpec
EXECUTOR_SPEC = executor_specs.TemplatedExecutorContainerSpec(
image='gcr.io/my-project/my-trainer',
command=[
'python3', 'my_trainer',
'--training_data_uri', InputUriPlaceholder('training_data'),
'--model_uri', OutputUriPlaceholder('model'),
'--num_training-steps', InputValuePlaceholder('num_training_steps'),
]
)
Attributes:
image: Container image name.
command: Container entrypoint command-line. Not executed within a shell.
The command-line can use placeholder objects that will be replaced at
the compilation time. Note: Jinja templates are not supported.
"""
# The "command" parameter holds the name of the program and its arguments.
# The "command" parameter is required to enable instrumentation.
# The command-line is often split into command+args, but here "args" would be
# redundant since all items can just be added to "command".
def __init__(
self,
image: Text,
command: List[CommandlineArgumentType],
):
self.image = image
self.command = command
super(TemplatedExecutorContainerSpec, self).__init__()
| 35.652174
| 80
| 0.733537
|
4c5b1261d0029474f061a9551350134e4a70e62c
| 10,696
|
py
|
Python
|
qa/eth_refund_moderated.py
|
rodkeys/openbazaar-go
|
57340cdef95d5c07403aabe4d7d31ac3ec3b3562
|
[
"MIT"
] | null | null | null |
qa/eth_refund_moderated.py
|
rodkeys/openbazaar-go
|
57340cdef95d5c07403aabe4d7d31ac3ec3b3562
|
[
"MIT"
] | null | null | null |
qa/eth_refund_moderated.py
|
rodkeys/openbazaar-go
|
57340cdef95d5c07403aabe4d7d31ac3ec3b3562
|
[
"MIT"
] | null | null | null |
import requests
import json
import time
from collections import OrderedDict
from test_framework.test_framework import OpenBazaarTestFramework, TestFailure
class EthRefundModeratedTest(OpenBazaarTestFramework):
def __init__(self):
super().__init__()
self.num_nodes = 4
def run_test(self):
alice = self.nodes[1]
bob = self.nodes[2]
charlie = self.nodes[3]
        # fetch a receiving address for bob
time.sleep(4)
api_url = bob["gateway_url"] + "wallet/address/" + self.cointype
r = requests.get(api_url)
if r.status_code == 200:
resp = json.loads(r.text)
address = resp["address"]
elif r.status_code == 404:
raise TestFailure("EthRefundModeratedTest - FAIL: Address endpoint not found")
else:
raise TestFailure("EthRefundModeratedTest - FAIL: Unknown response")
time.sleep(20)
# create a profile for charlie
pro = {"name": "Charlie"}
api_url = charlie["gateway_url"] + "ob/profile"
r = requests.post(api_url, data=json.dumps(pro, indent=4))
if r.status_code == 404:
raise TestFailure("EthRefundModeratedTest - FAIL: Profile post endpoint not found")
elif r.status_code != 200:
resp = json.loads(r.text)
raise TestFailure("EthRefundModeratedTest - FAIL: Profile POST failed. Reason: %s", resp["reason"])
time.sleep(4)
# make charlie a moderator
with open('testdata/moderation.json') as listing_file:
moderation_json = json.load(listing_file, object_pairs_hook=OrderedDict)
api_url = charlie["gateway_url"] + "ob/moderator"
r = requests.put(api_url, data=json.dumps(moderation_json, indent=4))
if r.status_code == 404:
raise TestFailure("EthRefundModeratedTest - FAIL: Moderator post endpoint not found")
elif r.status_code != 200:
resp = json.loads(r.text)
raise TestFailure("EthRefundModeratedTest - FAIL: Moderator POST failed. Reason: %s", resp["reason"])
moderatorId = charlie["peerId"]
time.sleep(4)
# post profile for alice
with open('testdata/profile.json') as profile_file:
profile_json = json.load(profile_file, object_pairs_hook=OrderedDict)
api_url = alice["gateway_url"] + "ob/profile"
requests.post(api_url, data=json.dumps(profile_json, indent=4))
# post listing to alice
with open('testdata/eth_listing.json') as listing_file:
listing_json = json.load(listing_file, object_pairs_hook=OrderedDict)
listing_json["item"]["priceCurrency"]["code"] = "T" + self.cointype
listing_json["metadata"]["acceptedCurrencies"] = ["T" + self.cointype]
listing_json["moderators"] = [moderatorId]
api_url = alice["gateway_url"] + "ob/listing"
r = requests.post(api_url, data=json.dumps(listing_json, indent=4))
if r.status_code == 404:
raise TestFailure("EthRefundModeratedTest - FAIL: Listing post endpoint not found")
elif r.status_code != 200:
resp = json.loads(r.text)
raise TestFailure("EthRefundModeratedTest - FAIL: Listing POST failed. Reason: %s", resp["reason"])
time.sleep(4)
# get listing hash
api_url = alice["gateway_url"] + "ob/listings/" + alice["peerId"]
r = requests.get(api_url)
if r.status_code != 200:
raise TestFailure("EthRefundModeratedTest - FAIL: Couldn't get listing index")
resp = json.loads(r.text)
listingId = resp[0]["hash"]
# bob send order
with open('testdata/order_direct.json') as order_file:
order_json = json.load(order_file, object_pairs_hook=OrderedDict)
order_json["items"][0]["listingHash"] = listingId
order_json["moderator"] = moderatorId
order_json["paymentCoin"] = "T" + self.cointype
api_url = bob["gateway_url"] + "ob/purchase"
r = requests.post(api_url, data=json.dumps(order_json, indent=4))
if r.status_code == 404:
raise TestFailure("EthRefundModeratedTest - FAIL: Purchase post endpoint not found")
elif r.status_code != 200:
resp = json.loads(r.text)
self.print_logs(alice, "ob.log")
raise TestFailure("EthRefundModeratedTest - FAIL: Purchase POST failed. Reason: %s", resp["reason"])
resp = json.loads(r.text)
orderId = resp["orderId"]
payment_address = resp["paymentAddress"]
payment_amount = resp["amount"]
# check the purchase saved correctly
api_url = bob["gateway_url"] + "ob/order/" + orderId
r = requests.get(api_url)
if r.status_code != 200:
raise TestFailure("EthRefundModeratedTest - FAIL: Couldn't load order from Bob")
resp = json.loads(r.text)
if resp["state"] != "AWAITING_PAYMENT":
raise TestFailure("EthRefundModeratedTest - FAIL: Bob purchase saved in incorrect state")
if resp["funded"] == True:
raise TestFailure("EthRefundModeratedTest - FAIL: Bob incorrectly saved as funded")
# check the sale saved correctly
api_url = alice["gateway_url"] + "ob/order/" + orderId
r = requests.get(api_url)
if r.status_code != 200:
raise TestFailure("EthRefundModeratedTest - FAIL: Couldn't load order from Alice")
resp = json.loads(r.text)
if resp["state"] != "AWAITING_PAYMENT":
raise TestFailure("EthRefundModeratedTest - FAIL: Alice purchase saved in incorrect state")
if resp["funded"] == True:
raise TestFailure("EthRefundModeratedTest - FAIL: Alice incorrectly saved as funded")
# fund order
spend = {
"currencyCode": "T" + self.cointype,
"address": payment_address,
"amount": payment_amount["amount"],
"feeLevel": "NORMAL",
"requireAssociateOrder": True,
"orderID": orderId
}
api_url = bob["gateway_url"] + "ob/orderspend"
r = requests.post(api_url, data=json.dumps(spend, indent=4))
if r.status_code == 404:
raise TestFailure("EthRefundModeratedTest - FAIL: Spend post endpoint not found")
elif r.status_code != 200:
resp = json.loads(r.text)
raise TestFailure("EthRefundModeratedTest - FAIL: Spend POST failed. Reason: %s", resp["reason"])
time.sleep(20)
# check bob detected payment
api_url = bob["gateway_url"] + "ob/order/" + orderId
r = requests.get(api_url)
if r.status_code != 200:
raise TestFailure("EthRefundModeratedTest - FAIL: Couldn't load order from Bob")
resp = json.loads(r.text)
if resp["state"] != "AWAITING_FULFILLMENT":
raise TestFailure("EthRefundModeratedTest - FAIL: Bob failed to detect his payment")
if resp["funded"] == False:
raise TestFailure("EthRefundModeratedTest - FAIL: Bob incorrectly saved as unfunded")
# check alice detected payment
api_url = alice["gateway_url"] + "ob/order/" + orderId
r = requests.get(api_url)
if r.status_code != 200:
raise TestFailure("EthRefundModeratedTest - FAIL: Couldn't load order from Alice")
resp = json.loads(r.text)
if resp["state"] != "AWAITING_FULFILLMENT":
raise TestFailure("EthRefundModeratedTest - FAIL: Alice failed to detect payment")
if resp["funded"] == False:
raise TestFailure("EthRefundModeratedTest - FAIL: Alice incorrectly saved as unfunded")
time.sleep(5)
# alice refund order
api_url = alice["gateway_url"] + "ob/refund"
refund = {"orderId": orderId}
r = requests.post(api_url, data=json.dumps(refund, indent=4))
if r.status_code == 404:
raise TestFailure("EthRefundModeratedTest - FAIL: Refund endpoint not found")
elif r.status_code != 200:
resp = json.loads(r.text)
raise TestFailure("EthRefundModeratedTest - FAIL: Refund POST failed. Reason: %s", resp["reason"])
time.sleep(20)
# alice check order refunded correctly
api_url = alice["gateway_url"] + "ob/order/" + orderId
r = requests.get(api_url)
if r.status_code != 200:
raise TestFailure("EthRefundModeratedTest - FAIL: Couldn't load order from Alice")
resp = json.loads(r.text)
if resp["state"] != "REFUNDED":
raise TestFailure("EthRefundModeratedTest - FAIL: Alice failed to save as rejected")
#if len(resp["paymentAddressTransactions"]) != 2:
# raise TestFailure("EthRefundModeratedTest - FAIL: Alice failed to detect outgoing payment")
#if "refundAddressTransaction" not in resp or resp["refundAddressTransaction"] == {}:
# raise TestFailure("EthRefundModeratedTest - FAIL: Alice failed to detect refund payment")
# bob check order refunded correctly
api_url = bob["gateway_url"] + "ob/order/" + orderId
r = requests.get(api_url)
if r.status_code != 200:
raise TestFailure("EthRefundModeratedTest - FAIL: Couldn't load order from Bob")
resp = json.loads(r.text)
if resp["state"] != "REFUNDED":
raise TestFailure("EthRefundModeratedTest - FAIL: Bob failed to save as rejected")
#if len(resp["paymentAddressTransactions"]) != 2:
# raise TestFailure("EthRefundModeratedTest - FAIL: Bob failed to detect outgoing payment")
#if "refundAddressTransaction" not in resp or resp["refundAddressTransaction"] == {}:
# raise TestFailure("EthRefundModeratedTest - FAIL: Alice failed to detect refund payment")
time.sleep(2)
# Check the funds moved into bob's wallet
api_url = bob["gateway_url"] + "wallet/balance/T" + self.cointype
r = requests.get(api_url)
if r.status_code == 200:
resp = json.loads(r.text)
confirmed = int(resp["confirmed"])
#unconfirmed = int(resp["unconfirmed"])
#if confirmed <= 50 - int(payment_amount["amount"]):
# raise TestFailure("EthRefundModeratedTest - FAIL: Bob failed to receive the multisig payout")
else:
raise TestFailure("EthRefundModeratedTest - FAIL: Failed to query Bob's balance")
print("EthRefundModeratedTest - PASS")
if __name__ == '__main__':
print("Running EthRefundModeratedTest")
EthRefundModeratedTest().main(["--regtest", "--disableexchangerates"])
| 47.537778
| 113
| 0.629768
|
fb0bb25765080e6cff2d5d4de6f164ba9bbaae3e
| 4,827
|
py
|
Python
|
fashionmnist/sample.py
|
liuyangdh/multimodal-vae-public
|
ba5941d010b0164094f5818b93baad9df546494e
|
[
"MIT"
] | 98
|
2018-05-28T17:07:36.000Z
|
2022-03-16T03:54:11.000Z
|
fashionmnist/sample.py
|
jannik-w/multimodal-vae-public
|
2a358eb3593e9942e0846eb0095519acef462fa6
|
[
"MIT"
] | 4
|
2019-04-15T00:40:21.000Z
|
2020-03-04T06:24:56.000Z
|
fashionmnist/sample.py
|
jannik-w/multimodal-vae-public
|
2a358eb3593e9942e0846eb0095519acef462fa6
|
[
"MIT"
] | 36
|
2018-08-07T05:02:03.000Z
|
2022-03-28T05:21:42.000Z
|
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import sys
import numpy as np
import torch
import torch.nn.functional as F
from torch.autograd import Variable
from torchvision import transforms
from torchvision.utils import save_image
from train import load_checkpoint
from datasets import FashionMNIST
from model import LABEL_IX_TO_STRING
def fetch_fashionmnist_image(label):
"""Return a random image from the FashionMNIST dataset with label.
@param label: integer
        an integer from 0 to 9
@return: torch.autograd.Variable
FashionMNIST image
"""
dataset = FashionMNIST('./data', train=False, download=True,
transform=transforms.ToTensor())
images = dataset.test_data.numpy()
labels = dataset.test_labels.numpy()
images = images[labels == label]
image = images[np.random.choice(np.arange(images.shape[0]))]
image = torch.from_numpy(image).float()
image = image.unsqueeze(0)
return Variable(image, volatile=True)
def fetch_fashionmnist_text(label):
"""Randomly generate a number from 0 to 9.
@param label: integer
a integer from 0 to 9
@return: torch.autograd.Variable
Variable wrapped around an integer.
"""
text = torch.LongTensor([label])
return Variable(text, volatile=True)
if __name__ == "__main__":
import os
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('model_path', type=str, help='path to trained model file')
parser.add_argument('--n-samples', type=int, default=64,
help='Number of images and texts to sample [default: 64]')
# condition sampling on a particular images
    parser.add_argument('--condition-on-image', type=int, default=None,
                        help='class label (0-9) of an image to condition generation on')
# condition sampling on a particular text
    parser.add_argument('--condition-on-text', type=int, default=None,
                        help='class label (0-9) of a text to condition generation on')
parser.add_argument('--cuda', action='store_true', default=False,
help='enables CUDA training [default: False]')
args = parser.parse_args()
args.cuda = args.cuda and torch.cuda.is_available()
model = load_checkpoint(args.model_path, use_cuda=args.cuda)
model.eval()
if args.cuda:
model.cuda()
# mode 1: unconditional generation
    if args.condition_on_image is None and args.condition_on_text is None:
mu = Variable(torch.Tensor([0]))
std = Variable(torch.Tensor([1]))
if args.cuda:
mu = mu.cuda()
std = std.cuda()
# mode 2: generate conditioned on image
    elif args.condition_on_image is not None and args.condition_on_text is None:
image = fetch_fashionmnist_image(args.condition_on_image)
if args.cuda:
image = image.cuda()
mu, logvar = model.infer(1, image=image)
std = logvar.mul(0.5).exp_()
# mode 3: generate conditioned on text
    elif args.condition_on_text is not None and args.condition_on_image is None:
text = fetch_fashionmnist_text(args.condition_on_text)
if args.cuda:
text = text.cuda()
mu, logvar = model.infer(1, text=text)
std = logvar.mul(0.5).exp_()
# mode 4: generate conditioned on image and text
    elif args.condition_on_text is not None and args.condition_on_image is not None:
image = fetch_fashionmnist_image(args.condition_on_image)
text = fetch_fashionmnist_text(args.condition_on_text)
if args.cuda:
image = image.cuda()
text = text.cuda()
mu, logvar = model.infer(1, image=image, text=text)
std = logvar.mul(0.5).exp_()
    # sample from a standard gaussian (mean 0, std 1)
sample = Variable(torch.randn(args.n_samples, model.n_latents))
if args.cuda:
sample = sample.cuda()
# sample from particular gaussian by multiplying + adding
mu = mu.expand_as(sample)
std = std.expand_as(sample)
sample = sample.mul(std).add_(mu)
# generate image and text
img_recon = F.sigmoid(model.image_decoder(sample)).cpu().data
txt_recon = F.log_softmax(model.text_decoder(sample), dim=1).cpu().data
# save image samples to filesystem
save_image(img_recon.view(args.n_samples, 1, 28, 28),
'./sample_image.png')
# save text samples to filesystem
with open('./sample_text.txt', 'w') as fp:
txt_recon_np = txt_recon.numpy()
txt_recon_np = np.argmax(txt_recon_np, axis=1).tolist()
for i, item in enumerate(txt_recon_np):
fp.write('Text (%d): %s\n' % (i, LABEL_IX_TO_STRING[item]))
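    # Example invocation (a minimal sketch; the checkpoint path is a placeholder,
    # not from the source):
    #   python sample.py ./trained/model_best.pth.tar --condition-on-text 3 --n-samples 16
    # This writes ./sample_image.png and ./sample_text.txt, with both modalities
    # sampled from the posterior inferred from the "3" class label.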
| 38.616
| 82
| 0.649679
|
454c4644cc05a773de4de9d231ae56786bc51b3f
| 1,930
|
py
|
Python
|
density_funcs.py
|
HarrisonWinch96/DarkDisk_Microlensing
|
e25d59051771318239116a8d2036aca8ce70236d
|
[
"BSD-3-Clause"
] | null | null | null |
density_funcs.py
|
HarrisonWinch96/DarkDisk_Microlensing
|
e25d59051771318239116a8d2036aca8ce70236d
|
[
"BSD-3-Clause"
] | null | null | null |
density_funcs.py
|
HarrisonWinch96/DarkDisk_Microlensing
|
e25d59051771318239116a8d2036aca8ce70236d
|
[
"BSD-3-Clause"
] | null | null | null |
import numpy as np
from astropy import units as u
def rho_baryon(r,z,th,q):
A = 0.04*u.solMass/(u.parsec**3)
hr = 3000.0*u.parsec
R0 = 8200.0*u.parsec
hz = 400.0*u.parsec
return A*np.exp(-1*(r - R0)/hr)*np.exp(-1*np.abs(z)/hz)
def rho_mirrordisk(r,z,th,q): #dark version of the baryonic disk
#use q as 2-element array to rescale z and r
#figure out normalization so total DM stays the same
#maybe make fiducial model have same mass as baryons, not CDM
#so instead of f_DM, we would be constraining rho_mir/rho_bar
A = 0.04*u.solMass/(u.parsec**3) /(q[0]**2*q[1])
hr = 3000.0*u.parsec
R0 = 8200.0*u.parsec
hz = 400.0*u.parsec
return A*np.exp(-1*(r/q[0] - R0)/hr)*np.exp(-1*np.abs(z/q[1])/hz)
def rho_mirrordisk_tilt(r,z,th,q): #tilted by some angles, q = [theta of tilt, +phi]
A = 0.04*u.solMass/(u.parsec**3)
hr = 3000.0*u.parsec
R0 = 8200.0*u.parsec
hz = 400.0*u.parsec
theta = q[0]
phi = q[1]
    x,y,z = r*np.cos(th), r*np.sin(th), z
    x,y = x*np.cos(theta) - y*np.sin(theta), y*np.cos(theta) + x*np.sin(theta) #rotate theta around z axis
    x,y,z = x, y*np.cos(phi) - z*np.sin(phi), z*np.cos(phi) + y*np.sin(phi) #tilt phi around x axis
    x,y = x*np.cos(theta) + y*np.sin(theta), y*np.cos(theta) - x*np.sin(theta) #rotate back -theta around z axis
    r = np.sqrt(x**2 + y**2)
return A*np.exp(-1*(r - R0)/hr)*np.exp(-1*np.abs(z)/hz)
def rho_semis(r, z,th,q): #standard semi-isothermal halo
R = np.sqrt(r**2 + (z/q)**2)
A = 0.01 *u.solMass / (u.parsec**3) #originally 0.0079
R0 = 8200.0*u.parsec
Rc = 5000.0*u.parsec
#print(r,z,R)
return A*(R0**2 + Rc**2)/(q*(R**2 + Rc**2))
def rho_NFW(r,z,th,q): #NFW halo distribution
A = 0.014*u.solMass / (u.parsec**3)
Rs = 16000*u.parsec
R = np.sqrt(r**2 + (z/q)**2)
x = R/Rs
return A/(q*x*(1 + x)**2)
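if __name__ == '__main__':
    # Minimal sanity check (illustrative solar-neighbourhood values, not from the source):
    r = 8200.0*u.parsec
    z = 0.0*u.parsec
    print(rho_baryon(r, z, 0.0, None))  # baryonic disk density near the Sun
    print(rho_semis(r, z, 0.0, 1.0))    # spherical (q=1) semi-isothermal halo density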
| 32.166667
| 111
| 0.568912
|
c25bc629b2b30263bb09fadb5d61bab121524e61
| 1,091
|
py
|
Python
|
max2/inference_player.py
|
thexa4/Spades
|
1b6c5003d5bec13421418e1e563db435fac18286
|
[
"MIT"
] | 1
|
2018-01-27T16:45:51.000Z
|
2018-01-27T16:45:51.000Z
|
max2/inference_player.py
|
thexa4/Spades
|
1b6c5003d5bec13421418e1e563db435fac18286
|
[
"MIT"
] | null | null | null |
max2/inference_player.py
|
thexa4/Spades
|
1b6c5003d5bec13421418e1e563db435fac18286
|
[
"MIT"
] | 1
|
2018-01-27T16:45:56.000Z
|
2018-01-27T16:45:56.000Z
|
from i_player import IPlayer
from max2.gamestate import GameState
class InferencePlayer(IPlayer):
""""Constitutes the very bare necessity to be called a player"""
def __init__(self, model):
self.state = None
self.round = 0
self.score = None
self.model = model
self.samples = ([[] for i in range(70)], [[] for i in range(2)])
def give_hand(self, cards):
self.hand = cards
def make_bid(self, bids):
self.state = GameState(self.model)
self.round = 0
return self.state.bid(self.hand, bids, self.score)
def announce_bids(self, bids):
self.state.store_bids(bids)
def play_card(self, trick, valid_cards):
card = self.state.play(self.round, trick, valid_cards)
self.round = self.round + 1
return card
def announce_trick(self, trick):
self.state.store_trick(trick)
def announce_score(self, score):
self.score = score
def offer_blind_nill(self, bids):
return False
def receive_blind_nill_cards(self, cards):
self.hand += cards
def request_blind_nill_cards(self):
offered_cards = self.hand[-2:]
self.hand = self.hand[:-2]
return offered_cards
| 25.372093
| 66
| 0.718607
|
f651acccb50b59dcac0694cbedb50515ae648210
| 475
|
py
|
Python
|
src/march_madness/settings.py
|
mattrwyrick/March-Madness-Predictor
|
be45a91c5206f902c8ddcbc7b90d768e63427614
|
[
"Apache-2.0"
] | 1
|
2022-03-17T07:10:10.000Z
|
2022-03-17T07:10:10.000Z
|
src/march_madness/settings.py
|
mattrwyrick/March-Madness-Predictor
|
be45a91c5206f902c8ddcbc7b90d768e63427614
|
[
"Apache-2.0"
] | null | null | null |
src/march_madness/settings.py
|
mattrwyrick/March-Madness-Predictor
|
be45a91c5206f902c8ddcbc7b90d768e63427614
|
[
"Apache-2.0"
] | null | null | null |
import os
from pathlib import Path
PROJ_DIR = os.path.dirname(os.path.realpath(__file__))
ROOT_DIR = Path(Path(PROJ_DIR).parent).parent
DATA_DIR = os.path.join(ROOT_DIR, "data")
BRACKET_DIR = os.path.join(DATA_DIR, "brackets")
CONFERENCE_WITH_TEAM_DIR = os.path.join(DATA_DIR, "conferences")
CONFERENCE_OVERALL_DIR = os.path.join(DATA_DIR, "overall")
INDEX_DIR = os.path.join(DATA_DIR, "indexes")
TEAM_YEARS = list(range(2000, 2022))
ACTUAL_EXCLUDE_YEARS = [2020, 2021]
| 27.941176
| 64
| 0.766316
|
dbd15d2088c6746a35fecd23acfd8614e3ef2541
| 386
|
py
|
Python
|
python/qitest/test/projects/testme/test/test_bar.py
|
aldebaran/qibuild
|
efea6fa3744664348717fe5e8df708a3cf392072
|
[
"BSD-3-Clause"
] | 51
|
2015-01-05T14:35:13.000Z
|
2021-07-27T06:46:59.000Z
|
python/qitest/test/projects/testme/test/test_bar.py
|
aldebaran/qibuild
|
efea6fa3744664348717fe5e8df708a3cf392072
|
[
"BSD-3-Clause"
] | 104
|
2015-04-09T10:48:42.000Z
|
2020-09-16T16:33:29.000Z
|
python/qitest/test/projects/testme/test/test_bar.py
|
aldebaran/qibuild
|
efea6fa3744664348717fe5e8df708a3cf392072
|
[
"BSD-3-Clause"
] | 46
|
2015-01-05T14:35:16.000Z
|
2022-02-13T20:39:36.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2012-2021 SoftBank Robotics. All rights reserved.
# Use of this source code is governed by a BSD-style license (see the COPYING file).
""" Test Bar """
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
def test_bar():
""" Test Bar """
assert True
| 27.571429
| 84
| 0.725389
|
0781d4e1b47037e6e5b44c1eadd8e0c4fdc1d61d
| 9,582
|
py
|
Python
|
smnsdkcore/client.py
|
xunmeibuyue/IntelligentPeephole
|
c3bebf8792f019c859539607846971f33fee7cc2
|
[
"Apache-2.0"
] | null | null | null |
smnsdkcore/client.py
|
xunmeibuyue/IntelligentPeephole
|
c3bebf8792f019c859539607846971f33fee7cc2
|
[
"Apache-2.0"
] | null | null | null |
smnsdkcore/client.py
|
xunmeibuyue/IntelligentPeephole
|
c3bebf8792f019c859539607846971f33fee7cc2
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# Copyright (C) 2017. Huawei Technologies Co., LTD. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of Apache License, Version 2.0.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Apache License, Version 2.0 for more detail
"""
SMN Client module
create at 2017/11/4
"""
__author__ = 'pengzl'
import json
import logging
from __init__ import __version__
from auth.iam import Auth
from http.http_client import HttpClient
from smnsdkcore.exception.exceptions import SMNException
from smnsdkcore.auth.iam import AkskNova
from apig_sdk import signer
logger = logging.getLogger(__name__)
class SMNClient():
SMN_ENDPOINT = 'smn.%s.myhuaweicloud.com'
def __init__(
self,
domain_name=None,
password=None,
username=None,
region_id='cn-north-1',
auto_retry=True,
max_retry_time=3,
user_agent='smn-sdk-python/' + __version__,
port=443,
connect_timeout=10
):
"""
constructor for SMNClient
:param domain_name: String, domain_name
:param username: String, username;if use sub user, need to set username
:param password: String, user password
:param region_id: String, region id
:param auto_retry: Boolean
:param max_retry_time: Number
:param user_agent: user_agent, proxy for http(s)
:param port: Number, default is 443 for https
:param connect_timeout: Number, http connect timeout
:return:
"""
self.__domain_name = domain_name
self.__username = username
if not username:
self.__username = domain_name
self.__password = password
self.__region_id = region_id
self.__auto_retry = auto_retry
self.__max_retry_time = max_retry_time
self.__user_agent = user_agent
self._port = port
self._connect_timeout = connect_timeout
self.__authentication = Auth(domain_name=self.__domain_name,
username=self.__username,
password=self.__password,
region_id=self.__region_id
)
def send(self, smn_request):
if self._get_user_agent():
smn_request.add_header('User-Agent', self._get_user_agent())
self._set_authentication(smn_request)
endpoint = self._resovle_endpoint()
if smn_request.get_request_body_param():
req_body = json.dumps(smn_request.get_request_body_param())
else:
req_body = ""
logger.debug('request body is %s', req_body)
httpclient = HttpClient(host=endpoint,
url=self._resolve_url(smn_request),
method=smn_request.get_method(),
headers=smn_request.get_headers(),
body=req_body,
timeout=self._connect_timeout
)
return httpclient.get_https_response()
def _set_authentication(self, request):
x_auth_token = self.__authentication.get_x_auth_token()
request.add_header('X-Auth-Token', x_auth_token)
def _resovle_endpoint(self):
if self.__region_id.lower().startswith('dec'):
region = self.__region_id.split('_')[1]
else:
region = self.__region_id.split('_')[0]
return self.SMN_ENDPOINT % (region)
def _resolve_url(self, request):
project_id = self.__authentication.get_project_id()
return request.get_uri().format(project_id=project_id)
def _get_user_agent(self):
return self.__user_agent
class AkskSMNClient:
SMN_ENDPOINT = 'smn.%s.myhuaweicloud.com'
IAM_ENDPOINT = 'iam.%s.myhuaweicloud.com'
PROJECT_ID_URI = '/v3/projects'
def __init__(
self,
access, secret,
securitytoken=None,
region_id='cn-north-1',
smn_endpoint=None,
iam_endpoint=None,
auto_retry=True,
max_retry_time=3,
user_agent='smn-sdk-python/' + __version__,
port=443,
connect_timeout=10
):
"""
constructor for AkskSMNClient
:param access: String, access
:param secret: String, secret
:param securitytoken: String, when use temporary AKs and SKs is needed.
:param region_id: String, region id
:param smn_endpoint: String, smn_endpoint
:param iam_endpoint: String, iam_endpoint
:param auto_retry: Boolean
:param max_retry_time: Number
:param user_agent: user_agent, proxy for http(s)
:param port: Number, default is 443 for https
:param connect_timeout: Number, http connect timeout
:return:
"""
self.__access = access
self.__secret = secret
self.__securitytoken = securitytoken
self.__region_id = region_id
if smn_endpoint is not None:
self.__smn_endpoint = smn_endpoint
else:
self.__smn_endpoint = self._resovle_endpoint(self.SMN_ENDPOINT)
if iam_endpoint is not None:
self.__iam_endpoint = iam_endpoint
else:
self.__iam_endpoint = self._resovle_endpoint(self.IAM_ENDPOINT)
self.__auto_retry = auto_retry
self.__max_retry_time = max_retry_time
self.__user_agent = user_agent
self._port = port
self._connect_timeout = connect_timeout
def send(self, smn_request):
project_id = self._get_project_id()
uri, query = self._resolve_url(smn_request, project_id)
header = {"content-type": "application/json",
"x-project-id": project_id}
if smn_request.get_request_body_param():
req_body = json.dumps(smn_request.get_request_body_param())
else:
req_body = ""
return self._get_response(smn_request.get_method(), self.__smn_endpoint, uri, header, query=query,
body=req_body)
def _get_project_id(self):
'''get project id by ak/sk or ak/sk/securitytoken'''
header = {"content-type": "application/json"}
response = self._get_response('GET', self.__iam_endpoint, self.PROJECT_ID_URI, header,
query={"name": self.__region_id})
message_map = json.loads(response[2])
try:
project_id = message_map['projects'][0]['id']
except Exception:
raise SMNException(response[0], response[2])
if project_id is None or len(project_id) == 0:
raise SMNException(response[0], 'Failed to get project id by aksk auth. project_id is null.')
return project_id
    def _get_response(self, method, host, uri, header, query=None, body=None):
sig = signer.Signer()
# Set the AK/SK to sign and authenticate the request.
sig.Key = self.__access
sig.Secret = self.__secret
        # Set the request URL and parameters for this API call.
r = signer.HttpRequest()
r.scheme = "https"
# Set request Endpoint.
r.host = host
# Specify a request method, such as GET, PUT, POST, DELETE, HEAD, and PATCH.
        r.method = method
# Set request URI.
r.uri = uri
# Set parameters for the request URL.
r.query = query
# Add header parameters, for example, x-domain-id for invoking a global service and x-project-id for invoking a project-level service.
r.headers = header
if body is not None:
r.body = body
sig.Sign(r)
if self.__securitytoken is not None:
r.headers['X-Security-Token'] = self.__securitytoken
httpclient = HttpClient(host=r.host,
url=r.uri,
method=r.method,
headers=r.headers,
body=r.body,
timeout=self._connect_timeout
)
return httpclient.get_https_response()
def _resolve_url(self, request, project_id):
query = {}
url = request.get_uri().format(project_id=project_id)
# split uri and query string
if '?' not in url:
return url, query
url = url.split('?')
pre_uri = url[0]
query_str = str(url[1])
# split query key and value
if '&' not in query_str:
return pre_uri, query
query_list = query_str.split('&')
for q in query_list:
if q is None or len(q) == 0:
continue
key_val = q.split('=')
if key_val[1] is None or len(key_val[1]) == 0:
continue
query[key_val[0]] = key_val[1]
        return pre_uri, query
def _resovle_endpoint(self, domain_name):
if self.__region_id.lower().startswith('dec'):
region = self.__region_id.split('_')[1]
else:
region = self.__region_id.split('_')[0]
return domain_name % (region)
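# Minimal usage sketch (credentials and region below are placeholders, not from
# the source):
#
#   client = SMNClient(domain_name='mydomain', username='me',
#                      password='***', region_id='cn-north-1')
#   response = client.send(some_smn_request)
#
# where `some_smn_request` is a request object from this SDK; send() returns
# the raw https response from HttpClient.get_https_response().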
| 35.753731
| 142
| 0.596118
|
b495159a29383c02a6ed562f7770144790a32830
| 6,655
|
py
|
Python
|
main.py
|
wtyuan96/PSMNet
|
974750530d243e5fbf8ed9974b86c255707e51c9
|
[
"MIT"
] | null | null | null |
main.py
|
wtyuan96/PSMNet
|
974750530d243e5fbf8ed9974b86c255707e51c9
|
[
"MIT"
] | null | null | null |
main.py
|
wtyuan96/PSMNet
|
974750530d243e5fbf8ed9974b86c255707e51c9
|
[
"MIT"
] | null | null | null |
from __future__ import print_function
import argparse
import os
import random
import torch
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
import torch.nn.functional as F
import numpy as np
import time
import math
from dataloader import listflowfile as lt
from dataloader import SecenFlowLoader as DA
from models import *
parser = argparse.ArgumentParser(description='PSMNet')
parser.add_argument('--maxdisp', type=int, default=192,
                    help='maximum disparity')
parser.add_argument('--model', default='stackhourglass',
help='select model')
parser.add_argument('--datapath', default='/media/jiaren/ImageNet/SceneFlowData/',
help='datapath')
parser.add_argument('--epochs', type=int, default=0,
help='number of epochs to train')
parser.add_argument('--loadmodel', default= './trained/pretrained_sceneflow.tar',
help='load model')
parser.add_argument('--savemodel', default='./',
help='save model')
parser.add_argument('--no-cuda', action='store_true', default=False,
help='enables CUDA training')
parser.add_argument('--seed', type=int, default=1, metavar='S',
help='random seed (default: 1)')
args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()
torch.manual_seed(args.seed)
if args.cuda:
torch.cuda.manual_seed(args.seed)
all_left_img, all_right_img, all_left_disp, test_left_img, test_right_img, test_left_disp = lt.dataloader(args.datapath)
TrainImgLoader = torch.utils.data.DataLoader(
DA.myImageFloder(all_left_img,all_right_img,all_left_disp, True),
batch_size= 12, shuffle= True, num_workers= 8, drop_last=False)
TestImgLoader = torch.utils.data.DataLoader(
DA.myImageFloder(test_left_img,test_right_img,test_left_disp, False),
batch_size= 8, shuffle= False, num_workers= 4, drop_last=False)
if args.model == 'stackhourglass':
model = stackhourglass(args.maxdisp)
elif args.model == 'basic':
model = basic(args.maxdisp)
else:
    raise ValueError('unsupported model: {}'.format(args.model))
if args.cuda:
model = nn.DataParallel(model)
model.cuda()
if args.loadmodel is not None:
print('Load pretrained model')
pretrain_dict = torch.load(args.loadmodel)
model.load_state_dict(pretrain_dict['state_dict'])
print('Number of model parameters: {}'.format(sum([p.data.nelement() for p in model.parameters()])))
optimizer = optim.Adam(model.parameters(), lr=0.001, betas=(0.9, 0.999))
def train(imgL,imgR, disp_L):
model.train()
if args.cuda:
imgL, imgR, disp_true = imgL.cuda(), imgR.cuda(), disp_L.cuda()
#---------
mask = disp_true < args.maxdisp
mask.detach_()
#----
optimizer.zero_grad()
if args.model == 'stackhourglass':
output1, output2, output3 = model(imgL,imgR)
output1 = torch.squeeze(output1,1)
output2 = torch.squeeze(output2,1)
output3 = torch.squeeze(output3,1)
        loss = (0.5*F.smooth_l1_loss(output1[mask], disp_true[mask], size_average=True)
                + 0.7*F.smooth_l1_loss(output2[mask], disp_true[mask], size_average=True)
                + F.smooth_l1_loss(output3[mask], disp_true[mask], size_average=True))
elif args.model == 'basic':
output = model(imgL,imgR)
output = torch.squeeze(output,1)
loss = F.smooth_l1_loss(output[mask], disp_true[mask], size_average=True)
loss.backward()
optimizer.step()
return loss.data
def test(imgL,imgR,disp_true):
model.eval()
if args.cuda:
imgL, imgR, disp_true = imgL.cuda(), imgR.cuda(), disp_true.cuda()
#---------
mask = disp_true < 192
#----
if imgL.shape[2] % 16 != 0:
times = imgL.shape[2]//16
top_pad = (times+1)*16 -imgL.shape[2]
else:
top_pad = 0
if imgL.shape[3] % 16 != 0:
times = imgL.shape[3]//16
right_pad = (times+1)*16-imgL.shape[3]
else:
right_pad = 0
imgL = F.pad(imgL,(0,right_pad, top_pad,0))
imgR = F.pad(imgR,(0,right_pad, top_pad,0))
with torch.no_grad():
output3 = model(imgL,imgR)
output3 = torch.squeeze(output3)
if top_pad !=0:
img = output3[:,top_pad:,:]
else:
img = output3
if len(disp_true[mask])==0:
loss = 0
else:
loss = F.l1_loss(img[mask],disp_true[mask]) #torch.mean(torch.abs(img[mask]-disp_true[mask])) # end-point-error
return loss.data.cpu()
def adjust_learning_rate(optimizer, epoch):
lr = 0.001
print(lr)
for param_group in optimizer.param_groups:
param_group['lr'] = lr
def main():
start_full_time = time.time()
for epoch in range(0, args.epochs):
print('This is %d-th epoch' %(epoch))
total_train_loss = 0
adjust_learning_rate(optimizer,epoch)
## training ##
for batch_idx, (imgL_crop, imgR_crop, disp_crop_L) in enumerate(TrainImgLoader):
start_time = time.time()
loss = train(imgL_crop,imgR_crop, disp_crop_L)
print('Iter %d training loss = %.3f , time = %.2f' %(batch_idx, loss, time.time() - start_time))
total_train_loss += loss
print('epoch %d total training loss = %.3f' %(epoch, total_train_loss/len(TrainImgLoader)))
# SAVE
savefilename = args.savemodel+'/checkpoint_'+str(epoch)+'.tar'
torch.save({
'epoch': epoch,
'state_dict': model.state_dict(),
'train_loss': total_train_loss/len(TrainImgLoader),
}, savefilename)
print('full training time = %.2f HR' %((time.time() - start_full_time)/3600))
# ------------- TEST ------------------------------------------------------------
total_test_loss = 0
for batch_idx, (imgL, imgR, disp_L) in enumerate(TestImgLoader):
test_loss = test(imgL,imgR, disp_L)
print('Iter %d test loss = %.3f' %(batch_idx, test_loss))
total_test_loss += test_loss
print('total test loss = %.3f' % (total_test_loss / len(TestImgLoader)))
# ----------------------------------------------------------------------------------
# SAVE test information
savefilename = args.savemodel + 'testinformation.tar'
torch.save({
'test_loss': total_test_loss / len(TestImgLoader),
}, savefilename)
if __name__ == '__main__':
main()
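# Example invocation (a sketch; the dataset path is a placeholder, not from
# the source):
#   python main.py --maxdisp 192 --model stackhourglass \
#       --datapath /path/to/SceneFlowData/ --epochs 10 --savemodel ./trained/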
| 34.481865
| 235
| 0.602855
|
106e24a874c6a62328dea2694bce22fd57cf4947
| 4,752
|
py
|
Python
|
mcts_pure.py
|
moddent/Gomoku_Deep
|
5d9bca97e6b30db4f99a4686152bcef7a6160ac6
|
[
"MIT"
] | null | null | null |
mcts_pure.py
|
moddent/Gomoku_Deep
|
5d9bca97e6b30db4f99a4686152bcef7a6160ac6
|
[
"MIT"
] | null | null | null |
mcts_pure.py
|
moddent/Gomoku_Deep
|
5d9bca97e6b30db4f99a4686152bcef7a6160ac6
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
A pure implementation of the Monte Carlo Tree Search (MCTS)
@author: Junxiao Song
"""
import numpy as np
import copy
from operator import itemgetter
from tree import TreeNode
def rollout_policy_fn(board):
"""a coarse, fast version of policy_fn used in the rollout phase."""
# rollout randomly
action_probs = np.random.rand(len(board.availables))
return zip(board.availables, action_probs)
def policy_value_fn(board):
"""a function that takes in a state and outputs a list of (action, probability)
tuples and a score for the state"""
# return uniform probabilities and 0 score for pure MCTS
action_probs = np.ones(len(board.availables))/len(board.availables)
return zip(board.availables, action_probs), 0
class MCTS(object):
"""A simple implementation of Monte Carlo Tree Search."""
def __init__(self, policy_value_fn, c_puct=5, n_playout=10000):
"""
policy_value_fn: a function that takes in a board state and outputs
a list of (action, probability) tuples and also a score in [-1, 1]
(i.e. the expected value of the end game score from the current
player's perspective) for the current player.
c_puct: a number in (0, inf) that controls how quickly exploration
converges to the maximum-value policy. A higher value means
relying on the prior more.
"""
self._root = TreeNode(None, 1.0)
self._policy = policy_value_fn
self._c_puct = c_puct
self._n_playout = n_playout
def _playout(self, state):
"""Run a single playout from the root to the leaf, getting a value at
the leaf and propagating it back through its parents.
State is modified in-place, so a copy must be provided.
"""
node = self._root
while True:
if node.is_leaf():
break
# Greedily select next move.
action, node = node.select(self._c_puct)
state.do_move(action)
action_probs, _ = self._policy(state)
# Check for end of game
end, winner = state.game_end()
if not end:
node.expand(action_probs)
# Evaluate the leaf node by random rollout
leaf_value = self._evaluate_rollout(state)
# Update value and visit count of nodes in this traversal.
node.update_recursive(-leaf_value)
def _evaluate_rollout(self, state, limit=1000):
"""Use the rollout policy to play until the end of the game,
returning +1 if the current player wins, -1 if the opponent wins,
and 0 if it is a tie.
"""
player = state.get_current_player()
for i in range(limit):
end, winner = state.game_end()
if end:
break
action_probs = rollout_policy_fn(state)
max_action = max(action_probs, key=itemgetter(1))[0]
state.do_move(max_action)
else:
# If no break from the loop, issue a warning.
print("WARNING: rollout reached move limit")
if winner == -1: # tie
return 0
else:
return 1 if winner == player else -1
def get_move(self, state):
"""Runs all playouts sequentially and returns the most visited action.
state: the current game state
Return: the selected action
"""
for n in range(self._n_playout):
state_copy = copy.deepcopy(state)
self._playout(state_copy)
return max(self._root._children.items(),
key=lambda act_node: act_node[1]._n_visits)[0]
def update_with_move(self, last_move):
"""Step forward in the tree, keeping everything we already know
about the subtree.
"""
if last_move in self._root._children:
self._root = self._root._children[last_move]
self._root._parent = None
else:
self._root = TreeNode(None, 1.0)
def __str__(self):
return "MCTS"
class MCTSPlayer(object):
"""AI player based on MCTS"""
def __init__(self, c_puct=5, n_playout=2000):
self.mcts = MCTS(policy_value_fn, c_puct, n_playout)
def set_player_ind(self, p):
self.player = p
def reset_player(self):
self.mcts.update_with_move(-1)
def get_action(self, board):
sensible_moves = board.availables
if len(sensible_moves) > 0:
move = self.mcts.get_move(board)
self.mcts.update_with_move(-1)
# print(move)
return move
else:
print("WARNING: the board is full")
def __str__(self):
return "MCTS {}".format(self.player)
| 33.942857
| 83
| 0.617214
|
80cdabdc294bec6cbc008910a29629502c9a4e2f
| 7,051
|
py
|
Python
|
digitalComponent/dice.py
|
GLHolmstroff/digitalComponent
|
974aa13eae70aa96fb12d8c9724a30d3d7d1236e
|
[
"Apache-2.0"
] | 1
|
2020-01-16T12:19:00.000Z
|
2020-01-16T12:19:00.000Z
|
digitalComponent/dice.py
|
uev52/digitalComponent
|
974aa13eae70aa96fb12d8c9724a30d3d7d1236e
|
[
"Apache-2.0"
] | null | null | null |
digitalComponent/dice.py
|
uev52/digitalComponent
|
974aa13eae70aa96fb12d8c9724a30d3d7d1236e
|
[
"Apache-2.0"
] | 1
|
2019-03-29T12:56:57.000Z
|
2019-03-29T12:56:57.000Z
|
from items import *
import random
class dice(clickable):
def __init__(self,x,y,w,h,name, val ,backgroundColor = color(255)):
super(dice, self).__init__(x,y,w,h,name,backgroundColor = backgroundColor)
self.val=val
def display(self):
fill(255)
if self.val == 1:
self.diceOne()
if self.val == 2:
self.diceTwo()
if self.val == 3:
self.diceThree()
if self.val == 4:
self.diceFour()
if self.val == 5:
self.diceFive()
if self.val == 6:
self.diceSix()
def onClick(self, *args):
self.diceRoll()
def onHover(self):
pass
def diceRoll(self):
self.val = random.randint(1,6)
def diceOne(self):
fill(self.backgroundColor)
rect (self.x,self.y,self.w,self.h,10,10,10,10)
fill(10)
ellipse(self.x + 0.5*self.w,self.y+0.5*self.h,0.2*self.w,0.2*self.h)
def diceTwo(self):
fill(self.backgroundColor)
rect(self.x,self.y,self.w,self.h,10,10,10,10)
fill(10)
ellipse(self.x + 0.25 * self.w, self.y + 0.75 * self.h, 0.2*self.w, 0.2*self.h)
ellipse(self.x + 0.75 * self.w, self.y + 0.25 * self.h, 0.2*self.w, 0.2*self.h)
def diceThree(self):
fill(self.backgroundColor)
rect (self.x,self.y,self.w,self.h,10,10,10,10)
fill(10)
ellipse(self.x + 0.25 * self.w, self.y + 0.75 * self.h, 0.2*self.w, 0.2*self.h)
ellipse(self.x + 0.5*self.w,self.y+0.5*self.h,0.2*self.w,0.2*self.h)
ellipse(self.x + 0.75 * self.w, self.y + 0.25 * self.h, 0.2*self.w, 0.2*self.h)
def diceFour(self):
fill(self.backgroundColor)
rect (self.x,self.y,self.w,self.h,10,10,10,10)
fill(10)
ellipse(self.x + 0.25 * self.w, self.y + 0.25 * self.h, 0.2*self.w, 0.2*self.h)
ellipse(self.x + 0.75 * self.w, self.y + 0.25 * self.h, 0.2*self.w, 0.2*self.h)
ellipse(self.x + 0.25 * self.w, self.y + 0.75 * self.h, 0.2*self.w, 0.2*self.h)
ellipse(self.x + 0.75 * self.w, self.y + 0.75 * self.h, 0.2*self.w, 0.2*self.h)
def diceFive(self):
fill(self.backgroundColor)
rect (self.x,self.y,self.w,self.h,10,10,10,10)
fill(10)
ellipse(self.x + 0.25 * self.w, self.y + 0.25 * self.h, 0.2*self.w, 0.2*self.h)
ellipse(self.x + 0.75 * self.w, self.y + 0.25 * self.h, 0.2*self.w, 0.2*self.h)
ellipse(self.x + 0.25 * self.w, self.y + 0.75 * self.h, 0.2*self.w, 0.2*self.h)
ellipse(self.x + 0.75 * self.w, self.y + 0.75 * self.h, 0.2*self.w, 0.2*self.h)
ellipse(self.x + 0.5*self.w,self.y+0.5*self.h,0.2*self.w,0.2*self.h)
def diceSix(self):
fill(self.backgroundColor)
rect (self.x,self.y,self.w,self.h,10,10,10,10)
fill(10)
ellipse(self.x + 0.25 * self.w, self.y + 0.25 * self.h, 0.2*self.w, 0.2*self.h)
ellipse(self.x + 0.75 * self.w, self.y + 0.25 * self.h, 0.2*self.w, 0.2*self.h)
ellipse(self.x + 0.25 * self.w, self.y + 0.75 * self.h, 0.2*self.w, 0.2*self.h)
ellipse(self.x + 0.75 * self.w, self.y + 0.75 * self.h, 0.2*self.w, 0.2*self.h)
ellipse(self.x + 0.25 * self.w, self.y + 0.5 * self.h, 0.2*self.w, 0.2*self.h)
ellipse(self.x + 0.75 * self.w, self.y + 0.5 * self.h, 0.2*self.w, 0.2*self.h)
def copy(self):
return dice(self.x,self.y,self.w,self.h,self.name,self.val,self.backgroundColor)
class diceGroup(clickable):
def __init__(self,x,y,w,h,name, *dice):
super(diceGroup, self).__init__(x,y,w,h,name)
self.dice=list()
self._observers = []
for d in dice:
self.dice.append(d)
def display(self):
for d in self.dice:
d.display()
def onClick(self):
for d in self.dice:
d.onClick()
def addDice(self,d):
        if isinstance(d, dice):
self.dice.append(d)
def bindTo(self,callback):
self._observers.append(callback)
# some specific items I made for the setup screen
class setupDice(dice):
def __init__(self, x,y,w,h,name, val, backgroundColor = color(255)):
super(setupDice, self).__init__(x,y,w,h,name, val,backgroundColor = backgroundColor)
self.active = None
class setupDiceGroup(diceGroup):
def __init__(self,x,y,w,h,name,game,*dice):
super(setupDiceGroup,self).__init__(x,y,w,h,name,*dice)
self.results = []
self.amountActive = 2
self.winningDice = 0
self.game = game
self.changeAmount(2)
def onClick(self):
self.winningDice = 0
self.results = []
for d in self.dice:
if d.active:
d.onClick()
self.results.append(d.val)
        try:
            self.winningDice = self.results.index(max(self.results))
        except ValueError:
            self.winningDice = 0
self.game._currentPlayer = self.game.setPlayer(self.game.players[self.winningDice])
self.game.currentPlayerIndex = self.winningDice
def changeAmount(self, amount):
if amount == 2:
self.amountActive = 2
self.dice[0].active = True
self.dice[1].active = True
self.dice[2].active = False
self.dice[3].active = False
elif amount == 3:
self.amountActive = 3
self.dice[0].active = True
self.dice[1].active = True
self.dice[2].active = True
self.dice[3].active = False
else:
self.amountActive = 4
self.dice[0].active = True
self.dice[1].active = True
self.dice[2].active = True
self.dice[3].active = True
def display(self):
for d in self.dice:
if d.active:
d.display()
class varDiceGroup(diceGroup):
def __init__(self,x,y,w,h,name,parents,attrname,function,*dice):
super(varDiceGroup,self).__init__(x,y,w,h,name,*dice)
self.amount = 0
self.parents = parents
self.attrname = attrname
for x in self.parents:
x.bindTo(self.update)
self.function = function
self.sum = 0
def update(self, value):
self.amount = getattr(value,self.attrname)
self.resetDice()
def resetDice(self):
self.dice = list()
for x in range(self.amount):
d = dice(self.x + 100*x,self.y,100,100,'',1)
self.dice.append(d.copy())
def onClick(self):
self.sum = 0
for d in self.dice:
d.onClick()
self.sum += d.val
for callback in self._observers:
callback(self)
self.function(self.sum)
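# Minimal usage sketch inside a Processing.py host sketch (setup()/draw() and
# the global fill()/rect()/ellipse() calls are provided by Processing):
#
#   d = dice(10, 10, 100, 100, 'd6', 1)
#   def draw():
#       background(200)
#       d.display()
#   def mousePressed():
#       d.onClick()   # re-rolls the die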
| 34.563725
| 92
| 0.527585
|
f975b7c167451e42b28d50ff52d0dcd07215985e
| 682
|
py
|
Python
|
_config_example.py
|
wastedsummer/weedlings
|
7f27f08455fd7e40587a38a03694bda171e2d360
|
[
"MIT"
] | null | null | null |
_config_example.py
|
wastedsummer/weedlings
|
7f27f08455fd7e40587a38a03694bda171e2d360
|
[
"MIT"
] | null | null | null |
_config_example.py
|
wastedsummer/weedlings
|
7f27f08455fd7e40587a38a03694bda171e2d360
|
[
"MIT"
] | null | null | null |
"""
This is template on how to configurate your setup for the weedlings model.
You may create a copy of this script and name it "_conf.py" so it will be not be tracked by the ".gitignore"
and is stored only locally.
The "_conf.py" than can be used in other scripts.
"""
PATH = "/home/yourname/projects/weedlings/" # overall path
DATA_PATH = "/home/yourname/projects/weedlings/data/" # write your path to the dataset here
RAW_DATA_PATH = DATA_PATH + "raw_data/" # path where raw data is stored
SPLIT_DATA_PATH = DATA_PATH + "split_data/" # path where the data, that we will use for the model is stored
MODEL_PATH = DATA_PATH + "models/" # path where the saved models will be stored
| 56.833333
| 108
| 0.750733
|
9afe6bb6d6d927862b2bab5cf66a17a6b029c98b
| 15,125
|
py
|
Python
|
addons/blender.NodeOSC/server/_base.py
|
trisadmeslek/V-Sekai-Blender-tools
|
0d8747387c58584b50c69c61ba50a881319114f8
|
[
"MIT"
] | 2
|
2021-12-21T16:38:58.000Z
|
2022-01-08T00:56:35.000Z
|
addons/blender.NodeOSC/server/_base.py
|
trisadmeslek/V-Sekai-Blender-tools
|
0d8747387c58584b50c69c61ba50a881319114f8
|
[
"MIT"
] | 1
|
2022-01-29T05:46:50.000Z
|
2022-01-29T05:46:50.000Z
|
addons/blender.NodeOSC/server/_base.py
|
trisadmeslek/V-Sekai-Blender-tools
|
0d8747387c58584b50c69c61ba50a881319114f8
|
[
"MIT"
] | 1
|
2021-11-07T19:41:34.000Z
|
2021-11-07T19:41:34.000Z
|
import bpy
import types
import sys
from select import select
import socket
import errno
import mathutils
import traceback
from math import radians
from bpy.props import *
from ast import literal_eval as make_tuple
from .callbacks import *
from ..nodes.nodes import *
def make_osc_messages(myOscKeys, myOscMsg):
envars = bpy.context.scene.nodeosc_envars
for item in myOscKeys:
if item.dp_format_enable == False:
# we cannot deal with a datapath string that has format syntax
#print( "sending :{}".format(item) )
prop = None
if item.node_type == 1:
prop = eval(item.data_path + ".getValue()")
else:
prop = eval(item.data_path)
# now make the values to be sent a tuple (unless its a string or None)
if isinstance(prop, (bool, int, float)):
prop = (prop,)
elif prop is None:
prop = 'None'
elif isinstance(prop, (mathutils.Vector, mathutils.Quaternion, mathutils.Euler, mathutils.Matrix)):
prop = tuple(prop)
stringProp = str(prop)
if not (item.filter_repetition and envars.repeat_argument_filter_OUT) and stringProp != item.value:
item.value = stringProp
# make sure the osc indices are a tuple
indices = make_tuple(item.osc_index)
if isinstance(indices, int):
indices = (indices,)
# sort the properties according to the osc_indices
if prop is not None and not isinstance(prop, str) and len(indices) > 0:
prop = tuple(prop[i] for i in indices)
myOscMsg[item.osc_address] = prop
return myOscMsg
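# Example shape of the dict produced above (illustrative address and values,
# not from the source):
#   {'/cube/location': (0.0, 1.0, 2.0), '/frame': (42,)}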
#######################################
# PythonOSC Server BASE CLASS #
#######################################
class OSC_OT_OSCServer(bpy.types.Operator):
_timer = None
count = 0
#####################################
# CUSTOMIZEABLE FUNCTIONS:
#inputServer = "" #for the receiving socket
#outputServer = "" #for the sending socket
#dispatcher = "" #dispatcher function
def sendingOSC(self, context, event):
pass
# setup the sending server
def setupInputServer(self, context, envars):
pass
# setup the receiving server
def setupOutputServer(self, context, envars):
pass
# add method
def addMethod(self, address, data):
pass
# add default method
    def addDefaultMethod(self):
pass
# start receiving
def startupInputServer(self, context, envars):
pass
# stop receiving
def shutDownInputServer(self, context, envars):
pass
#
#
#####################################
#######################################
# MODAL Function #
#######################################
def modal(self, context, event):
envars = bpy.context.scene.nodeosc_envars
if envars.isServerRunning == False:
return self.cancel(context)
if envars.message_monitor and envars.error != "":
self.report({'WARNING'}, envars.error)
print(envars.error)
envars.error = ""
if event.type == 'TIMER':
#hack to refresh the GUI
self.count = self.count + envars.output_rate
if envars.message_monitor == True:
if self.count >= 100:
self.count = 0
context.area.tag_redraw()
# only available spot where updating the sorcar tree doesn't throw errors...
executeSorcarNodeTrees(context)
try:
start = time.perf_counter()
self.sendingOSC(context, event)
# calculate the execution time
end = time.perf_counter()
bpy.context.scene.nodeosc_envars.executionTimeOutput = end - start
except Exception as err:
self.report({'WARNING'}, "Output error: {0}".format(err))
return self.cancel(context)
return {'PASS_THROUGH'}
#######################################
# Setup OSC Receiver and Sender #
#######################################
def execute(self, context):
envars = bpy.context.scene.nodeosc_envars
if envars.port_in == envars.port_out:
self.report({'WARNING'}, "Ports must be different.")
return{'FINISHED'}
if envars.isServerRunning == False:
#Setting up the dispatcher for receiving
try:
self.setupInputServer(context, envars)
self.setupOutputServer(context, envars)
# all the osc messages handlers ready for registering to the server
oscHandlerDict = {}
oscHandleList = []
# register a message for executing
if envars.node_update == "MESSAGE" and hasAnimationNodes():
# oscHandleList content:
# callback type
# blender datapath (i.e. bpy.data.objects['Cube'])
# blender property (i.e. location)
# blender property index (i.e. location[index])
# osc argument index to use (should be a tuplet, like (1,2,3))
# node type
# datapath format string
# loop range string
# filter eval string
oscHandleList = (-1, None, None, None, None, 0, '', '', True)
self.addOscHandler(oscHandlerDict, envars.node_frameMessage, oscHandleList)
for item in bpy.context.scene.NodeOSC_keys:
filter_eval = True
if item.filter_enable:
filter_eval = item.filter_eval
if item.osc_direction != "OUTPUT" and item.enabled:
if item.dp_format_enable == False:
# make osc index into a tuple ..
oscIndex = make_tuple(item.osc_index)
# ... and don't forget the corner case
if isinstance(oscIndex, int):
oscIndex = (oscIndex,)
try:
oscHandleList = None
if item.data_path.find('][') != -1 and (item.data_path[-2:] == '"]' or item.data_path[-2:] == '\']'):
#For custom properties
# like bpy.data.objects['Cube']['customProp']
prop = item.data_path[item.data_path.rindex('['):]
prop = prop[2:-2] # get rid of [' ']
datapath = item.data_path[0:item.data_path.rindex('[')]
oscHandleList = [1, eval(datapath), prop, item.idx, oscIndex, item.node_type, '', '', filter_eval]
elif item.data_path[-1] == ']':
#For normal properties with index in brackets
# like bpy.data.objects['Cube'].location[0]
datapath = item.data_path[0:item.data_path.rindex('.')]
prop = item.data_path[item.data_path.rindex('.') + 1:item.data_path.rindex('[')]
prop_index = item.data_path[item.data_path.rindex('[') + 1:item.data_path.rindex(']')]
oscHandleList = [3, eval(datapath), prop, int(prop_index), oscIndex, item.node_type, '', '', filter_eval]
elif item.data_path[-1] == ')':
# its a function call
oscHandleList = [7, item.data_path, '', item.idx, oscIndex, item.node_type, '', '', filter_eval]
else:
#without index in brackets
datapath = item.data_path[0:item.data_path.rindex('.')]
prop = item.data_path[item.data_path.rindex('.') + 1:]
if isinstance(getattr(eval(datapath), prop), (int, float, str)):
# property is single value
oscHandleList = [2, eval(datapath), prop, item.idx, oscIndex, item.node_type, '', '', filter_eval]
else:
# property is array
oscHandleList = [4, eval(datapath), prop, item.idx, oscIndex, item.node_type, '', '', filter_eval]
if oscHandleList != None:
self.addOscHandler(oscHandlerDict, item.osc_address.strip(), oscHandleList)
else:
self.report({'WARNING'}, "Unable to create listener for: object '"+item.data_path+"' with id '"+item.props+"' : {0}".format(err))
except Exception as err:
self.report({'WARNING'}, "Register custom handle: object '"+item.data_path+"' with id '"+item.props+"' : {0}".format(err))
else:
oscIndex = item.osc_index
try:
oscHandleList = None
if item.loop_enable:
oscHandleList = [10, item.data_path, '', 0, item.osc_index, item.node_type, item.dp_format, item.loop_range, filter_eval]
else:
oscHandleList = [10, item.data_path, '', 0, item.osc_index, item.node_type, item.dp_format, '', filter_eval]
if oscHandleList != None:
self.addOscHandler(oscHandlerDict, item.osc_address.strip(), oscHandleList)
else:
self.report({'WARNING'}, "Unable to create listener for: object '"+item.data_path+"' with id '"+item.props+"' : {0}".format(err))
except Exception as err:
self.report({'WARNING'}, "Register custom handle: object '"+item.data_path+"' with id '"+item.props+"' : {0}".format(err))
# lets go and find all nodes in all nodetrees that are relevant for us
nodes_createCollections()
for item in bpy.context.scene.NodeOSC_nodes:
filter_eval = True
if item.osc_direction != "OUTPUT":
# make osc index into a tuple ..
oscIndex = make_tuple(item.osc_index)
# ... and don't forget the corner case
if isinstance(oscIndex, int):
oscIndex = (oscIndex,)
try:
if item.node_data_type == "SINGLE":
oscHandleList = [5, eval(item.data_path), item.props, item.idx, oscIndex, item.node_type, '', '', filter_eval]
elif item.node_data_type == "LIST":
oscHandleList = [6, eval(item.data_path), item.props, item.idx, oscIndex, item.node_type, '', '', filter_eval]
self.addOscHandler(oscHandlerDict, item.osc_address.strip(), oscHandleList)
except Exception as err:
self.report({'WARNING'}, "Register node handle: object '"+item.data_path+"' with id '"+item.props+"' : {0}".format(err))
# register all oscHandles on the server
for address, oscHandles in oscHandlerDict.items():
self.addMethod(address, oscHandles)
# this provides the callback functions with the oscHandles
setOscHandlers(oscHandlerDict)
# register the default method for unregistered addresses
self.addDefaultMethod()
# startup the receiving server
self.startupInputServer(context, envars)
# register the execute queue method
bpy.app.timers.register(execute_queued_OSC_callbacks)
#inititate the modal timer thread
context.window_manager.modal_handler_add(self)
self._timer = context.window_manager.event_timer_add(envars.output_rate/1000, window = context.window)
except Exception as err:
self.report({'WARNING'}, "Server startup: {0}".format(err))
return {'CANCELLED'}
envars.isServerRunning = True
self.report({'INFO'}, "Server successfully started!")
return {'RUNNING_MODAL'}
else:
self.report({'INFO'}, "Server stopped!")
envars.isServerRunning = False
return{'FINISHED'}
def cancel(self, context):
envars = bpy.context.scene.nodeosc_envars
self.shutDownInputServer(context, envars)
context.window_manager.event_timer_remove(self._timer)
# hack to check who is calling the cancel method.
# see https://blender.stackexchange.com/questions/23126/is-there-a-way-to-execute-code-before-blender-is-closing
traceback_elements = traceback.format_stack()
# if the stack has 2 elements, it is because the server stop has been pushed.
# otherwise it might be loading a new project which would cause an exception
# and stop the proper shutdown of the server..
        if len(traceback_elements) == 2:
bpy.app.timers.unregister(execute_queued_OSC_callbacks)
return {'CANCELLED'}
# will take an address and a oscHandle data packet.
# if the address has already been used, the package will be added to the packagelist
def addOscHandler(self, handleDict, address, oscHandlePackage):
oldpackage = handleDict.get(address)
        if oldpackage is None:
oldpackage = [oscHandlePackage]
else:
oldpackage += [oscHandlePackage]
handleDict[address] = oldpackage
| 46.826625
| 165
| 0.492496
|
c62f577f56ac303db1d6295a6c9a2b137dbd95b3
| 256
|
py
|
Python
|
0x04-python-more_data_structures/10-best_score.py
|
JRodriguez9510/holbertonschool-higher_level_programming-2
|
50e788cc0b03de308ed9bc90007dd19edb2efed5
|
[
"MIT"
] | 1
|
2021-01-27T03:13:32.000Z
|
2021-01-27T03:13:32.000Z
|
0x04-python-more_data_structures/10-best_score.py
|
JRodriguez9510/holbertonschool-higher_level_programming-2
|
50e788cc0b03de308ed9bc90007dd19edb2efed5
|
[
"MIT"
] | null | null | null |
0x04-python-more_data_structures/10-best_score.py
|
JRodriguez9510/holbertonschool-higher_level_programming-2
|
50e788cc0b03de308ed9bc90007dd19edb2efed5
|
[
"MIT"
] | 2
|
2021-01-09T04:45:30.000Z
|
2021-07-13T04:23:47.000Z
|
#!/usr/bin/python3
def best_score(a_dictionary):
    """Return the key with the biggest integer value, or None."""
    if not a_dictionary:
        return None
    maxkey = None
    maxval = None
    for k, v in a_dictionary.items():
        if maxval is None or v > maxval:
            maxval = v
            maxkey = k
    return maxkey
| 21.333333
| 37
| 0.5625
|
1b1153c04a89d5443e4881c56376dd42a3a8bad7
| 2,508
|
py
|
Python
|
builder/builder.py
|
napnac/napnac.fr
|
2df4e2428c7a6be9e5a4f0a3cae7d89e78f8a368
|
[
"MIT"
] | 1
|
2017-10-18T17:19:31.000Z
|
2017-10-18T17:19:31.000Z
|
builder/builder.py
|
napnac/napnac.fr
|
2df4e2428c7a6be9e5a4f0a3cae7d89e78f8a368
|
[
"MIT"
] | 1
|
2015-10-14T16:20:19.000Z
|
2015-10-14T16:21:53.000Z
|
builder/builder.py
|
napnac/napnac.fr
|
2df4e2428c7a6be9e5a4f0a3cae7d89e78f8a368
|
[
"MIT"
] | 1
|
2015-10-12T18:00:46.000Z
|
2015-10-12T18:00:46.000Z
|
import logging
import os
import traceback
from dataclasses import dataclass
from enum import Enum
from pathlib import Path
from typing import List
import jinja2
from page import Page
jinja_env = jinja2.Environment(loader=jinja2.FileSystemLoader("templates"))
# To get rid of useless empty lines in the html output
jinja_env.trim_blocks = True
jinja_env.lstrip_blocks = True
class RenderStatus(Enum):
FAILED = 0
RENDERED = 1
SKIPPED = 2
@dataclass
class Builder:
"""Core class to build and render all the input pages."""
page_paths: List[Path]
build_dir: Path
def run(self) -> None:
nb_failed = 0
nb_rendered = 0
nb_skipped = 0
for path in self.page_paths:
try:
status = self.render(path)
if status == RenderStatus.RENDERED:
logging.info(f"Rendered '{path}'")
nb_rendered += 1
elif status == RenderStatus.SKIPPED:
nb_skipped += 1
except Exception:
logging.error(f"Could not render '{path}':\n{traceback.format_exc()}")
nb_failed += 1
logging.info(
f"Total number of pages rendered: {nb_rendered} "
f"({nb_skipped} skipped and {nb_failed} failed)"
)
def render(self, path: Path) -> RenderStatus:
page = Page(path)
output_path = Path(self.build_dir, page.metadata["path"])
# Skip the page if the source/metadata file has not changed since last
# rendering. We cannot really encode this information in the Makefile
# dependencies because the output path is not the same as input path and
# must be parsed in the metadata.
try:
output_mtime = os.path.getmtime(output_path)
if (output_mtime > os.path.getmtime(page.source_file) and
output_mtime > os.path.getmtime(page.metadata_file)):
return RenderStatus.SKIPPED
except OSError:
pass
template = jinja_env.get_template(page.metadata["template"] + ".html.jinja")
context = {
"html_content": page.to_html(),
"metadata": page.metadata,
}
output = template.render(context)
# Make sure subdirectories are created if needed
output_path.parent.mkdir(parents=True, exist_ok=True)
with open(output_path, "w+") as f:
f.write(output)
return RenderStatus.RENDERED
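# Minimal usage sketch (paths are illustrative, not from the source):
#
#   logging.basicConfig(level=logging.INFO)
#   pages = sorted(Path("content").glob("**/*.md"))
#   Builder(page_paths=pages, build_dir=Path("build")).run()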
| 31.746835
| 86
| 0.615231
|
eadf694985761f79a5d7e3599c7209ab1bc09e31
| 773
|
py
|
Python
|
notebooks/tutorial.py
|
webteckie/iJupyterNotebooks
|
028351ad525d22f48a9795e8cca48a7b8a9d4bc3
|
[
"MIT"
] | 1
|
2020-08-24T13:37:04.000Z
|
2020-08-24T13:37:04.000Z
|
notebooks/tutorial.py
|
webteckie/iJupyterNotebooks
|
028351ad525d22f48a9795e8cca48a7b8a9d4bc3
|
[
"MIT"
] | null | null | null |
notebooks/tutorial.py
|
webteckie/iJupyterNotebooks
|
028351ad525d22f48a9795e8cca48a7b8a9d4bc3
|
[
"MIT"
] | null | null | null |
# To add a new cell, type '# %%'
# To add a new markdown cell, type '# %% [markdown]'
# %%
from datetime import datetime
print ("Today is:" + str(datetime.now()))
# %%
def say_hello(recipient):
return 'Hi, {}!'.format(recipient)
say_hello('Curious') + " Welcome to the Jupyter Notebooks tutorial!"
# %%
import numpy as np
def square(x):
return x * x
print ("You can use the awesome numpy to do things like:")
x = np.random.randint(1, 10)
print ("The square of {} is: ".format(x) + str(square(x)))
# %%
import os
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(style="darkgrid")
df = pd.read_csv(os.getcwd() + '/TutorialNotebooks/UserSample.csv')
df.head()
# %%
print ("Total records: " + str(len(df)))
df.dtypes
# %%
| 17.976744
| 68
| 0.653299
|
90851723b29fc9a099f7684981690e3e90cef938
| 2,167
|
py
|
Python
|
cliprt/tests/destination_worksheet_test.py
|
mhodgesatuh/cliprt
|
1410f54bdab0cf3d3f418b059b5a298a401926c1
|
[
"Apache-2.0"
] | 1
|
2021-08-29T02:15:22.000Z
|
2021-08-29T02:15:22.000Z
|
cliprt/tests/destination_worksheet_test.py
|
mhodgesatuh/cliprt
|
1410f54bdab0cf3d3f418b059b5a298a401926c1
|
[
"Apache-2.0"
] | null | null | null |
cliprt/tests/destination_worksheet_test.py
|
mhodgesatuh/cliprt
|
1410f54bdab0cf3d3f418b059b5a298a401926c1
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
"""
Project: CLIPRT - Client Information Parsing and Reporting Tool.
@author: mhodges
Copyright 2021 Michael Hodges
"""
from cliprt.classes.client_information_workbook import ClientInformationWorkbook
from cliprt.classes.cliprt_settings import CliprtSettings
from cliprt.classes.destination_worksheet import DestinationWorksheet
class DestinationWorksheetTest:
"""
Data destination worksheet testing harness.
"""
# Test data.
wb_file = 'cliprt/tests/test_workbook.xlsx'
client_info = ClientInformationWorkbook(wb_file)
dest_ws = DestinationWorksheet(client_info.wb, 'fb')
client_info.dest_ws_reg.add_ws(client_info.wb, 'fb')
settings = CliprtSettings()
def init_test(self):
"""
Unit test
"""
assert self.dest_ws.dest_ind == 'fb'
assert self.dest_ws.first_row_idx == 1
assert self.dest_ws.next_col_idx == 1
assert self.dest_ws.next_row_idx == 2
assert self.dest_ws.ws_name == self.dest_ws.DEST_WS_NAME_PREFIX + 'fb'
        assert self.dest_ws.ws is not None
def update_cell_test(self):
"""
Unit test
"""
# Empty cell data test.
assert self.dest_ws.update_cell(2, 1, None)
# Formatted data tests.
assert self.dest_ws.update_cell(3, 1, '12/31/2021', self.settings.DATE_FORMAT)
assert self.dest_ws.update_cell(3, 2, 'Doe, John', self.settings.NAME_FORMAT)
assert self.dest_ws.update_cell(3, 3, '123-1234', self.settings.PHONE_FORMAT)
# Identical data avoidance test.
self.dest_ws.update_cell(4, 1, 'cell_data')
self.dest_ws.update_cell(4, 1, 'cell_data')
assert self.dest_ws.ws.cell(4, 1).value == 'cell_data'
# Data collection test.
self.dest_ws.update_cell(4, 1, 'cell_data_02')
assert self.dest_ws.ws.cell(4, 1).value == 'cell_data, cell_data_02'
def update_column_headings_test(self):
"""
Unit test
"""
self.dest_ws.dest_de_list['de_heading'] = 1
self.dest_ws.update_column_headings()
assert self.dest_ws.ws.cell(1, 1).value == 'de_heading'
| 35.52459
| 86
| 0.669589
|
d134e3c3b04dfc11ba186cad71e0cdb835b283e8
| 35,515
|
py
|
Python
|
Lib/test/test_importlib/test_util.py
|
livioso/cpython
|
077061a7b24917aaf31057885c69919c5a553c88
|
[
"PSF-2.0"
] | 3
|
2019-04-23T11:06:38.000Z
|
2021-03-03T12:17:16.000Z
|
Lib/test/test_importlib/test_util.py
|
livioso/cpython
|
077061a7b24917aaf31057885c69919c5a553c88
|
[
"PSF-2.0"
] | 4
|
2020-04-02T14:59:42.000Z
|
2021-02-10T14:30:18.000Z
|
Lib/test/test_importlib/test_util.py
|
livioso/cpython
|
077061a7b24917aaf31057885c69919c5a553c88
|
[
"PSF-2.0"
] | 2
|
2018-05-03T01:08:13.000Z
|
2019-12-02T03:03:43.000Z
|
from . import util
abc = util.import_importlib('importlib.abc')
init = util.import_importlib('importlib')
machinery = util.import_importlib('importlib.machinery')
importlib_util = util.import_importlib('importlib.util')
import contextlib
import importlib.util
import os
import pathlib
import string
import sys
from test import support
import types
import unittest
import unittest.mock
import warnings
class DecodeSourceBytesTests:
source = "string ='ü'"
    def test_utf8_default(self):
source_bytes = self.source.encode('utf-8')
self.assertEqual(self.util.decode_source(source_bytes), self.source)
def test_specified_encoding(self):
source = '# coding=latin-1\n' + self.source
source_bytes = source.encode('latin-1')
assert source_bytes != source.encode('utf-8')
self.assertEqual(self.util.decode_source(source_bytes), source)
def test_universal_newlines(self):
source = '\r\n'.join([self.source, self.source])
source_bytes = source.encode('utf-8')
self.assertEqual(self.util.decode_source(source_bytes),
'\n'.join([self.source, self.source]))
(Frozen_DecodeSourceBytesTests,
Source_DecodeSourceBytesTests
) = util.test_both(DecodeSourceBytesTests, util=importlib_util)
class ModuleFromSpecTests:
def test_no_create_module(self):
class Loader:
def exec_module(self, module):
pass
spec = self.machinery.ModuleSpec('test', Loader())
with self.assertRaises(ImportError):
module = self.util.module_from_spec(spec)
def test_create_module_returns_None(self):
class Loader(self.abc.Loader):
def create_module(self, spec):
return None
spec = self.machinery.ModuleSpec('test', Loader())
module = self.util.module_from_spec(spec)
self.assertIsInstance(module, types.ModuleType)
self.assertEqual(module.__name__, spec.name)
def test_create_module(self):
name = 'already set'
class CustomModule(types.ModuleType):
pass
class Loader(self.abc.Loader):
def create_module(self, spec):
module = CustomModule(spec.name)
module.__name__ = name
return module
spec = self.machinery.ModuleSpec('test', Loader())
module = self.util.module_from_spec(spec)
self.assertIsInstance(module, CustomModule)
self.assertEqual(module.__name__, name)
def test___name__(self):
spec = self.machinery.ModuleSpec('test', object())
module = self.util.module_from_spec(spec)
self.assertEqual(module.__name__, spec.name)
def test___spec__(self):
spec = self.machinery.ModuleSpec('test', object())
module = self.util.module_from_spec(spec)
self.assertEqual(module.__spec__, spec)
def test___loader__(self):
loader = object()
spec = self.machinery.ModuleSpec('test', loader)
module = self.util.module_from_spec(spec)
self.assertIs(module.__loader__, loader)
def test___package__(self):
spec = self.machinery.ModuleSpec('test.pkg', object())
module = self.util.module_from_spec(spec)
self.assertEqual(module.__package__, spec.parent)
def test___path__(self):
spec = self.machinery.ModuleSpec('test', object(), is_package=True)
module = self.util.module_from_spec(spec)
self.assertEqual(module.__path__, spec.submodule_search_locations)
def test___file__(self):
spec = self.machinery.ModuleSpec('test', object(), origin='some/path')
spec.has_location = True
module = self.util.module_from_spec(spec)
self.assertEqual(module.__file__, spec.origin)
def test___cached__(self):
spec = self.machinery.ModuleSpec('test', object())
spec.cached = 'some/path'
spec.has_location = True
module = self.util.module_from_spec(spec)
self.assertEqual(module.__cached__, spec.cached)
(Frozen_ModuleFromSpecTests,
Source_ModuleFromSpecTests
) = util.test_both(ModuleFromSpecTests, abc=abc, machinery=machinery,
util=importlib_util)
class ModuleForLoaderTests:
"""Tests for importlib.util.module_for_loader."""
@classmethod
def module_for_loader(cls, func):
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
return cls.util.module_for_loader(func)
def test_warning(self):
        # Should raise a DeprecationWarning when used.
with warnings.catch_warnings():
warnings.simplefilter('error', DeprecationWarning)
with self.assertRaises(DeprecationWarning):
func = self.util.module_for_loader(lambda x: x)
def return_module(self, name):
fxn = self.module_for_loader(lambda self, module: module)
return fxn(self, name)
def raise_exception(self, name):
def to_wrap(self, module):
raise ImportError
fxn = self.module_for_loader(to_wrap)
try:
fxn(self, name)
except ImportError:
pass
def test_new_module(self):
# Test that when no module exists in sys.modules a new module is
# created.
module_name = 'a.b.c'
with util.uncache(module_name):
module = self.return_module(module_name)
self.assertIn(module_name, sys.modules)
self.assertIsInstance(module, types.ModuleType)
self.assertEqual(module.__name__, module_name)
def test_reload(self):
# Test that a module is reused if already in sys.modules.
class FakeLoader:
def is_package(self, name):
return True
@self.module_for_loader
def load_module(self, module):
return module
name = 'a.b.c'
module = types.ModuleType('a.b.c')
module.__loader__ = 42
module.__package__ = 42
with util.uncache(name):
sys.modules[name] = module
loader = FakeLoader()
returned_module = loader.load_module(name)
self.assertIs(returned_module, sys.modules[name])
self.assertEqual(module.__loader__, loader)
self.assertEqual(module.__package__, name)
def test_new_module_failure(self):
# Test that a module is removed from sys.modules if added but an
# exception is raised.
name = 'a.b.c'
with util.uncache(name):
self.raise_exception(name)
self.assertNotIn(name, sys.modules)
def test_reload_failure(self):
# Test that a failure on reload leaves the module in-place.
name = 'a.b.c'
module = types.ModuleType(name)
with util.uncache(name):
sys.modules[name] = module
self.raise_exception(name)
self.assertIs(module, sys.modules[name])
def test_decorator_attrs(self):
def fxn(self, module): pass
wrapped = self.module_for_loader(fxn)
self.assertEqual(wrapped.__name__, fxn.__name__)
self.assertEqual(wrapped.__qualname__, fxn.__qualname__)
def test_false_module(self):
# If for some odd reason a module is considered false, still return it
# from sys.modules.
class FalseModule(types.ModuleType):
def __bool__(self): return False
name = 'mod'
module = FalseModule(name)
with util.uncache(name):
self.assertFalse(module)
sys.modules[name] = module
given = self.return_module(name)
self.assertIs(given, module)
def test_attributes_set(self):
# __name__, __loader__, and __package__ should be set (when
# is_package() is defined; undefined implicitly tested elsewhere).
class FakeLoader:
def __init__(self, is_package):
self._pkg = is_package
def is_package(self, name):
return self._pkg
@self.module_for_loader
def load_module(self, module):
return module
name = 'pkg.mod'
with util.uncache(name):
loader = FakeLoader(False)
module = loader.load_module(name)
self.assertEqual(module.__name__, name)
self.assertIs(module.__loader__, loader)
self.assertEqual(module.__package__, 'pkg')
name = 'pkg.sub'
with util.uncache(name):
loader = FakeLoader(True)
module = loader.load_module(name)
self.assertEqual(module.__name__, name)
self.assertIs(module.__loader__, loader)
self.assertEqual(module.__package__, name)
(Frozen_ModuleForLoaderTests,
Source_ModuleForLoaderTests
) = util.test_both(ModuleForLoaderTests, util=importlib_util)
class SetPackageTests:
"""Tests for importlib.util.set_package."""
def verify(self, module, expect):
"""Verify the module has the expected value for __package__ after
passing through set_package."""
fxn = lambda: module
wrapped = self.util.set_package(fxn)
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
wrapped()
self.assertTrue(hasattr(module, '__package__'))
self.assertEqual(expect, module.__package__)
def test_top_level(self):
# __package__ should be set to the empty string if a top-level module.
# Implicitly tests when package is set to None.
module = types.ModuleType('module')
module.__package__ = None
self.verify(module, '')
def test_package(self):
# Test setting __package__ for a package.
module = types.ModuleType('pkg')
module.__path__ = ['<path>']
module.__package__ = None
self.verify(module, 'pkg')
def test_submodule(self):
# Test __package__ for a module in a package.
module = types.ModuleType('pkg.mod')
module.__package__ = None
self.verify(module, 'pkg')
def test_setting_if_missing(self):
# __package__ should be set if it is missing.
module = types.ModuleType('mod')
if hasattr(module, '__package__'):
delattr(module, '__package__')
self.verify(module, '')
def test_leaving_alone(self):
# If __package__ is set and not None then leave it alone.
for value in (True, False):
module = types.ModuleType('mod')
module.__package__ = value
self.verify(module, value)
def test_decorator_attrs(self):
def fxn(module): pass
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
wrapped = self.util.set_package(fxn)
self.assertEqual(wrapped.__name__, fxn.__name__)
self.assertEqual(wrapped.__qualname__, fxn.__qualname__)
(Frozen_SetPackageTests,
Source_SetPackageTests
) = util.test_both(SetPackageTests, util=importlib_util)
class SetLoaderTests:
"""Tests importlib.util.set_loader()."""
@property
def DummyLoader(self):
# Set DummyLoader on the class lazily.
class DummyLoader:
@self.util.set_loader
def load_module(self, module):
return self.module
self.__class__.DummyLoader = DummyLoader
return DummyLoader
def test_no_attribute(self):
loader = self.DummyLoader()
loader.module = types.ModuleType('blah')
try:
del loader.module.__loader__
except AttributeError:
pass
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
self.assertEqual(loader, loader.load_module('blah').__loader__)
def test_attribute_is_None(self):
loader = self.DummyLoader()
loader.module = types.ModuleType('blah')
loader.module.__loader__ = None
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
self.assertEqual(loader, loader.load_module('blah').__loader__)
def test_not_reset(self):
loader = self.DummyLoader()
loader.module = types.ModuleType('blah')
loader.module.__loader__ = 42
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
self.assertEqual(42, loader.load_module('blah').__loader__)
(Frozen_SetLoaderTests,
Source_SetLoaderTests
) = util.test_both(SetLoaderTests, util=importlib_util)
class ResolveNameTests:
"""Tests importlib.util.resolve_name()."""
def test_absolute(self):
# bacon
self.assertEqual('bacon', self.util.resolve_name('bacon', None))
def test_absolute_within_package(self):
# bacon in spam
self.assertEqual('bacon', self.util.resolve_name('bacon', 'spam'))
def test_no_package(self):
# .bacon in ''
with self.assertRaises(ValueError):
self.util.resolve_name('.bacon', '')
def test_in_package(self):
# .bacon in spam
self.assertEqual('spam.eggs.bacon',
self.util.resolve_name('.bacon', 'spam.eggs'))
def test_other_package(self):
# ..bacon in spam.bacon
self.assertEqual('spam.bacon',
self.util.resolve_name('..bacon', 'spam.eggs'))
def test_escape(self):
# ..bacon in spam
with self.assertRaises(ValueError):
self.util.resolve_name('..bacon', 'spam')
(Frozen_ResolveNameTests,
Source_ResolveNameTests
) = util.test_both(ResolveNameTests, util=importlib_util)
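# Illustrative sketch (not part of the original test suite; helper name is
# hypothetical): resolve_name() maps a possibly-relative module name plus
# its anchoring package to an absolute name, mirroring the cases above.
def _resolve_name_demo():
    assert importlib.util.resolve_name('bacon', None) == 'bacon'
    assert importlib.util.resolve_name('.bacon', 'spam.eggs') == 'spam.eggs.bacon'
    assert importlib.util.resolve_name('..bacon', 'spam.eggs') == 'spam.bacon'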
class FindSpecTests:
class FakeMetaFinder:
@staticmethod
def find_spec(name, path=None, target=None): return name, path, target
def test_sys_modules(self):
name = 'some_mod'
with util.uncache(name):
module = types.ModuleType(name)
loader = 'a loader!'
spec = self.machinery.ModuleSpec(name, loader)
module.__loader__ = loader
module.__spec__ = spec
sys.modules[name] = module
found = self.util.find_spec(name)
self.assertEqual(found, spec)
def test_sys_modules_without___loader__(self):
name = 'some_mod'
with util.uncache(name):
module = types.ModuleType(name)
del module.__loader__
loader = 'a loader!'
spec = self.machinery.ModuleSpec(name, loader)
module.__spec__ = spec
sys.modules[name] = module
found = self.util.find_spec(name)
self.assertEqual(found, spec)
def test_sys_modules_spec_is_None(self):
name = 'some_mod'
with util.uncache(name):
module = types.ModuleType(name)
module.__spec__ = None
sys.modules[name] = module
with self.assertRaises(ValueError):
self.util.find_spec(name)
def test_sys_modules_loader_is_None(self):
name = 'some_mod'
with util.uncache(name):
module = types.ModuleType(name)
spec = self.machinery.ModuleSpec(name, None)
module.__spec__ = spec
sys.modules[name] = module
found = self.util.find_spec(name)
self.assertEqual(found, spec)
def test_sys_modules_spec_is_not_set(self):
name = 'some_mod'
with util.uncache(name):
module = types.ModuleType(name)
try:
del module.__spec__
except AttributeError:
pass
sys.modules[name] = module
with self.assertRaises(ValueError):
self.util.find_spec(name)
def test_success(self):
name = 'some_mod'
with util.uncache(name):
with util.import_state(meta_path=[self.FakeMetaFinder]):
self.assertEqual((name, None, None),
self.util.find_spec(name))
def test_nothing(self):
# None is returned upon failure to find a loader.
self.assertIsNone(self.util.find_spec('nevergoingtofindthismodule'))
def test_find_submodule(self):
name = 'spam'
subname = 'ham'
with util.temp_module(name, pkg=True) as pkg_dir:
fullname, _ = util.submodule(name, subname, pkg_dir)
spec = self.util.find_spec(fullname)
self.assertIsNot(spec, None)
self.assertIn(name, sorted(sys.modules))
self.assertNotIn(fullname, sorted(sys.modules))
# Ensure successive calls behave the same.
spec_again = self.util.find_spec(fullname)
self.assertEqual(spec_again, spec)
def test_find_submodule_parent_already_imported(self):
name = 'spam'
subname = 'ham'
with util.temp_module(name, pkg=True) as pkg_dir:
self.init.import_module(name)
fullname, _ = util.submodule(name, subname, pkg_dir)
spec = self.util.find_spec(fullname)
self.assertIsNot(spec, None)
self.assertIn(name, sorted(sys.modules))
self.assertNotIn(fullname, sorted(sys.modules))
# Ensure successive calls behave the same.
spec_again = self.util.find_spec(fullname)
self.assertEqual(spec_again, spec)
def test_find_relative_module(self):
name = 'spam'
subname = 'ham'
with util.temp_module(name, pkg=True) as pkg_dir:
fullname, _ = util.submodule(name, subname, pkg_dir)
relname = '.' + subname
spec = self.util.find_spec(relname, name)
self.assertIsNot(spec, None)
self.assertIn(name, sorted(sys.modules))
self.assertNotIn(fullname, sorted(sys.modules))
# Ensure successive calls behave the same.
spec_again = self.util.find_spec(fullname)
self.assertEqual(spec_again, spec)
def test_find_relative_module_missing_package(self):
name = 'spam'
subname = 'ham'
with util.temp_module(name, pkg=True) as pkg_dir:
fullname, _ = util.submodule(name, subname, pkg_dir)
relname = '.' + subname
with self.assertRaises(ValueError):
self.util.find_spec(relname)
self.assertNotIn(name, sorted(sys.modules))
self.assertNotIn(fullname, sorted(sys.modules))
def test_find_submodule_in_module(self):
# ModuleNotFoundError raised when a module is specified as
# a parent instead of a package.
with self.assertRaises(ModuleNotFoundError):
self.util.find_spec('module.name')
(Frozen_FindSpecTests,
Source_FindSpecTests
) = util.test_both(FindSpecTests, init=init, util=importlib_util,
machinery=machinery)
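# Illustrative sketch (not part of the original test suite; helper name is
# hypothetical): find_spec() answers "how would this name be imported?"
# without executing the module; for an already-imported module it returns
# that module's own __spec__.
def _find_spec_demo():
    spec = importlib.util.find_spec('os')
    assert spec is not None and spec.name == 'os'
    assert importlib.util.find_spec('nevergoingtofindthismodule') is None
    return spec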
class MagicNumberTests:
def test_length(self):
# Should be 4 bytes.
self.assertEqual(len(self.util.MAGIC_NUMBER), 4)
def test_incorporates_rn(self):
        # The magic number incorporates \r\n so that accidental newline
        # translation corrupts it detectably.
self.assertTrue(self.util.MAGIC_NUMBER.endswith(b'\r\n'))
(Frozen_MagicNumberTests,
Source_MagicNumberTests
) = util.test_both(MagicNumberTests, util=importlib_util)
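# Illustrative sketch (not part of the original test suite; helper name is
# hypothetical): the two properties checked above in one place --
# MAGIC_NUMBER is a two-byte little-endian version word plus b'\r\n'.
def _magic_number_demo():
    magic = importlib.util.MAGIC_NUMBER
    assert len(magic) == 4
    assert magic.endswith(b'\r\n')   # newline translation would corrupt it
    return int.from_bytes(magic[:2], 'little')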
class PEP3147Tests:
"""Tests of PEP 3147-related functions: cache_from_source and source_from_cache."""
tag = sys.implementation.cache_tag
@unittest.skipIf(sys.implementation.cache_tag is None,
                     'requires sys.implementation.cache_tag to not be None')
def test_cache_from_source(self):
# Given the path to a .py file, return the path to its PEP 3147
# defined .pyc file (i.e. under __pycache__).
path = os.path.join('foo', 'bar', 'baz', 'qux.py')
expect = os.path.join('foo', 'bar', 'baz', '__pycache__',
'qux.{}.pyc'.format(self.tag))
self.assertEqual(self.util.cache_from_source(path, optimization=''),
expect)
def test_cache_from_source_no_cache_tag(self):
# No cache tag means NotImplementedError.
with support.swap_attr(sys.implementation, 'cache_tag', None):
with self.assertRaises(NotImplementedError):
self.util.cache_from_source('whatever.py')
def test_cache_from_source_no_dot(self):
# Directory with a dot, filename without dot.
path = os.path.join('foo.bar', 'file')
expect = os.path.join('foo.bar', '__pycache__',
'file{}.pyc'.format(self.tag))
self.assertEqual(self.util.cache_from_source(path, optimization=''),
expect)
def test_cache_from_source_debug_override(self):
# Given the path to a .py file, return the path to its PEP 3147/PEP 488
# defined .pyc file (i.e. under __pycache__).
path = os.path.join('foo', 'bar', 'baz', 'qux.py')
with warnings.catch_warnings():
warnings.simplefilter('ignore')
self.assertEqual(self.util.cache_from_source(path, False),
self.util.cache_from_source(path, optimization=1))
self.assertEqual(self.util.cache_from_source(path, True),
self.util.cache_from_source(path, optimization=''))
with warnings.catch_warnings():
warnings.simplefilter('error')
with self.assertRaises(DeprecationWarning):
self.util.cache_from_source(path, False)
with self.assertRaises(DeprecationWarning):
self.util.cache_from_source(path, True)
def test_cache_from_source_cwd(self):
path = 'foo.py'
expect = os.path.join('__pycache__', 'foo.{}.pyc'.format(self.tag))
self.assertEqual(self.util.cache_from_source(path, optimization=''),
expect)
def test_cache_from_source_override(self):
# When debug_override is not None, it can be any true-ish or false-ish
# value.
path = os.path.join('foo', 'bar', 'baz.py')
# However if the bool-ishness can't be determined, the exception
# propagates.
class Bearish:
def __bool__(self): raise RuntimeError
with warnings.catch_warnings():
warnings.simplefilter('ignore')
self.assertEqual(self.util.cache_from_source(path, []),
self.util.cache_from_source(path, optimization=1))
self.assertEqual(self.util.cache_from_source(path, [17]),
self.util.cache_from_source(path, optimization=''))
with self.assertRaises(RuntimeError):
self.util.cache_from_source('/foo/bar/baz.py', Bearish())
def test_cache_from_source_optimization_empty_string(self):
# Setting 'optimization' to '' leads to no optimization tag (PEP 488).
path = 'foo.py'
expect = os.path.join('__pycache__', 'foo.{}.pyc'.format(self.tag))
self.assertEqual(self.util.cache_from_source(path, optimization=''),
expect)
def test_cache_from_source_optimization_None(self):
# Setting 'optimization' to None uses the interpreter's optimization.
# (PEP 488)
path = 'foo.py'
optimization_level = sys.flags.optimize
almost_expect = os.path.join('__pycache__', 'foo.{}'.format(self.tag))
if optimization_level == 0:
expect = almost_expect + '.pyc'
elif optimization_level <= 2:
expect = almost_expect + '.opt-{}.pyc'.format(optimization_level)
else:
msg = '{!r} is a non-standard optimization level'.format(optimization_level)
self.skipTest(msg)
self.assertEqual(self.util.cache_from_source(path, optimization=None),
expect)
def test_cache_from_source_optimization_set(self):
        # The 'optimization' parameter accepts anything whose string repr
        # passes str.isalnum().
path = 'foo.py'
valid_characters = string.ascii_letters + string.digits
almost_expect = os.path.join('__pycache__', 'foo.{}'.format(self.tag))
got = self.util.cache_from_source(path, optimization=valid_characters)
# Test all valid characters are accepted.
self.assertEqual(got,
almost_expect + '.opt-{}.pyc'.format(valid_characters))
# str() should be called on argument.
self.assertEqual(self.util.cache_from_source(path, optimization=42),
almost_expect + '.opt-42.pyc')
# Invalid characters raise ValueError.
with self.assertRaises(ValueError):
self.util.cache_from_source(path, optimization='path/is/bad')
def test_cache_from_source_debug_override_optimization_both_set(self):
# Can only set one of the optimization-related parameters.
with warnings.catch_warnings():
warnings.simplefilter('ignore')
with self.assertRaises(TypeError):
self.util.cache_from_source('foo.py', False, optimization='')
@unittest.skipUnless(os.sep == '\\' and os.altsep == '/',
'test meaningful only where os.altsep is defined')
def test_sep_altsep_and_sep_cache_from_source(self):
# Windows path and PEP 3147 where sep is right of altsep.
self.assertEqual(
self.util.cache_from_source('\\foo\\bar\\baz/qux.py', optimization=''),
'\\foo\\bar\\baz\\__pycache__\\qux.{}.pyc'.format(self.tag))
@unittest.skipIf(sys.implementation.cache_tag is None,
                     'requires sys.implementation.cache_tag to not be None')
    def test_cache_from_source_path_like_arg(self):
path = pathlib.PurePath('foo', 'bar', 'baz', 'qux.py')
expect = os.path.join('foo', 'bar', 'baz', '__pycache__',
'qux.{}.pyc'.format(self.tag))
self.assertEqual(self.util.cache_from_source(path, optimization=''),
expect)
@unittest.skipIf(sys.implementation.cache_tag is None,
'requires sys.implementation.cache_tag to not be None')
def test_source_from_cache(self):
# Given the path to a PEP 3147 defined .pyc file, return the path to
# its source. This tests the good path.
path = os.path.join('foo', 'bar', 'baz', '__pycache__',
'qux.{}.pyc'.format(self.tag))
expect = os.path.join('foo', 'bar', 'baz', 'qux.py')
self.assertEqual(self.util.source_from_cache(path), expect)
def test_source_from_cache_no_cache_tag(self):
# If sys.implementation.cache_tag is None, raise NotImplementedError.
path = os.path.join('blah', '__pycache__', 'whatever.pyc')
with support.swap_attr(sys.implementation, 'cache_tag', None):
with self.assertRaises(NotImplementedError):
self.util.source_from_cache(path)
def test_source_from_cache_bad_path(self):
# When the path to a pyc file is not in PEP 3147 format, a ValueError
# is raised.
self.assertRaises(
ValueError, self.util.source_from_cache, '/foo/bar/bazqux.pyc')
def test_source_from_cache_no_slash(self):
# No slashes at all in path -> ValueError
self.assertRaises(
ValueError, self.util.source_from_cache, 'foo.cpython-32.pyc')
def test_source_from_cache_too_few_dots(self):
# Too few dots in final path component -> ValueError
self.assertRaises(
ValueError, self.util.source_from_cache, '__pycache__/foo.pyc')
def test_source_from_cache_too_many_dots(self):
with self.assertRaises(ValueError):
self.util.source_from_cache(
'__pycache__/foo.cpython-32.opt-1.foo.pyc')
def test_source_from_cache_not_opt(self):
# Non-`opt-` path component -> ValueError
self.assertRaises(
ValueError, self.util.source_from_cache,
'__pycache__/foo.cpython-32.foo.pyc')
def test_source_from_cache_no__pycache__(self):
# Another problem with the path -> ValueError
self.assertRaises(
ValueError, self.util.source_from_cache,
'/foo/bar/foo.cpython-32.foo.pyc')
def test_source_from_cache_optimized_bytecode(self):
# Optimized bytecode is not an issue.
path = os.path.join('__pycache__', 'foo.{}.opt-1.pyc'.format(self.tag))
self.assertEqual(self.util.source_from_cache(path), 'foo.py')
def test_source_from_cache_missing_optimization(self):
# An empty optimization level is a no-no.
path = os.path.join('__pycache__', 'foo.{}.opt-.pyc'.format(self.tag))
with self.assertRaises(ValueError):
self.util.source_from_cache(path)
@unittest.skipIf(sys.implementation.cache_tag is None,
'requires sys.implementation.cache_tag to not be None')
def test_source_from_cache_path_like_arg(self):
path = pathlib.PurePath('foo', 'bar', 'baz', '__pycache__',
'qux.{}.pyc'.format(self.tag))
expect = os.path.join('foo', 'bar', 'baz', 'qux.py')
self.assertEqual(self.util.source_from_cache(path), expect)
@unittest.skipIf(sys.implementation.cache_tag is None,
'requires sys.implementation.cache_tag to not be None')
def test_cache_from_source_respects_pycache_prefix(self):
# If pycache_prefix is set, cache_from_source will return a bytecode
# path inside that directory (in a subdirectory mirroring the .py file's
# path) rather than in a __pycache__ dir next to the py file.
pycache_prefixes = [
os.path.join(os.path.sep, 'tmp', 'bytecode'),
os.path.join(os.path.sep, 'tmp', '\u2603'), # non-ASCII in path!
os.path.join(os.path.sep, 'tmp', 'trailing-slash') + os.path.sep,
]
drive = ''
if os.name == 'nt':
drive = 'C:'
pycache_prefixes = [
f'{drive}{prefix}' for prefix in pycache_prefixes]
pycache_prefixes += [r'\\?\C:\foo', r'\\localhost\c$\bar']
for pycache_prefix in pycache_prefixes:
with self.subTest(path=pycache_prefix):
path = drive + os.path.join(
os.path.sep, 'foo', 'bar', 'baz', 'qux.py')
expect = os.path.join(
pycache_prefix, 'foo', 'bar', 'baz',
'qux.{}.pyc'.format(self.tag))
with util.temporary_pycache_prefix(pycache_prefix):
self.assertEqual(
self.util.cache_from_source(path, optimization=''),
expect)
@unittest.skipIf(sys.implementation.cache_tag is None,
'requires sys.implementation.cache_tag to not be None')
def test_cache_from_source_respects_pycache_prefix_relative(self):
# If the .py path we are given is relative, we will resolve to an
# absolute path before prefixing with pycache_prefix, to avoid any
# possible ambiguity.
pycache_prefix = os.path.join(os.path.sep, 'tmp', 'bytecode')
path = os.path.join('foo', 'bar', 'baz', 'qux.py')
root = os.path.splitdrive(os.getcwd())[0] + os.path.sep
expect = os.path.join(
pycache_prefix,
os.path.relpath(os.getcwd(), root),
'foo', 'bar', 'baz', f'qux.{self.tag}.pyc')
with util.temporary_pycache_prefix(pycache_prefix):
self.assertEqual(
self.util.cache_from_source(path, optimization=''),
expect)
@unittest.skipIf(sys.implementation.cache_tag is None,
'requires sys.implementation.cache_tag to not be None')
def test_source_from_cache_inside_pycache_prefix(self):
# If pycache_prefix is set and the cache path we get is inside it,
# we return an absolute path to the py file based on the remainder of
# the path within pycache_prefix.
pycache_prefix = os.path.join(os.path.sep, 'tmp', 'bytecode')
path = os.path.join(pycache_prefix, 'foo', 'bar', 'baz',
f'qux.{self.tag}.pyc')
expect = os.path.join(os.path.sep, 'foo', 'bar', 'baz', 'qux.py')
with util.temporary_pycache_prefix(pycache_prefix):
self.assertEqual(self.util.source_from_cache(path), expect)
@unittest.skipIf(sys.implementation.cache_tag is None,
'requires sys.implementation.cache_tag to not be None')
def test_source_from_cache_outside_pycache_prefix(self):
# If pycache_prefix is set but the cache path we get is not inside
# it, just ignore it and handle the cache path according to the default
# behavior.
pycache_prefix = os.path.join(os.path.sep, 'tmp', 'bytecode')
path = os.path.join('foo', 'bar', 'baz', '__pycache__',
f'qux.{self.tag}.pyc')
expect = os.path.join('foo', 'bar', 'baz', 'qux.py')
with util.temporary_pycache_prefix(pycache_prefix):
self.assertEqual(self.util.source_from_cache(path), expect)
(Frozen_PEP3147Tests,
Source_PEP3147Tests
) = util.test_both(PEP3147Tests, util=importlib_util)
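# Illustrative sketch (not part of the original test suite; helper name is
# hypothetical): the PEP 3147/488 round trip the tests above pin down.
# Assumes a cache tag is defined and sys.pycache_prefix is unset.
def _pep3147_round_trip_demo():
    if sys.implementation.cache_tag is None or getattr(sys, 'pycache_prefix', None):
        return None
    source = os.path.join('pkg', 'mod.py')
    pyc = importlib.util.cache_from_source(source, optimization='')
    assert os.path.basename(os.path.dirname(pyc)) == '__pycache__'
    assert importlib.util.source_from_cache(pyc) == source
    return pyc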
class MagicNumberTests(unittest.TestCase):
"""
Test release compatibility issues relating to importlib
"""
@unittest.skipUnless(
sys.version_info.releaselevel in ('candidate', 'final'),
'only applies to candidate or final python release levels'
)
def test_magic_number(self):
"""
Each python minor release should generally have a MAGIC_NUMBER
that does not change once the release reaches candidate status.
Once a release reaches candidate status, the value of the constant
EXPECTED_MAGIC_NUMBER in this test should be changed.
This test will then check that the actual MAGIC_NUMBER matches
the expected value for the release.
In exceptional cases, it may be required to change the MAGIC_NUMBER
for a maintenance release. In this case the change should be
discussed in python-dev. If a change is required, community
stakeholders such as OS package maintainers must be notified
in advance. Such exceptional releases will then require an
adjustment to this test case.
"""
EXPECTED_MAGIC_NUMBER = 3400
actual = int.from_bytes(importlib.util.MAGIC_NUMBER[:2], 'little')
msg = (
"To avoid breaking backwards compatibility with cached bytecode "
"files that can't be automatically regenerated by the current "
"user, candidate and final releases require the current "
"importlib.util.MAGIC_NUMBER to match the expected "
"magic number in this test. Set the expected "
"magic number in this test to the current MAGIC_NUMBER to "
"continue with the release.\n\n"
"Changing the MAGIC_NUMBER for a maintenance release "
"requires discussion in python-dev and notification of "
"community stakeholders."
)
self.assertEqual(EXPECTED_MAGIC_NUMBER, actual, msg)
if __name__ == '__main__':
unittest.main()
| 40.129944
| 88
| 0.631142
|
018defa379602aebfbeae9db33d09c124b968d4c
| 481
|
py
|
Python
|
hooks/post_gen_project.py
|
e-k-m/cookiecutter-python-package
|
a9ff33084177ed14785b0b961c0ebe4825059d26
|
[
"MIT"
] | 1
|
2021-01-23T22:07:53.000Z
|
2021-01-23T22:07:53.000Z
|
hooks/post_gen_project.py
|
e-k-m/cookiecutter-python-package
|
a9ff33084177ed14785b0b961c0ebe4825059d26
|
[
"MIT"
] | null | null | null |
hooks/post_gen_project.py
|
e-k-m/cookiecutter-python-package
|
a9ff33084177ed14785b0b961c0ebe4825059d26
|
[
"MIT"
] | null | null | null |
import os
import shutil
PROJECT_DIRECTORY = os.path.realpath(os.path.curdir)
def remove(path):
    # Resolve the path relative to the generated project directory, then
    # delete it, whether it is a directory tree or a single file.
    full_path = os.path.join(PROJECT_DIRECTORY, path)
    if os.path.isdir(full_path):
        shutil.rmtree(full_path)
    else:
        os.remove(full_path)
if __name__ == "__main__":
if "y" not in "{{ cookiecutter.command_line_interface|lower }}":
remove(os.path.join("{{ cookiecutter.project_slug }}", "cli.py"))
if "y" not in "{{ cookiecutter.github_actions|lower }}":
remove(".github")
| 24.05
| 73
| 0.650728
|
e42fc49ae6a48d153c3224ccafd1ccc1fbbce8f1
| 5,429
|
py
|
Python
|
tests/parts/test_story.py
|
revvsales/python-docx-1
|
5b3ff2b828cc30f1567cb1682a8cb399143732d7
|
[
"MIT"
] | 3,031
|
2015-01-02T11:11:24.000Z
|
2022-03-30T00:57:17.000Z
|
tests/parts/test_story.py
|
revvsales/python-docx-1
|
5b3ff2b828cc30f1567cb1682a8cb399143732d7
|
[
"MIT"
] | 934
|
2015-01-06T20:53:56.000Z
|
2022-03-28T10:08:03.000Z
|
tests/parts/test_story.py
|
revvsales/python-docx-1
|
5b3ff2b828cc30f1567cb1682a8cb399143732d7
|
[
"MIT"
] | 901
|
2015-01-07T18:22:07.000Z
|
2022-03-31T18:38:51.000Z
|
# encoding: utf-8
"""Unit test suite for the docx.parts.story module"""
from __future__ import absolute_import, division, print_function, unicode_literals
import pytest
from docx.enum.style import WD_STYLE_TYPE
from docx.image.image import Image
from docx.opc.constants import RELATIONSHIP_TYPE as RT
from docx.package import Package
from docx.parts.document import DocumentPart
from docx.parts.image import ImagePart
from docx.parts.story import BaseStoryPart
from docx.styles.style import BaseStyle
from ..unitutil.cxml import element
from ..unitutil.file import snippet_text
from ..unitutil.mock import instance_mock, method_mock, property_mock
class DescribeBaseStoryPart(object):
def it_can_get_or_add_an_image(self, package_, image_part_, image_, relate_to_):
package_.get_or_add_image_part.return_value = image_part_
relate_to_.return_value = "rId42"
image_part_.image = image_
story_part = BaseStoryPart(None, None, None, package_)
rId, image = story_part.get_or_add_image("image.png")
package_.get_or_add_image_part.assert_called_once_with("image.png")
relate_to_.assert_called_once_with(story_part, image_part_, RT.IMAGE)
assert rId == "rId42"
assert image is image_
def it_can_get_a_style_by_id_and_type(
self, _document_part_prop_, document_part_, style_
):
style_id = "BodyText"
style_type = WD_STYLE_TYPE.PARAGRAPH
_document_part_prop_.return_value = document_part_
document_part_.get_style.return_value = style_
story_part = BaseStoryPart(None, None, None, None)
style = story_part.get_style(style_id, style_type)
document_part_.get_style.assert_called_once_with(style_id, style_type)
assert style is style_
def it_can_get_a_style_id_by_style_or_name_and_type(
self, _document_part_prop_, document_part_, style_
):
style_type = WD_STYLE_TYPE.PARAGRAPH
_document_part_prop_.return_value = document_part_
document_part_.get_style_id.return_value = "BodyText"
story_part = BaseStoryPart(None, None, None, None)
style_id = story_part.get_style_id(style_, style_type)
document_part_.get_style_id.assert_called_once_with(style_, style_type)
assert style_id == "BodyText"
def it_can_create_a_new_pic_inline(self, get_or_add_image_, image_, next_id_prop_):
get_or_add_image_.return_value = "rId42", image_
image_.scaled_dimensions.return_value = 444, 888
image_.filename = "bar.png"
next_id_prop_.return_value = 24
expected_xml = snippet_text("inline")
story_part = BaseStoryPart(None, None, None, None)
inline = story_part.new_pic_inline("foo/bar.png", width=100, height=200)
get_or_add_image_.assert_called_once_with(story_part, "foo/bar.png")
image_.scaled_dimensions.assert_called_once_with(100, 200)
assert inline.xml == expected_xml
def it_knows_the_next_available_xml_id(self, next_id_fixture):
story_element, expected_value = next_id_fixture
story_part = BaseStoryPart(None, None, story_element, None)
next_id = story_part.next_id
assert next_id == expected_value
def it_knows_the_main_document_part_to_help(self, package_, document_part_):
package_.main_document_part = document_part_
story_part = BaseStoryPart(None, None, None, package_)
document_part = story_part._document_part
assert document_part is document_part_
# fixtures -------------------------------------------------------
@pytest.fixture(
params=[
(("w:document"), 1),
(("w:document/w:p{id=1}"), 2),
(("w:document/w:p{id=2}"), 3),
(("w:hdr/(w:p{id=1},w:p{id=2},w:p{id=3})"), 4),
(("w:hdr/(w:p{id=1},w:p{id=2},w:p{id=4})"), 5),
(("w:hdr/(w:p{id=0},w:p{id=0})"), 1),
(("w:ftr/(w:p{id=0},w:p{id=0},w:p{id=1},w:p{id=3})"), 4),
(("w:ftr/(w:p{id=foo},w:p{id=1},w:p{id=2})"), 3),
(("w:ftr/(w:p{id=1},w:p{id=bar})"), 2),
]
)
def next_id_fixture(self, request):
story_cxml, expected_value = request.param
story_element = element(story_cxml)
return story_element, expected_value
# fixture components ---------------------------------------------
@pytest.fixture
def document_part_(self, request):
return instance_mock(request, DocumentPart)
@pytest.fixture
def _document_part_prop_(self, request):
return property_mock(request, BaseStoryPart, "_document_part")
@pytest.fixture
def get_or_add_image_(self, request):
return method_mock(request, BaseStoryPart, "get_or_add_image")
@pytest.fixture
def image_(self, request):
return instance_mock(request, Image)
@pytest.fixture
def image_part_(self, request):
return instance_mock(request, ImagePart)
@pytest.fixture
def next_id_prop_(self, request):
return property_mock(request, BaseStoryPart, "next_id")
@pytest.fixture
def package_(self, request):
return instance_mock(request, Package)
@pytest.fixture
def relate_to_(self, request):
return method_mock(request, BaseStoryPart, "relate_to")
@pytest.fixture
def style_(self, request):
return instance_mock(request, BaseStyle)
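# Illustrative reference sketch (not part of the original test suite; helper
# name is hypothetical): the numbering rule the next_id_fixture cases above
# encode -- ignore ids that are not plain digits and return one past the
# largest numeric id, or 1 when none are present.
def _next_id_reference(id_strs):
    numeric = [int(s) for s in id_strs if s.isdigit()]
    return max(numeric, default=0) + 1
assert _next_id_reference([]) == 1                  # bare 'w:document' case
assert _next_id_reference(['1', '2', '4']) == 5     # gaps do not matter
assert _next_id_reference(['foo', '1', '2']) == 3   # non-numeric ids ignored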
| 35.717105
| 87
| 0.678394
|
d5e15f12b6ea6a4a73ffdd64e1a92bac8acf3bf0
| 8,048
|
py
|
Python
|
sdk/storage/azure-storage-blob/tests/test_blob_samples_common_async.py
|
kushan2018/azure-sdk-for-python
|
08a9296207281f4e90e23cf7a30173863accc867
|
[
"MIT"
] | null | null | null |
sdk/storage/azure-storage-blob/tests/test_blob_samples_common_async.py
|
kushan2018/azure-sdk-for-python
|
08a9296207281f4e90e23cf7a30173863accc867
|
[
"MIT"
] | 1
|
2020-03-06T05:57:16.000Z
|
2020-03-06T05:57:16.000Z
|
sdk/storage/azure-storage-blob/tests/test_blob_samples_common_async.py
|
kushan2018/azure-sdk-for-python
|
08a9296207281f4e90e23cf7a30173863accc867
|
[
"MIT"
] | null | null | null |
# coding: utf-8
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import os
import asyncio
from azure.core.exceptions import ResourceExistsError
try:
import settings_real as settings
except ImportError:
import blob_settings_fake as settings
from testcase import (
StorageTestCase,
TestMode,
record
)
SOURCE_FILE = 'SampleSource.txt'
class TestCommonBlobSamplesAsync(StorageTestCase):
connection_string = settings.BLOB_CONNECTION_STRING
def setUp(self):
data = b"Lorem ipsum dolor sit amet, consectetur adipiscing elit"
with open(SOURCE_FILE, 'wb') as stream:
stream.write(data)
super(TestCommonBlobSamplesAsync, self).setUp()
def tearDown(self):
if os.path.isfile(SOURCE_FILE):
            try:
                os.remove(SOURCE_FILE)
            except OSError:
                pass
return super(TestCommonBlobSamplesAsync, self).tearDown()
#--Begin Blob Samples-----------------------------------------------------------------
async def _test_blob_snapshots_async(self):
# Instantiate a BlobServiceClient using a connection string
from azure.storage.blob.aio import BlobServiceClient
blob_service_client = BlobServiceClient.from_connection_string(self.connection_string)
# Instantiate a ContainerClient
container_client = blob_service_client.get_container_client("containerformyblobsasync")
# Create new Container
await container_client.create_container()
# Upload a blob to the container
with open(SOURCE_FILE, "rb") as data:
await container_client.upload_blob(name="my_blob", data=data)
# Get a BlobClient for a specific blob
blob_client = blob_service_client.get_blob_client(container="containerformyblobsasync", blob="my_blob")
# [START create_blob_snapshot]
# Create a read-only snapshot of the blob at this point in time
snapshot_blob = await blob_client.create_snapshot()
# Get the snapshot ID
print(snapshot_blob.get('snapshot'))
# Delete only the snapshot (blob itself is retained)
await blob_client.delete_blob(delete_snapshots="only")
# [END create_blob_snapshot]
# Delete container
await blob_service_client.delete_container("containerformyblobsasync")
@record
def test_blob_snapshots_async(self):
if TestMode.need_recording_file(self.test_mode):
return
loop = asyncio.get_event_loop()
loop.run_until_complete(self._test_blob_snapshots_async())
async def _test_soft_delete_and_undelete_blob_async(self):
# Instantiate a BlobServiceClient using a connection string
from azure.storage.blob.aio import BlobServiceClient
blob_service_client = BlobServiceClient.from_connection_string(self.connection_string)
# Create a retention policy to retain deleted blobs
from azure.storage.blob import RetentionPolicy
delete_retention_policy = RetentionPolicy(enabled=True, days=1)
# Set the retention policy on the service
await blob_service_client.set_service_properties(delete_retention_policy=delete_retention_policy)
# Instantiate a ContainerClient
container_client = blob_service_client.get_container_client("containerfordeletedblobsasync")
# Create new Container
try:
await container_client.create_container()
except ResourceExistsError:
# Container already created
pass
# Upload a blob to the container
with open(SOURCE_FILE, "rb") as data:
blob_client = await container_client.upload_blob(name="my_blob", data=data)
# Soft delete blob in the container (blob can be recovered with undelete)
await blob_client.delete_blob()
# [START undelete_blob]
# Undelete the blob before the retention policy expires
await blob_client.undelete_blob()
# [END undelete_blob]
# [START get_blob_properties]
properties = await blob_client.get_blob_properties()
# [END get_blob_properties]
assert properties is not None
# Delete container
await blob_service_client.delete_container("containerfordeletedblobsasync")
@record
def test_soft_delete_and_undelete_blob_async(self):
if TestMode.need_recording_file(self.test_mode):
return
loop = asyncio.get_event_loop()
loop.run_until_complete(self._test_soft_delete_and_undelete_blob_async())
async def _test_acquire_lease_on_blob_async(self):
# Instantiate a BlobServiceClient using a connection string
from azure.storage.blob.aio import BlobServiceClient
blob_service_client = BlobServiceClient.from_connection_string(self.connection_string)
# Instantiate a ContainerClient
container_client = blob_service_client.get_container_client("leasemyblobscontainerasync")
# Create new Container
await container_client.create_container()
# Upload a blob to the container
with open(SOURCE_FILE, "rb") as data:
await container_client.upload_blob(name="my_blob", data=data)
# [START acquire_lease_on_blob]
# Get the blob client
blob_client = blob_service_client.get_blob_client("leasemyblobscontainerasync", "my_blob")
# Acquire a lease on the blob
lease = await blob_client.acquire_lease()
# Delete blob by passing in the lease
await blob_client.delete_blob(lease=lease)
# [END acquire_lease_on_blob]
# Delete container
await blob_service_client.delete_container("leasemyblobscontainerasync")
@record
def test_acquire_lease_on_blob_async(self):
if TestMode.need_recording_file(self.test_mode):
return
loop = asyncio.get_event_loop()
loop.run_until_complete(self._test_acquire_lease_on_blob_async())
async def _test_copy_blob_from_url_and_abort_copy_async(self):
# Instantiate a BlobServiceClient using a connection string
from azure.storage.blob.aio import BlobServiceClient
blob_service_client = BlobServiceClient.from_connection_string(self.connection_string)
# Instantiate a ContainerClient
container_client = blob_service_client.get_container_client("copyblobcontainerasync")
# Create new Container
await container_client.create_container()
try:
# [START copy_blob_from_url]
# Get the blob client with the source blob
source_blob = "http://www.gutenberg.org/files/59466/59466-0.txt"
copied_blob = blob_service_client.get_blob_client("copyblobcontainerasync", '59466-0.txt')
# start copy and check copy status
copy = await copied_blob.start_copy_from_url(source_blob)
props = await copied_blob.get_blob_properties()
print(props.copy.status)
# [END copy_blob_from_url]
copy_id = props.copy.id
# [START abort_copy_blob_from_url]
# Passing in copy id to abort copy operation
await copied_blob.abort_copy(copy_id)
# check copy status
props = await copied_blob.get_blob_properties()
print(props.copy.status)
# [END abort_copy_blob_from_url]
finally:
await blob_service_client.delete_container("copyblobcontainerasync")
@record
def test_copy_blob_from_url_and_abort_copy_async(self):
if TestMode.need_recording_file(self.test_mode):
return
loop = asyncio.get_event_loop()
loop.run_until_complete(self._test_copy_blob_from_url_and_abort_copy_async())
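# Illustrative sketch (not part of the original tests; function name is
# hypothetical, and it is written against the synchronous azure.storage.blob
# API for brevity): the snapshot semantics the samples above rely on. A blob
# that has snapshots cannot be deleted by itself -- delete_snapshots="only"
# removes just the snapshots, while "include" removes the snapshots together
# with the base blob.
def _snapshot_cleanup_sketch(blob_client):
    blob_client.create_snapshot()
    blob_client.delete_blob(delete_snapshots="include")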
| 37.432558
| 111
| 0.682654
|
7f56dd6875391442da339265a0137ffa5b25d027
| 14,214
|
py
|
Python
|
AlphaZero/game.py
|
aipromote/reinforcement-learning-master
|
0d2fe7be5b50d12fd212483f9c71a1b846d02c49
|
[
"MIT"
] | null | null | null |
AlphaZero/game.py
|
aipromote/reinforcement-learning-master
|
0d2fe7be5b50d12fd212483f9c71a1b846d02c49
|
[
"MIT"
] | null | null | null |
AlphaZero/game.py
|
aipromote/reinforcement-learning-master
|
0d2fe7be5b50d12fd212483f9c71a1b846d02c49
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
import pygame
from pygame.locals import *
# windows font location
FONT_PATH = 'C:/Windows/Fonts/simkai.ttf'
class Board(object):
'''
    Game board and rule logic for Gomoku (five in a row).
'''
def __init__(self, **kwargs):
self.width = int(kwargs.get('width', 15))
self.height = int(kwargs.get('height', 15))
        self.states = {}  # board state: key = move index, value = id of the player who played there
        self.n_in_row = int(kwargs.get('n_in_row', 5))  # number of pieces in a row needed to win
self.players = [1, 2] # player 1,2
def init_board(self, start_player=0):
if self.width < self.n_in_row or self.height < self.n_in_row:
raise Exception('The length and width of the board cannot be less than {}'.format(self.n_in_row))
self.current_player = self.players[start_player]
self.availables = list(range(self.width * self.height))
self.states = {}
self.last_move = -1
def move_to_location(self, move):
'''
        Convert a move index into board coordinates [h, w] (e.g. move=2 maps to [0, 2]: row 0, third column from the left along the top edge).
:param move:
:return:
'''
h = move // self.width
w = move % self.width
return [h, w]
def location_to_move(self, location):
# Return the move value based on the incoming location
# Location information must contain 2 values [h,w]
if len(location) != 2:
return -1
h = location[0]
w = location[1]
move = h * self.width + w
if move not in range(self.width * self.height):
return -1
return move
def current_state(self):
        # Describe the current position with four width x height binary feature planes:
        # planes 0 and 1 mark the stones of the current player and of the opponent
        # (1 where a stone sits, 0 elsewhere);
        # plane 2 marks only the opponent's most recent move, so a single cell is 1;
        # plane 3 is all 1s if the current player moved first in the game, otherwise all 0s.
square_state = np.zeros((4, self.width, self.height))
if self.states:
            moves, players = np.array(list(zip(*self.states.items())))
            move_curr = moves[players == self.current_player]  # move indices played by the current player
            move_oppo = moves[players != self.current_player]  # move indices played by the opponent
            # planes 0 and 1: stones of the current player and of the opponent
            square_state[0][move_curr // self.width,
                            move_curr % self.height] = 1.0
            square_state[1][move_oppo // self.width,
                            move_oppo % self.height] = 1.0
            # plane 2: the opponent's most recent move
            square_state[2][self.last_move // self.width,
                            self.last_move % self.height] = 1.0
            # plane 3: all ones if the current player moved first in the game
            if len(self.states) % 2 == 0:
                square_state[3][:, :] = 1.0
return square_state[:, ::-1, :]
def do_move(self, move):
self.states[move] = self.current_player
self.availables.remove(move)
self.current_player = (
self.players[0] if self.current_player == self.players[1]
else self.players[1]
)
self.last_move = move
def has_a_winner(self):
width = self.width
height = self.height
states = self.states
n = self.n_in_row
moved = list(set(range(width * height)) - set(self.availables))
if len(moved) < self.n_in_row + 2:
            # A win needs at least n_in_row + 2 moves on the board; with fewer, return early with no winner.
return False, -1
for m in moved:
h = m // width
w = m % width
player = states[m]
            # n in a row: horizontal
if (w in range(width - n + 1) and
len(set(states.get(i, -1) for i in range(m, m + n))) == 1):
return True, player
            # n in a row: vertical
if (h in range(height - n + 1) and
len(set(states.get(i, -1) for i in range(m, m + n * width, width))) == 1):
return True, player
            # n in a row: diagonal going down-right
if (w in range(width - n + 1) and h in range(height - n + 1) and
len(set(states.get(i, -1) for i in range(m, m + n * (width + 1), width + 1))) == 1):
return True, player
            # n in a row: diagonal going down-left
if (w in range(n - 1, width) and h in range(height - n + 1) and
len(set(states.get(i, -1) for i in range(m, m + n * (width - 1), width - 1))) == 1):
return True, player
return False, -1
def game_end(self):
win, winner = self.has_a_winner()
if win:
return True, winner
elif not len(self.availables):
return True, -1
return False, -1
def get_current_player(self):
return self.current_player
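# Illustrative sketch (not part of the original module; helper name is
# hypothetical): a tiny board that makes the four feature planes of
# current_state() concrete; the 4x4 size and n_in_row=3 merely satisfy the
# size check.
def _current_state_demo():
    board = Board(width=4, height=4, n_in_row=3)
    board.init_board()
    board.do_move(0)   # player 1 plays move index 0 -> location [0, 0]
    board.do_move(5)   # player 2 plays move index 5 -> location [1, 1]
    assert board.move_to_location(5) == [1, 1]
    state = board.current_state()
    # plane 0: stones of the side to move (player 1 again) -> move 0
    # plane 1: opponent stones -> move 5
    # plane 2: opponent's most recent move -> move 5
    # plane 3: all ones, because the side to move also moved first
    assert state.shape == (4, 4, 4)
    return state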
N = 15
IMAGE_PATH = 'UI/'
WIDTH = 540
HEIGHT = 540
MARGIN = 22
GRID = (WIDTH - 2 * MARGIN) / (N - 1)
PIECE = 32
class Game_UI(object):
def __init__(self, board, is_shown, **kwargs):
self.board = board
self.is_shown = is_shown
pygame.init()
if is_shown != 0:
self.__screen = pygame.display.set_mode((WIDTH, HEIGHT), 0, 32)
pygame.display.set_caption('Gomoku AI')
# UI resource
self.__ui_chessboard = pygame.image.load(IMAGE_PATH + 'chessboard.jpg').convert()
self.__ui_piece_black = pygame.image.load(IMAGE_PATH + 'piece_black.png').convert_alpha()
self.__ui_piece_white = pygame.image.load(IMAGE_PATH + 'piece_white.png').convert_alpha()
def coordinate_transform_map2pixel(self, i, j):
'''
        Convert a board index (i, j) to the pixel position of a piece on screen
:param i:
:param j:
:return:
'''
return MARGIN + j * GRID - PIECE / 2, MARGIN + i * GRID - PIECE / 2
def coordinate_transform_pixel2map(self, x, y):
'''
        Convert a pixel position (x, y) to a board index (i, j), or (None, None) if off the board
:param x:
:param y:
:return:
'''
i, j = int(round((y - MARGIN + PIECE / 2) / GRID)), int(round((x - MARGIN + PIECE / 2) / GRID))
if i < 0 or i >= N or j < 0 or j >= N:
return None, None
else:
return i, j
def draw_chess(self):
self.__screen.blit(self.__ui_chessboard, (0, 0))
for i in range(0, N):
for j in range(0, N):
# Calculate the movement position
loc = i * N + j
p = self.board.states.get(loc, -1)
player1, player2 = self.board.players
# Find the coordinates of (i,j)
x, y = self.coordinate_transform_map2pixel(i, j)
if p == player1: # player1 ==> black
self.__screen.blit(self.__ui_piece_black, (x, y))
elif p == player2: # player2 ==> white
self.__screen.blit(self.__ui_piece_white, (x, y))
else:
pass
def one_step(self):
i, j = None, None
mouse_button = pygame.mouse.get_pressed()
if mouse_button[0]:
x, y = pygame.mouse.get_pos()
i, j = self.coordinate_transform_pixel2map(x, y)
            if i is not None and j is not None:
loc = i * N + j
p = self.board.states.get(loc, -1)
player1, player2 = self.board.players
if p == player1 or p == player2:
return False
else:
cp = self.board.current_player
location = [i, j]
move = self.board.location_to_move(location)
self.board.do_move(move)
if self.is_shown:
if cp == player1:
self.__screen.blit(self.__ui_piece_black, (x, y))
else:
self.__screen.blit(self.__ui_piece_white, (x, y))
return True
return False
def draw_result(self, result):
font = pygame.font.Font(FONT_PATH, 50)
tips = u"Game Over:"
player1, player2 = self.board.players
if result == player1:
tips = tips + u"Player 1 wins"
elif result == player2:
tips = tips + u"Player 2 wins"
else:
tips = tips + u"Tie"
text = font.render(tips, True, (255, 0, 0))
self.__screen.blit(text, (WIDTH / 2 - 200, HEIGHT / 2 - 50))
def start_play_mouse(self, player1, player2, start_player=0):
if start_player not in (0, 1):
raise Exception('Must be 0 (player 1) or 1 (player 2)')
self.board.init_board(start_player)
p1, p2 = self.board.players
player1.set_player_ind(p1)
player2.set_player_ind(p2)
players = {p1: player1, p2: player2}
if start_player != 0:
current_player = self.board.current_player
player_in_turn = players[current_player]
move = player_in_turn.get_action(self.board) # MCTS AI
self.board.do_move(move)
if self.is_shown:
self.draw_chess()
pygame.display.update()
flag = False
win = None
while True:
for event in pygame.event.get():
if event.type == QUIT:
pygame.quit()
exit()
elif event.type == MOUSEBUTTONDOWN:
if self.one_step():
end, winner = self.board.game_end()
else:
continue
if end:
flag = True
win = winner
break
current_player = self.board.current_player
player_in_turn = players[current_player]
move = player_in_turn.get_action(self.board)
self.board.do_move(move)
if self.is_shown:
self.draw_chess()
pygame.display.update()
end, winner = self.board.game_end()
if end:
flag = True
win = winner
break
if flag and self.is_shown:
self.draw_result(win)
pygame.display.update()
break
def start_play(self, player1, player2, start_player=0):
if start_player not in (0, 1):
raise Exception('Must be 0 (player 1) or 1 (player 2)')
self.board.init_board(start_player)
p1, p2 = self.board.players
player1.set_player_ind(p1)
player2.set_player_ind(p2)
players = {p1: player1, p2: player2}
if self.is_shown:
self.draw_chess()
pygame.display.update()
while True:
if self.is_shown:
for event in pygame.event.get():
if event.type == QUIT:
pygame.quit()
exit()
current_player = self.board.current_player
player_in_turn = players[current_player]
move = player_in_turn.get_action(self.board) # MCTS AI
self.board.do_move(move)
if self.is_shown:
self.draw_chess()
pygame.display.update()
end, winner = self.board.game_end()
if end:
win = winner
break
if self.is_shown:
self.draw_result(win)
pygame.display.update()
return win
def start_self_play(self, player, temp=1e-3):
"""
        Run one self-play game with the given player, reusing the search tree,
        and collect the game data (state, mcts_probs, z) for training.
"""
self.board.init_board()
states, mcts_probs, current_players = [], [], []
if self.is_shown:
self.draw_chess()
pygame.display.update()
while True:
if self.is_shown:
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
exit()
move, move_probs = player.get_action(self.board,
temp=temp,
return_prob=1)
states.append(self.board.current_state())
mcts_probs.append(move_probs)
current_players.append(self.board.current_player)
self.board.do_move(move)
if self.is_shown:
self.draw_chess()
pygame.display.update()
end, winner = self.board.game_end()
if end:
winners_z = np.zeros(len(current_players))
if winner != -1:
winners_z[np.array(current_players) == winner] = 1.0
winners_z[np.array(current_players) != winner] = -1.0
player.reset_player()
if self.is_shown:
self.draw_result(winner)
pygame.display.update()
return winner, zip(states, mcts_probs, winners_z)
| 36.634021
| 208
| 0.535247
|
63122e859fa8b171327b0b59ef115367628491dc
| 6,557
|
py
|
Python
|
python/ray/tune/tests/test_tune_save_restore.py
|
firebolt55439/ray
|
215300b070628c06f0106906fc6c03bd70ebf140
|
[
"Apache-2.0"
] | 3
|
2020-12-03T17:48:45.000Z
|
2022-01-22T08:09:46.000Z
|
python/ray/tune/tests/test_tune_save_restore.py
|
firebolt55439/ray
|
215300b070628c06f0106906fc6c03bd70ebf140
|
[
"Apache-2.0"
] | 72
|
2021-02-06T08:07:16.000Z
|
2022-03-26T07:17:49.000Z
|
python/ray/tune/tests/test_tune_save_restore.py
|
firebolt55439/ray
|
215300b070628c06f0106906fc6c03bd70ebf140
|
[
"Apache-2.0"
] | 2
|
2020-05-22T15:36:27.000Z
|
2020-05-22T15:52:03.000Z
|
# coding: utf-8
import os
import pickle
import shutil
import tempfile
import unittest
import ray
from ray import tune
from ray.rllib import _register_all
from ray.tune import Trainable
from ray.tune.utils import validate_save_restore
class SerialTuneRelativeLocalDirTest(unittest.TestCase):
local_mode = True
prefix = "Serial"
class MockTrainable(Trainable):
_name = "MockTrainable"
def setup(self, config):
self.state = {"hi": 1}
def step(self):
return {"timesteps_this_iter": 1, "done": True}
def save_checkpoint(self, checkpoint_dir):
checkpoint_path = os.path.join(
checkpoint_dir, "checkpoint-{}".format(self._iteration))
with open(checkpoint_path, "wb") as f:
pickle.dump(self.state, f)
return checkpoint_path
def load_checkpoint(self, checkpoint_path):
with open(checkpoint_path, "rb") as f:
extra_data = pickle.load(f)
self.state.update(extra_data)
def setUp(self):
self.absolute_local_dir = None
ray.init(num_cpus=1, num_gpus=0, local_mode=self.local_mode)
def tearDown(self):
if self.absolute_local_dir is not None:
shutil.rmtree(self.absolute_local_dir, ignore_errors=True)
self.absolute_local_dir = None
ray.shutdown()
# Without this line, test_tune_server.testAddTrial would fail.
_register_all()
    def _get_trial_dir(self, absolute_exp_dir):
        print("looking for", self.MockTrainable._name)
        print("in", os.listdir(absolute_exp_dir))
        trial_dirname = next(
            (child_dir for child_dir in os.listdir(absolute_exp_dir)
             if (os.path.isdir(os.path.join(absolute_exp_dir, child_dir))
                 and child_dir.startswith(self.MockTrainable._name))))
        trial_absolute_dir = os.path.join(absolute_exp_dir, trial_dirname)
        return trial_dirname, trial_absolute_dir
def _train(self, exp_name, local_dir, absolute_local_dir):
trial, = tune.run(
self.MockTrainable,
name=exp_name,
stop={
"training_iteration": 1
},
checkpoint_freq=1,
local_dir=local_dir,
config={
"env": "CartPole-v0",
"log_level": "DEBUG"
}).trials
exp_dir = os.path.join(absolute_local_dir, exp_name)
_, abs_trial_dir = self._get_trial_dir(exp_dir)
self.assertIsNone(trial.error_file)
self.assertEqual(trial.local_dir, exp_dir)
self.assertEqual(trial.logdir, abs_trial_dir)
self.assertTrue(os.path.isdir(absolute_local_dir), absolute_local_dir)
self.assertTrue(os.path.isdir(exp_dir))
self.assertTrue(os.path.isdir(abs_trial_dir))
self.assertTrue(
os.path.isfile(
os.path.join(abs_trial_dir, "checkpoint_1/checkpoint-1")))
def _restore(self, exp_name, local_dir, absolute_local_dir):
trial_name, abs_trial_dir = self._get_trial_dir(
os.path.join(absolute_local_dir, exp_name))
checkpoint_path = os.path.join(
local_dir, exp_name, trial_name,
"checkpoint_1/checkpoint-1") # Relative checkpoint path
# The file tune would find. The absolute checkpoint path.
tune_find_file = os.path.abspath(os.path.expanduser(checkpoint_path))
        self.assertTrue(
            os.path.isfile(tune_find_file),
            "{} does not exist!".format(tune_find_file))
trial, = tune.run(
self.MockTrainable,
name=exp_name,
stop={
"training_iteration": 2
}, # train one more iteration.
restore=checkpoint_path, # Restore the checkpoint
config={
"env": "CartPole-v0",
"log_level": "DEBUG"
}).trials
self.assertIsNone(trial.error_file)
def testDottedRelativePath(self):
local_dir = "./test_dotted_relative_local_dir"
exp_name = self.prefix + "DottedRelativeLocalDir"
absolute_local_dir = os.path.abspath(local_dir)
self.absolute_local_dir = absolute_local_dir
self.assertFalse(os.path.exists(absolute_local_dir))
self._train(exp_name, local_dir, absolute_local_dir)
self._restore(exp_name, local_dir, absolute_local_dir)
def testRelativePath(self):
local_dir = "test_relative_local_dir"
exp_name = self.prefix + "RelativePath"
absolute_local_dir = os.path.abspath(local_dir)
self.absolute_local_dir = absolute_local_dir
self.assertFalse(os.path.exists(absolute_local_dir))
self._train(exp_name, local_dir, absolute_local_dir)
self._restore(exp_name, local_dir, absolute_local_dir)
def testTildeAbsolutePath(self):
local_dir = "~/test_tilde_absolute_local_dir"
exp_name = self.prefix + "TildeAbsolutePath"
absolute_local_dir = os.path.abspath(os.path.expanduser(local_dir))
self.absolute_local_dir = absolute_local_dir
self.assertFalse(os.path.exists(absolute_local_dir))
self._train(exp_name, local_dir, absolute_local_dir)
self._restore(exp_name, local_dir, absolute_local_dir)
def testTempfile(self):
local_dir = tempfile.mkdtemp()
exp_name = self.prefix + "Tempfile"
self.absolute_local_dir = local_dir
self._train(exp_name, local_dir, local_dir)
self._restore(exp_name, local_dir, local_dir)
def testCheckpointWithNoop(self):
"""Tests that passing the checkpoint_dir right back works."""
class MockTrainable(Trainable):
def setup(self, config):
pass
def step(self):
return {"score": 1}
def save_checkpoint(self, checkpoint_dir):
with open(os.path.join(checkpoint_dir, "test.txt"), "wb") as f:
pickle.dump("test", f)
return checkpoint_dir
def load_checkpoint(self, checkpoint_dir):
with open(os.path.join(checkpoint_dir, "test.txt"), "rb") as f:
x = pickle.load(f)
assert x == "test"
return checkpoint_dir
validate_save_restore(MockTrainable)
validate_save_restore(MockTrainable, use_object_store=True)
if __name__ == "__main__":
import pytest
import sys
sys.exit(pytest.main(["-v", __file__]))
| 35.830601
| 79
| 0.635504
|
42b982662a9f31ecadaec9453c458f4844d0a4d5
| 10,944
|
py
|
Python
|
modules/8.Cal_gdp_benefit_ctry_Dell.py
|
YixuanZheng/Aerosol_Inequality_2019
|
029b198311f192dbb98b96053ce0fbc55a3ab392
|
[
"MIT"
] | 3
|
2020-02-19T12:27:38.000Z
|
2020-03-11T12:19:47.000Z
|
modules/8.Cal_gdp_benefit_ctry_Dell.py
|
YixuanZheng/Aerosol_Inequality_2019
|
029b198311f192dbb98b96053ce0fbc55a3ab392
|
[
"MIT"
] | null | null | null |
modules/8.Cal_gdp_benefit_ctry_Dell.py
|
YixuanZheng/Aerosol_Inequality_2019
|
029b198311f192dbb98b96053ce0fbc55a3ab392
|
[
"MIT"
] | 2
|
2020-04-19T22:19:10.000Z
|
2021-03-04T15:04:28.000Z
|
# -*- coding: utf-8 -*-
'''
This code calculates the impacts of aerosol-induced temperature changes on GDP,
applying the Dell et al. damage function.
The distribution of the Dell et al. parameter is sampled (1000 draws) from the published median and standard error.
by Yixuan Zheng (yxzheng@carnegiescience.edu)
'''
from netCDF4 import Dataset
import pandas as pd
import numpy as np
import _env
import datetime
import xarray as xr
nens = _env.nens
datasets = _env.datasets
year = _env.year
syr = str(year)
gdp_year = year
sgdp_year = str(gdp_year)
par = 'TREFHT'
ds = 'ERA-Interim'
p_scen = 'No-Aerosol'
if_temp = _env.odir_root + '/sim_temperature/Simulated_Global_and_Country_' + par + '_20yravg.nc'
if_ctry_list = _env.idir_root + '/regioncode/Country_List.xls'
if_ctry_pr = _env.idir_root + '/historical_stat/Ctry_Poor_Rich_from_Burke.csv' #adopt country list from Burke et al. 2018
if_ctry_gdpcap = _env.idir_root + '/historical_stat/' + '/API_NY.GDP.PCAP.KD_DS2_en_csv_v2.csv'
if_ctry_pop = _env.idir_root + '/historical_stat/' + '/API_SP.POP.TOTL_DS2_en_csv_v2.csv'
odir_gdp = _env.odir_root + '/gdp_' + ds + '/'
_env.mkdirs(odir_gdp)
#climatological temperature from three datasets
if_clim_temp = _env.odir_root + 'sim_temperature/Climatological_Temp_Ctry_3ds.csv'
itbl_clim_temp = pd.read_csv(if_clim_temp,index_col = 0)[['iso',ds]]
#country list
itbl_ctry_info = pd.read_csv(_env.odir_root + '/basic_stats/' + 'Country_Basic_Stats.csv')
#read global and country-level temperature
T_glob = Dataset(if_temp)['TREFHT_Global'][:,[0,1]]
T_ctry_full = Dataset(if_temp)['TREFHT_Country'][:,:,[0,1]]
#extract temperature for analyzed countries
T_ctry = T_ctry_full[((itbl_ctry_info['ind_in_full_list'].astype(int)).tolist()),:,:]
T_diff = T_ctry[:,:,1]-T_ctry[:,:,0]
T_ctry[:,:,0] = np.repeat(np.array(itbl_clim_temp[ds].values)[:,np.newaxis],8,axis=1)
T_ctry[:,:,1] = T_ctry[:,:,0] + T_diff
####country-level changes in GDP/cap growth rate####
########
# the net effect of a 1°C rise in temperature is to decrease growth rates in poor countries by -1.394 percentage points (Dell, Jones, and Olken, 2012, Table 2)
#median = -1.394
#standard error=0.408
if_gen_pars = 0
n_boot_sample = 1000
def cal_theta(theta,se_theta):
return np.random.normal(loc=theta,scale=se_theta,size=n_boot_sample)
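#e.g. cal_theta(-1.394, 0.408) draws n_boot_sample values from N(-1.394, 0.408^2);
#the draws are divided by 100 below to convert percentage points to fractions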
if if_gen_pars:
#generate 1000 sets of parameters for the selected damage function
djo_pars = cal_theta(-1.394,0.408)/100
_env.mkdirs(_env.idir_root + '/Dell_parameters/')
xr.Dataset({'djo_pars' : xr.DataArray(djo_pars,dims = ['boots'])}).to_netcdf(_env.idir_root + '/Dell_parameters/' + '/DJO_parameters.nc')
else:
djo_pars = xr.open_dataset(_env.idir_root + '/Dell_parameters/' + '/DJO_parameters.nc')['djo_pars'].values
n_ctry = len(itbl_ctry_info.index)
ifs_rich = 1-itbl_ctry_info['poor']
poor_ind = np.where(ifs_rich == 0)[0]
diff_gr = np.zeros([n_boot_sample, np.shape(T_ctry)[0],np.shape(T_ctry)[1]])
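#einsum 'i,jk->ijk' broadcasts each bootstrapped coefficient (boots) over the
#country x ensemble temperature differences, yielding a boots x countries x
#ensembles array of growth-rate changes; 'ijk,j->ijk' below then scales each
#country's growth-rate change by its baseline GDP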
diff_gr[:,poor_ind,:] = np.einsum('i,jk->ijk',djo_pars, np.squeeze(T_ctry[poor_ind,:,1]-T_ctry[poor_ind,:,0])) #*(0.2609434-1.655145)/100 #no-aerosol minus with-aerosol
diff_gdp = np.einsum('ijk,j->ijk',diff_gr,itbl_ctry_info[str(gdp_year) + '_gdp'])
_env.rmfile(odir_gdp + 'GDP_Changes_' + 'Dell_' + str(gdp_year) + '_' + ds + '_' + p_scen + '.nc')
onc = Dataset(odir_gdp + 'GDP_Changes_' + 'Dell_' + str(gdp_year) + '_' + ds + '_' + p_scen + '.nc', 'w', format='NETCDF4')
d_ctry = onc.createDimension('boots',n_boot_sample)
d_ctry = onc.createDimension('countries',n_ctry)
d_ens = onc.createDimension('ensembles',nens)
v_ratio = onc.createVariable('GDP_Ratio','f4',('boots','countries','ensembles'))
v_ratio.desc = 'Impacts of aerosol-induced cooling on annual GDP growth rate'
v_ratio[:] = diff_gr
v_gdp = onc.createVariable('GDP','f4',('boots','countries','ensembles'))
v_gdp.desc = 'Impacts of aerosol-induced cooling on country-level annual GDP'
v_gdp[:] = diff_gdp
#write global attribute
onc.by = 'Yixuan Zheng (yxzheng@carnegiescience.edu)'
onc.desc = 'Impacts of aerosol-induced cooling on annual GDP and GDP growth rate (based on the damage function by Dell et al. 2012)'
onc.creattime = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
onc.close()
####summarize global and regional GDP changes####
itbl_gdp_baseline = itbl_ctry_info.copy()
odir_summary = _env.odir_root + 'summary_' + ds
_env.mkdirs(odir_summary)
writer = pd.ExcelWriter(odir_summary + '/country_specific_statistics_GDP_'+ds+'_'+p_scen+'_Dell.xls')
otbls_ctry_GDP_stat = {}
gdp_tot = itbl_gdp_baseline[sgdp_year + '_gdp'].sum()
spe = 'Dell'
otbl_median = pd.DataFrame(index=[spe],columns = ['median','median_ratio','5','5_ratio','95','95_ratio','10','10_ratio','90','90_ratio','prob_benefit'])
imtrx_gdp = diff_gdp.copy()
##global total
imtrx_gdp_glob = (imtrx_gdp).sum(axis=1)
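#note: the 95th percentile is stored under the '5' columns (and vice versa)
#because the whole table is negated below (damages -> benefits), which
#reverses the percentile ordering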
otbl_median.loc[spe] = (
    np.median(imtrx_gdp_glob)/1e9, np.median(imtrx_gdp_glob)/gdp_tot*100,
    np.percentile(imtrx_gdp_glob,95)/1e9, np.percentile(imtrx_gdp_glob,95)/gdp_tot*100,
    np.percentile(imtrx_gdp_glob,5)/1e9, np.percentile(imtrx_gdp_glob,5)/gdp_tot*100,
    np.percentile(imtrx_gdp_glob,90)/1e9, np.percentile(imtrx_gdp_glob,90)/gdp_tot*100,
    np.percentile(imtrx_gdp_glob,10)/1e9, np.percentile(imtrx_gdp_glob,10)/gdp_tot*100,
    len(np.where(imtrx_gdp_glob<0)[0])/np.size(imtrx_gdp_glob),
)
otbl_ctry_GDP_stat = itbl_gdp_baseline.copy()
otbl_ctry_GDP_stat['GDP_mean_benefit'] = np.zeros(len(otbl_ctry_GDP_stat.index))
otbl_ctry_GDP_stat['GDP_median_benefit'] = np.zeros(len(otbl_ctry_GDP_stat.index))
otbl_ctry_GDP_stat['GDP_mean_benefit_ratio'] = np.zeros(len(otbl_ctry_GDP_stat.index))
otbl_ctry_GDP_stat['GDP_median_benefit_ratio'] = np.zeros(len(otbl_ctry_GDP_stat.index))
otbl_ctry_GDP_stat['GDP_90_benefit'] = np.zeros(len(otbl_ctry_GDP_stat.index))
otbl_ctry_GDP_stat['GDP_10_benefit'] = np.zeros(len(otbl_ctry_GDP_stat.index))
otbl_ctry_GDP_stat['GDP_95_benefit'] = np.zeros(len(otbl_ctry_GDP_stat.index))
otbl_ctry_GDP_stat['GDP_5_benefit'] = np.zeros(len(otbl_ctry_GDP_stat.index))
otbl_ctry_GDP_stat['probability_damage'] = np.zeros(len(otbl_ctry_GDP_stat.index)) #add by yz 20190719
for ictry,ctry in enumerate(itbl_ctry_info.index):
imtrx_country = (imtrx_gdp)[:,ictry,:]
otbl_ctry_GDP_stat.loc[ctry,'GDP_mean_benefit'] = -np.mean(imtrx_country)
otbl_ctry_GDP_stat.loc[ctry,'GDP_median_benefit'] = -np.median(imtrx_country)
otbl_ctry_GDP_stat.loc[ctry,'GDP_90_benefit'] = -np.percentile(imtrx_country,90)
otbl_ctry_GDP_stat.loc[ctry,'GDP_10_benefit'] = -np.percentile(imtrx_country,10)
otbl_ctry_GDP_stat.loc[ctry,'GDP_95_benefit'] = -np.percentile(imtrx_country,95)
otbl_ctry_GDP_stat.loc[ctry,'GDP_5_benefit'] = -np.percentile(imtrx_country,5)
otbl_ctry_GDP_stat.loc[ctry,'probability_damage'] = len(imtrx_country[imtrx_country>0])/np.size(imtrx_country)
otbl_ctry_GDP_stat['GDP_mean_benefit_ratio'] = otbl_ctry_GDP_stat['GDP_mean_benefit']/otbl_ctry_GDP_stat[sgdp_year+'_gdp']*100
otbl_ctry_GDP_stat['GDP_median_benefit_ratio'] = otbl_ctry_GDP_stat['GDP_median_benefit']/otbl_ctry_GDP_stat[sgdp_year+'_gdp']*100
otbl_ctry_GDP_stat.to_excel(writer,spe)
otbls_ctry_GDP_stat[spe] = otbl_ctry_GDP_stat.copy()
otbl_median = -otbl_median
otbl_median.to_excel(writer,'median_summary')
writer.save()
#==================changes in 90:10 and 80:20 ratio (inequality)===========================
itbl_gdp_baseline.sort_values([sgdp_year + '_gdpcap'],inplace=True)
tot_pop = itbl_gdp_baseline[sgdp_year + '_pop'].sum()
itbl_gdp_baseline[sgdp_year + '_gdpsum'] = 0
itbl_gdp_baseline[sgdp_year + '_popsum'] = 0
for irow, row in enumerate(itbl_gdp_baseline.index):
if irow == 0:
itbl_gdp_baseline.loc[row,sgdp_year + '_gdpsum'] = itbl_gdp_baseline.loc[row,sgdp_year + '_gdp']
itbl_gdp_baseline.loc[row, sgdp_year + '_popsum'] = itbl_gdp_baseline.loc[row,sgdp_year + '_pop']
else:
itbl_gdp_baseline.loc[row,sgdp_year + '_gdpsum'] = itbl_gdp_baseline[sgdp_year + '_gdpsum'].iloc[irow-1] + itbl_gdp_baseline.loc[row,sgdp_year + '_gdp']
itbl_gdp_baseline.loc[row, sgdp_year + '_popsum'] = itbl_gdp_baseline[sgdp_year + '_popsum'].iloc[irow-1] + itbl_gdp_baseline.loc[row,sgdp_year + '_pop']
itbl_gdp_baseline[sgdp_year + '_pop_ratio_sum'] = itbl_gdp_baseline[sgdp_year + '_popsum']/tot_pop
#deciles (<=10% and >=90%)
deciles = {}
ind10 = np.where(itbl_gdp_baseline[sgdp_year + '_pop_ratio_sum']<=0.1)[0]
deciles[10] = itbl_gdp_baseline.iloc[ind10].copy()
ind90 = np.where(itbl_gdp_baseline[sgdp_year + '_pop_ratio_sum']>=0.9)[0]
deciles[90] = itbl_gdp_baseline.iloc[ind90].copy()
#quintiles (<=20% and >=80%)
ind20 = np.where(itbl_gdp_baseline[sgdp_year + '_pop_ratio_sum']<=0.2)[0]
deciles[20] = itbl_gdp_baseline.iloc[ind20].copy()
ind80 = np.where(itbl_gdp_baseline[sgdp_year + '_pop_ratio_sum']>=0.8)[0]
deciles[80] = itbl_gdp_baseline.iloc[ind80].copy()
writer = pd.ExcelWriter(odir_summary + '/Deciles_and_Quintile_ratio_changes_'+ds+'_'+p_scen+'_Dell.xls')
otbls = {}
otbl_ineq = pd.DataFrame(index=[spe],columns = ['median_ratio','5_ratio','95_ratio','10_ratio','90_ratio','probability_reduced'])
otbls['deciles'] = otbl_ineq.copy()
otbls['quintiles'] = otbl_ineq.copy()
omtrx_gdp_spe = diff_gdp.copy()
dec_var = {}
dec_base = {}
for perc in [10,20,80,90]:
dec = deciles[perc].copy()
dec_pop_tot = dec[sgdp_year + '_pop'].sum()
dec_gdp_tot = dec[sgdp_year + '_gdp'].sum()
dec_base[perc] = dec_gdp_tot/dec_pop_tot
ind_ctry = dec.index
imtrx_dec = omtrx_gdp_spe[:,ind_ctry,:]
imtrx_dec_sum = dec_gdp_tot-(imtrx_dec).sum(axis=1) #+ dec_gdp_tot
dec_gdpcap = imtrx_dec_sum/dec_pop_tot
dec_var[perc] = dec_gdpcap.copy()
dec_diff = (dec_var[90]/dec_var[10]-dec_base[90]/dec_base[10])/(dec_base[90]/dec_base[10])*100
quin_diff = (dec_var[80]/dec_var[20] - dec_base[80]/dec_base[20])/(dec_base[80]/dec_base[20])*100
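#dec_diff/quin_diff: percentage change of the 90:10 (80:20) per-capita GDP
#ratio relative to its baseline; negative values mean the inequality ratio
#falls (cf. 'probability_reduced' below)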
otbls['deciles'].loc[spe,'median_ratio'] = np.median(dec_diff)
otbls['deciles'].loc[spe,'5_ratio'] = np.percentile(dec_diff,5)
otbls['deciles'].loc[spe,'95_ratio'] = np.percentile(dec_diff,95)
otbls['deciles'].loc[spe,'10_ratio'] = np.percentile(dec_diff,10)
otbls['deciles'].loc[spe,'90_ratio'] = np.percentile(dec_diff,90)
otbls['deciles'].loc[spe,'probability_reduced'] = len(dec_diff[dec_diff<0])/np.size(dec_diff)
otbls['quintiles'].loc[spe,'median_ratio'] = np.median(quin_diff)
otbls['quintiles'].loc[spe,'5_ratio'] = np.percentile(quin_diff,5)
otbls['quintiles'].loc[spe,'95_ratio'] = np.percentile(quin_diff,95)
otbls['quintiles'].loc[spe,'10_ratio'] = np.percentile(quin_diff,10)
otbls['quintiles'].loc[spe,'90_ratio'] = np.percentile(quin_diff,90)
otbls['quintiles'].loc[spe,'probability_reduced'] = len(quin_diff[quin_diff<0])/np.size(quin_diff)
otbls['deciles'].to_excel(writer,'deciles')
otbls['quintiles'].to_excel(writer,'quintiles')
writer.save()
| 41.142857
| 479
| 0.734192
|
7aec7c228e6fcaa0f832153274461daa2042b997
| 11,934
|
py
|
Python
|
mtp_noms_ops/apps/security/views/check.py
|
uk-gov-mirror/ministryofjustice.money-to-prisoners-noms-ops
|
eb537fb8a8e3adc588d50af1b000402c957b32a7
|
[
"MIT"
] | null | null | null |
mtp_noms_ops/apps/security/views/check.py
|
uk-gov-mirror/ministryofjustice.money-to-prisoners-noms-ops
|
eb537fb8a8e3adc588d50af1b000402c957b32a7
|
[
"MIT"
] | null | null | null |
mtp_noms_ops/apps/security/views/check.py
|
uk-gov-mirror/ministryofjustice.money-to-prisoners-noms-ops
|
eb537fb8a8e3adc588d50af1b000402c957b32a7
|
[
"MIT"
] | null | null | null |
from typing import Optional
from django.contrib import messages
from django.http import Http404, HttpResponseRedirect
from django.urls import reverse, reverse_lazy
from django.utils.translation import gettext_lazy
from django.views.generic.edit import BaseFormView, FormView
from mtp_common.api import retrieve_all_pages_for_path
from security.forms.check import (
AutoAcceptDetailForm,
AutoAcceptListForm,
AcceptOrRejectCheckForm,
CheckListForm,
CreditsHistoryListForm,
AssignCheckToUserForm,
UserCheckListForm
)
from security.utils import convert_date_fields, get_abbreviated_cardholder_names
from security.views.object_base import SecurityView, SimpleSecurityDetailView
class CheckListView(SecurityView):
"""
View returning the checks in 'To action' (pending) status.
"""
title = gettext_lazy('Credits to action')
template_name = 'security/checks_list.html'
form_class = CheckListForm
class MyListCheckView(SecurityView):
"""
    View returning the checks in 'To action' (pending) status assigned to the current user.
"""
title = gettext_lazy('My list')
template_name = 'security/checks_list.html'
form_class = UserCheckListForm
class CreditsHistoryListView(SecurityView):
"""
View history of all accepted and rejected credits.
"""
title = gettext_lazy('Decision history')
template_name = 'security/credits_history_list.html'
form_class = CreditsHistoryListForm
class AutoAcceptRuleListView(SecurityView):
"""
View history of all auto-accept rules
"""
title = gettext_lazy('Auto accepts')
template_name = 'security/auto_accept_rule_list.html'
form_class = AutoAcceptListForm
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['request_params'] = self.request.GET.dict()
return context
class AutoAcceptRuleDetailView(SimpleSecurityDetailView, FormView):
"""
    View the detail of a single auto-accept rule.
"""
list_title = gettext_lazy('Auto accepts')
template_name = 'security/auto_accept_rule.html'
object_context_key = 'auto_accept_rule'
id_kwarg_name = 'auto_accept_rule_id'
list_url = reverse_lazy('security:auto_accept_rule_list')
success_url = reverse_lazy('security:auto_accept_rule_list')
form_class = AutoAcceptDetailForm
def get_form_kwargs(self):
return dict(super().get_form_kwargs(), request=self.request, object_id=self.kwargs[self.id_kwarg_name])
def get_object_request_params(self):
return {
'url': f'/security/checks/auto-accept/{self.kwargs[self.id_kwarg_name]}/'
}
def get_title_for_object(self, detail_object):
return '{} {} {} {}'.format(
gettext_lazy('Review auto accept of credits from'),
get_abbreviated_cardholder_names(detail_object['debit_card_sender_details']['cardholder_names']),
gettext_lazy('to'),
detail_object['prisoner_profile']['prisoner_name']
)
def get_context_data(self, **kwargs):
context_data = super().get_context_data(**kwargs)
if not self.object:
# raise a generic error to display standard 500 page if auto-accept rule failed to load for some reason
raise ValueError('Could not load auto-accept rule')
self.title = self.get_title_for_object(self.object)
context_data['auto_accept_rule_is_active'] = sorted(
self.object['states'],
key=lambda s: s['created'],
reverse=True
)[0]['active']
# These must be called again even though called from base class,
# as they rely on self.title being populated, which in this case
# requires the detail_object
list_url = self.get_list_url()
context_data['breadcrumbs'] = self.get_breadcrumbs(list_url)
return context_data
def get_breadcrumbs(self, list_url):
return [
{'name': gettext_lazy('Home'), 'url': reverse('security:dashboard')},
{'name': self.list_title, 'url': list_url},
{'name': gettext_lazy('Review')},
]
def get_object_for_template(self, obj):
return convert_date_fields(obj, include_nested=True)
def form_valid(self, form):
result = form.deactivate_auto_accept_rule()
if not result:
return self.form_invalid(form)
messages.add_message(
self.request,
messages.INFO,
gettext_lazy('The auto accept was stopped'),
)
return super().form_valid(form)
class CheckAssignView(BaseFormView):
"""
Modify assignment of check
"""
form_class = AssignCheckToUserForm
redirect_to_list = False
id_kwarg_name = 'check_id'
page_kwarg_name = 'page'
def get_success_url(self):
check_id = self.kwargs[self.id_kwarg_name]
if self.redirect_to_list:
page = self.kwargs[self.page_kwarg_name]
page_params = f'?page={page}#check-row-{check_id}'
return reverse('security:check_list') + page_params
else:
return reverse('security:resolve_check', kwargs={'check_id': check_id})
def get_form_kwargs(self):
check_id = self.kwargs[self.id_kwarg_name]
form_kwargs = super().get_form_kwargs()
form_kwargs.update(
request=self.request,
object_id=check_id,
)
return form_kwargs
def get(self, request, *args, **kwargs):
return HttpResponseRedirect(self.get_success_url())
def form_valid(self, form):
check_id = self.kwargs[self.id_kwarg_name]
result = form.assign_or_unassign()
if not result:
if self.redirect_to_list:
return HttpResponseRedirect(reverse('security:check_list'))
else:
return HttpResponseRedirect(
reverse('security:resolve_check', kwargs={'check_id': check_id})
)
return super().form_valid(form)
class AcceptOrRejectCheckView(FormView):
"""
View rejecting a check in 'to action' (pending) status.
"""
object_list_context_key = 'checks'
title = gettext_lazy('Review credit')
list_title = gettext_lazy('Credits to action')
id_kwarg_name = 'check_id'
object_context_key = 'check'
list_url = reverse_lazy('security:check_list')
template_name = 'security/accept_or_reject_check.html'
form_class = AcceptOrRejectCheckForm
def get_form_kwargs(self):
form_kwargs = super().get_form_kwargs()
form_kwargs.update(
{
'request': self.request,
'object_id': self.kwargs[self.id_kwarg_name],
},
)
return form_kwargs
@staticmethod
def get_latest_auto_accept_state(auto_accept_rule):
return sorted(
auto_accept_rule['states'], key=lambda x: x['created']
)[-1]
def get_unbound_active_auto_accept_state(
self, api_session, debit_card_sender_details_id: int, prisoner_profile_id: int
) -> Optional[dict]:
query_existing_auto_accept_rule = api_session.get(
'/security/checks/auto-accept',
params={
'prisoner_profile_id': prisoner_profile_id,
'debit_card_sender_details_id': debit_card_sender_details_id
}
)
payload = query_existing_auto_accept_rule.json().get('results')
if len(payload) == 1 and self.get_latest_auto_accept_state(payload[0]).get('active'):
return convert_date_fields(self.get_latest_auto_accept_state(payload[0]))
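        # falls through to an implicit None when there is not exactly one
        # matching rule or its latest state is inactive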
def get_context_data(self, **kwargs):
context_data = super().get_context_data(**kwargs)
detail_object = context_data['form'].get_object()
if not detail_object:
raise Http404('Credit to check not found')
api_session = context_data['form'].session
context_data['unbound_active_auto_accept_state'] = self.get_unbound_active_auto_accept_state(
api_session,
detail_object['credit']['billing_address'][
'debit_card_sender_details'
],
detail_object['credit']['prisoner_profile'],
)
# keep query string in breadcrumbs
list_url = self.request.build_absolute_uri(str(self.list_url))
referrer_url = self.request.META.get('HTTP_REFERER', '-')
if referrer_url.split('?', 1)[0] == list_url:
list_url = referrer_url
context_data['breadcrumbs'] = [
{'name': gettext_lazy('Home'), 'url': reverse('security:dashboard')},
{'name': self.list_title, 'url': list_url},
{'name': self.title}
]
context_data[self.object_context_key] = detail_object
context_data['related_credits'] = self._get_related_credits(api_session, context_data[self.object_context_key])
return context_data
@staticmethod
def _get_related_credits(api_session, detail_object):
# Get the credits from the same sender that were actioned by FIU
if detail_object['credit']['sender_profile']:
sender_response = retrieve_all_pages_for_path(
api_session,
f'/senders/{detail_object["credit"]["sender_profile"]}/credits/',
**{
'exclude_credit__in': detail_object['credit']['id'],
'security_check__isnull': False,
'only_completed': False,
'security_check__actioned_by__isnull': False,
'include_checks': True
}
)
else:
sender_response = []
sender_credits = convert_date_fields(sender_response, include_nested=True)
# Get the credits to the same prisoner that were actioned by FIU
if detail_object['credit']['prisoner_profile']:
prisoner_response = retrieve_all_pages_for_path(
api_session,
f'/prisoners/{detail_object["credit"]["prisoner_profile"]}/credits/',
**{
                # Exclude any credits already displayed as part of the sender
                # credits, to prevent duplication where both the prisoner and
                # the sender match the credit in question
'exclude_credit__in': ','.join(
[str(detail_object['credit']['id'])] + [str(c['id']) for c in sender_credits]
),
'security_check__isnull': False,
'only_completed': False,
'security_check__actioned_by__isnull': False,
'include_checks': True
}
)
else:
prisoner_response = []
prisoner_credits = convert_date_fields(prisoner_response, include_nested=True)
return sorted(
prisoner_credits + sender_credits,
key=lambda c: c['security_check']['actioned_at'],
reverse=True
)
def form_valid(self, form):
if self.request.method == 'POST':
result, additional_info_message = form.accept_or_reject()
if not result:
return self.form_invalid(form)
if additional_info_message:
messages.add_message(
self.request,
messages.INFO,
gettext_lazy(additional_info_message),
)
if form.data['fiu_action'] == 'accept':
ui_message = gettext_lazy('Credit accepted')
else:
ui_message = gettext_lazy('Credit rejected')
messages.add_message(
self.request,
messages.INFO,
gettext_lazy(ui_message),
)
return HttpResponseRedirect(self.list_url)
return super().form_valid(form)
| 36.273556
| 119
| 0.634825
|
3e6ad627ed290f32e2d0953d244b98945050f5d9
| 693
|
py
|
Python
|
afm/flight/auth_handlers.py
|
shlomitk1/arrow-flight-module
|
81377d448af4abfac4038dfd256534c56a7422f9
|
[
"Apache-2.0"
] | 4
|
2020-11-08T04:39:07.000Z
|
2021-05-12T20:44:44.000Z
|
afm/flight/auth_handlers.py
|
shlomitk1/arrow-flight-module
|
81377d448af4abfac4038dfd256534c56a7422f9
|
[
"Apache-2.0"
] | 23
|
2021-09-14T10:09:38.000Z
|
2022-03-31T09:56:12.000Z
|
afm/flight/auth_handlers.py
|
shlomitk1/arrow-flight-module
|
81377d448af4abfac4038dfd256534c56a7422f9
|
[
"Apache-2.0"
] | 6
|
2021-08-02T07:38:49.000Z
|
2022-03-09T11:56:33.000Z
|
#
# Copyright 2020 IBM Corp.
# SPDX-License-Identifier: Apache-2.0
#
from pyarrow import flight
# taken from https://github.com/apache/arrow/blob/master/python/pyarrow/tests/test_flight.py#L450
class HttpBasicClientAuthHandler(flight.ClientAuthHandler):
"""An example implementation of HTTP basic authentication."""
def __init__(self, username, password):
super().__init__()
self.basic_auth = flight.BasicAuth(username, password)
self.token = None
def authenticate(self, outgoing, incoming):
auth = self.basic_auth.serialize()
outgoing.write(auth)
self.token = incoming.read()
def get_token(self):
return self.token
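# A minimal usage sketch (the server URI and credentials below are
# illustrative assumptions, not part of this module):
#
#     from pyarrow import flight
#     client = flight.FlightClient("grpc://localhost:8815")
#     client.authenticate(HttpBasicClientAuthHandler("user", "password"))
#     # subsequent calls on `client` carry the token issued by the server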
| 28.875
| 97
| 0.701299
|
f384b67d59e186e9423247eeef4b1b87bf214186
| 35,651
|
py
|
Python
|
desktop/libs/hadoop/gen-py/hadoop/api/hdfs/ttypes.py
|
digideskio/hortonworks-sandbox
|
dd8e95c91faee3daa094707baeb94c3953b41efa
|
[
"Apache-2.0"
] | 19
|
2015-05-01T19:59:03.000Z
|
2021-12-09T08:03:16.000Z
|
desktop/libs/hadoop/gen-py/hadoop/api/hdfs/ttypes.py
|
digideskio/hortonworks-sandbox
|
dd8e95c91faee3daa094707baeb94c3953b41efa
|
[
"Apache-2.0"
] | 1
|
2018-01-03T15:26:49.000Z
|
2018-01-03T15:26:49.000Z
|
desktop/libs/hadoop/gen-py/hadoop/api/hdfs/ttypes.py
|
hortonworks/hortonworks-sandbox
|
dd8e95c91faee3daa094707baeb94c3953b41efa
|
[
"Apache-2.0"
] | 30
|
2015-03-25T19:40:07.000Z
|
2021-05-28T22:59:26.000Z
|
#
# Autogenerated by Thrift Compiler (0.7.0)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
from thrift.Thrift import *
import hadoop.api.common.ttypes
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol, TProtocol
try:
from thrift.protocol import fastbinary
except:
fastbinary = None
class DatanodeReportType(object):
ALL_DATANODES = 1
LIVE_DATANODES = 2
DEAD_DATANODES = 3
_VALUES_TO_NAMES = {
1: "ALL_DATANODES",
2: "LIVE_DATANODES",
3: "DEAD_DATANODES",
}
_NAMES_TO_VALUES = {
"ALL_DATANODES": 1,
"LIVE_DATANODES": 2,
"DEAD_DATANODES": 3,
}
class DatanodeState(object):
NORMAL_STATE = 1
DECOMMISSION_INPROGRESS = 2
DECOMMISSIONED = 3
_VALUES_TO_NAMES = {
1: "NORMAL_STATE",
2: "DECOMMISSION_INPROGRESS",
3: "DECOMMISSIONED",
}
_NAMES_TO_VALUES = {
"NORMAL_STATE": 1,
"DECOMMISSION_INPROGRESS": 2,
"DECOMMISSIONED": 3,
}
class DatanodeInfo(object):
"""
Information and state of a data node.
Modelled after org.apache.hadoop.hdfs.protocol.DatanodeInfo
Attributes:
- name: HDFS name of the datanode (equals to <host>:<datanode port>)
- storageID: Unique ID within a HDFS cluster
- host: Host name of the Thrift server socket.
- thriftPort: Port number of the Thrift server socket, or UNKNOWN_THRIFT_PORT
if the Thrift port for this datanode is not known.
- httpPort: Port number of the Web UI
- capacity: Raw capacity of the data node (in bytes).
- dfsUsed: Space used by the data node (in bytes).
- remaining: Raw free space in the data node (in bytes).
- xceiverCount: Number of active connections to the data node.
- state: State of this data node.
  - millisSinceUpdate: Number of milliseconds since last contact
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'name', None, None, ), # 1
(2, TType.STRING, 'storageID', None, None, ), # 2
(3, TType.STRING, 'host', None, None, ), # 3
(4, TType.I32, 'thriftPort', None, None, ), # 4
(5, TType.I64, 'capacity', None, None, ), # 5
(6, TType.I64, 'dfsUsed', None, None, ), # 6
(7, TType.I64, 'remaining', None, None, ), # 7
(8, TType.I32, 'xceiverCount', None, None, ), # 8
(9, TType.I32, 'state', None, None, ), # 9
(10, TType.I32, 'httpPort', None, None, ), # 10
(11, TType.I64, 'millisSinceUpdate', None, None, ), # 11
)
def __init__(self, name=None, storageID=None, host=None, thriftPort=None, httpPort=None, capacity=None, dfsUsed=None, remaining=None, xceiverCount=None, state=None, millisSinceUpdate=None,):
self.name = name
self.storageID = storageID
self.host = host
self.thriftPort = thriftPort
self.httpPort = httpPort
self.capacity = capacity
self.dfsUsed = dfsUsed
self.remaining = remaining
self.xceiverCount = xceiverCount
self.state = state
self.millisSinceUpdate = millisSinceUpdate
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.name = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.storageID = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRING:
self.host = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.I32:
self.thriftPort = iprot.readI32();
else:
iprot.skip(ftype)
elif fid == 10:
if ftype == TType.I32:
self.httpPort = iprot.readI32();
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.I64:
self.capacity = iprot.readI64();
else:
iprot.skip(ftype)
elif fid == 6:
if ftype == TType.I64:
self.dfsUsed = iprot.readI64();
else:
iprot.skip(ftype)
elif fid == 7:
if ftype == TType.I64:
self.remaining = iprot.readI64();
else:
iprot.skip(ftype)
elif fid == 8:
if ftype == TType.I32:
self.xceiverCount = iprot.readI32();
else:
iprot.skip(ftype)
elif fid == 9:
if ftype == TType.I32:
self.state = iprot.readI32();
else:
iprot.skip(ftype)
elif fid == 11:
if ftype == TType.I64:
self.millisSinceUpdate = iprot.readI64();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('DatanodeInfo')
if self.name is not None:
oprot.writeFieldBegin('name', TType.STRING, 1)
oprot.writeString(self.name)
oprot.writeFieldEnd()
if self.storageID is not None:
oprot.writeFieldBegin('storageID', TType.STRING, 2)
oprot.writeString(self.storageID)
oprot.writeFieldEnd()
if self.host is not None:
oprot.writeFieldBegin('host', TType.STRING, 3)
oprot.writeString(self.host)
oprot.writeFieldEnd()
if self.thriftPort is not None:
oprot.writeFieldBegin('thriftPort', TType.I32, 4)
oprot.writeI32(self.thriftPort)
oprot.writeFieldEnd()
if self.capacity is not None:
oprot.writeFieldBegin('capacity', TType.I64, 5)
oprot.writeI64(self.capacity)
oprot.writeFieldEnd()
if self.dfsUsed is not None:
oprot.writeFieldBegin('dfsUsed', TType.I64, 6)
oprot.writeI64(self.dfsUsed)
oprot.writeFieldEnd()
if self.remaining is not None:
oprot.writeFieldBegin('remaining', TType.I64, 7)
oprot.writeI64(self.remaining)
oprot.writeFieldEnd()
if self.xceiverCount is not None:
oprot.writeFieldBegin('xceiverCount', TType.I32, 8)
oprot.writeI32(self.xceiverCount)
oprot.writeFieldEnd()
if self.state is not None:
oprot.writeFieldBegin('state', TType.I32, 9)
oprot.writeI32(self.state)
oprot.writeFieldEnd()
if self.httpPort is not None:
oprot.writeFieldBegin('httpPort', TType.I32, 10)
oprot.writeI32(self.httpPort)
oprot.writeFieldEnd()
if self.millisSinceUpdate is not None:
oprot.writeFieldBegin('millisSinceUpdate', TType.I64, 11)
oprot.writeI64(self.millisSinceUpdate)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class Block(object):
"""
Representation of a file block in HDFS
Modelled after org.apache.hadoop.hdfs.protocol.LocatedBlock
Attributes:
- blockId: Block ID (unique among all blocks in a filesystem).
- path: Path of the file which this block belongs to.
- numBytes: Length of this block.
- genStamp: Generational stamp of this block.
- nodes: List of data nodes with copies of this block.
- startOffset: Offset of the first byte of the block relative to the start of the file
- token: The serialized token associated with this block.
"""
thrift_spec = (
None, # 0
(1, TType.I64, 'blockId', None, None, ), # 1
(2, TType.STRING, 'path', None, None, ), # 2
(3, TType.I64, 'numBytes', None, None, ), # 3
(4, TType.I64, 'genStamp', None, None, ), # 4
(5, TType.LIST, 'nodes', (TType.STRUCT,(DatanodeInfo, DatanodeInfo.thrift_spec)), None, ), # 5
(6, TType.I64, 'startOffset', None, None, ), # 6
(7, TType.STRING, 'token', None, None, ), # 7
)
def __init__(self, blockId=None, path=None, numBytes=None, genStamp=None, nodes=None, startOffset=None, token=None,):
self.blockId = blockId
self.path = path
self.numBytes = numBytes
self.genStamp = genStamp
self.nodes = nodes
self.startOffset = startOffset
self.token = token
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I64:
self.blockId = iprot.readI64();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.path = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.I64:
self.numBytes = iprot.readI64();
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.I64:
self.genStamp = iprot.readI64();
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.LIST:
self.nodes = []
(_etype3, _size0) = iprot.readListBegin()
for _i4 in xrange(_size0):
_elem5 = DatanodeInfo()
_elem5.read(iprot)
self.nodes.append(_elem5)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 6:
if ftype == TType.I64:
self.startOffset = iprot.readI64();
else:
iprot.skip(ftype)
elif fid == 7:
if ftype == TType.STRING:
self.token = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('Block')
if self.blockId is not None:
oprot.writeFieldBegin('blockId', TType.I64, 1)
oprot.writeI64(self.blockId)
oprot.writeFieldEnd()
if self.path is not None:
oprot.writeFieldBegin('path', TType.STRING, 2)
oprot.writeString(self.path)
oprot.writeFieldEnd()
if self.numBytes is not None:
oprot.writeFieldBegin('numBytes', TType.I64, 3)
oprot.writeI64(self.numBytes)
oprot.writeFieldEnd()
if self.genStamp is not None:
oprot.writeFieldBegin('genStamp', TType.I64, 4)
oprot.writeI64(self.genStamp)
oprot.writeFieldEnd()
if self.nodes is not None:
oprot.writeFieldBegin('nodes', TType.LIST, 5)
oprot.writeListBegin(TType.STRUCT, len(self.nodes))
for iter6 in self.nodes:
iter6.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.startOffset is not None:
oprot.writeFieldBegin('startOffset', TType.I64, 6)
oprot.writeI64(self.startOffset)
oprot.writeFieldEnd()
if self.token is not None:
oprot.writeFieldBegin('token', TType.STRING, 7)
oprot.writeString(self.token)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class Stat(object):
"""
Information about a path in HDFS.
Modelled after org.apache.hadoop.fs.FileStatus
Attributes:
- path: The path.
  - isDir: True: The path represents a directory.
    False: The path represents a file.
- atime: Access time (milliseconds since 1970-01-01 00:00 UTC).
- mtime: Modification time (milliseconds since 1970-01-01 00:00 UTC).
- perms: Access permissions
- owner: Owner
- group: Group
- length: Length (in bytes).
- blockSize: Block size (in bytes).
- replication: Replication factor.
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'path', None, None, ), # 1
(2, TType.BOOL, 'isDir', None, None, ), # 2
(3, TType.I64, 'atime', None, None, ), # 3
(4, TType.I64, 'mtime', None, None, ), # 4
(5, TType.I16, 'perms', None, None, ), # 5
(6, TType.STRING, 'owner', None, None, ), # 6
(7, TType.STRING, 'group', None, None, ), # 7
None, # 8
None, # 9
None, # 10
None, # 11
None, # 12
(13, TType.I64, 'length', None, None, ), # 13
(14, TType.I64, 'blockSize', None, None, ), # 14
(15, TType.I16, 'replication', None, None, ), # 15
)
def __init__(self, path=None, isDir=None, atime=None, mtime=None, perms=None, owner=None, group=None, length=None, blockSize=None, replication=None,):
self.path = path
self.isDir = isDir
self.atime = atime
self.mtime = mtime
self.perms = perms
self.owner = owner
self.group = group
self.length = length
self.blockSize = blockSize
self.replication = replication
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.path = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.BOOL:
self.isDir = iprot.readBool();
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.I64:
self.atime = iprot.readI64();
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.I64:
self.mtime = iprot.readI64();
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.I16:
self.perms = iprot.readI16();
else:
iprot.skip(ftype)
elif fid == 6:
if ftype == TType.STRING:
self.owner = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 7:
if ftype == TType.STRING:
self.group = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 13:
if ftype == TType.I64:
self.length = iprot.readI64();
else:
iprot.skip(ftype)
elif fid == 14:
if ftype == TType.I64:
self.blockSize = iprot.readI64();
else:
iprot.skip(ftype)
elif fid == 15:
if ftype == TType.I16:
self.replication = iprot.readI16();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('Stat')
if self.path is not None:
oprot.writeFieldBegin('path', TType.STRING, 1)
oprot.writeString(self.path)
oprot.writeFieldEnd()
if self.isDir is not None:
oprot.writeFieldBegin('isDir', TType.BOOL, 2)
oprot.writeBool(self.isDir)
oprot.writeFieldEnd()
if self.atime is not None:
oprot.writeFieldBegin('atime', TType.I64, 3)
oprot.writeI64(self.atime)
oprot.writeFieldEnd()
if self.mtime is not None:
oprot.writeFieldBegin('mtime', TType.I64, 4)
oprot.writeI64(self.mtime)
oprot.writeFieldEnd()
if self.perms is not None:
oprot.writeFieldBegin('perms', TType.I16, 5)
oprot.writeI16(self.perms)
oprot.writeFieldEnd()
if self.owner is not None:
oprot.writeFieldBegin('owner', TType.STRING, 6)
oprot.writeString(self.owner)
oprot.writeFieldEnd()
if self.group is not None:
oprot.writeFieldBegin('group', TType.STRING, 7)
oprot.writeString(self.group)
oprot.writeFieldEnd()
if self.length is not None:
oprot.writeFieldBegin('length', TType.I64, 13)
oprot.writeI64(self.length)
oprot.writeFieldEnd()
if self.blockSize is not None:
oprot.writeFieldBegin('blockSize', TType.I64, 14)
oprot.writeI64(self.blockSize)
oprot.writeFieldEnd()
if self.replication is not None:
oprot.writeFieldBegin('replication', TType.I16, 15)
oprot.writeI16(self.replication)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class ContentSummary(object):
"""
Information about an entire subtree under a directory
Includes the information from org.apache.hadoop.fs.ContentSummary
Attributes:
- fileCount: Number of files in this directory
- directoryCount: Number of directories in this directory
- quota: Quota for this directory (number of files).
- spaceConsumed: Space consumed in disk (in bytes).
- spaceQuota: Quota consumed in disk (in bytes).
- path: The path
"""
thrift_spec = (
None, # 0
(1, TType.I64, 'fileCount', None, None, ), # 1
(2, TType.I64, 'directoryCount', None, None, ), # 2
(3, TType.I64, 'quota', None, None, ), # 3
(4, TType.I64, 'spaceConsumed', None, None, ), # 4
(5, TType.I64, 'spaceQuota', None, None, ), # 5
(6, TType.STRING, 'path', None, None, ), # 6
)
def __init__(self, fileCount=None, directoryCount=None, quota=None, spaceConsumed=None, spaceQuota=None, path=None,):
self.fileCount = fileCount
self.directoryCount = directoryCount
self.quota = quota
self.spaceConsumed = spaceConsumed
self.spaceQuota = spaceQuota
self.path = path
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I64:
self.fileCount = iprot.readI64();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.I64:
self.directoryCount = iprot.readI64();
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.I64:
self.quota = iprot.readI64();
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.I64:
self.spaceConsumed = iprot.readI64();
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.I64:
self.spaceQuota = iprot.readI64();
else:
iprot.skip(ftype)
elif fid == 6:
if ftype == TType.STRING:
self.path = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('ContentSummary')
if self.fileCount is not None:
oprot.writeFieldBegin('fileCount', TType.I64, 1)
oprot.writeI64(self.fileCount)
oprot.writeFieldEnd()
if self.directoryCount is not None:
oprot.writeFieldBegin('directoryCount', TType.I64, 2)
oprot.writeI64(self.directoryCount)
oprot.writeFieldEnd()
if self.quota is not None:
oprot.writeFieldBegin('quota', TType.I64, 3)
oprot.writeI64(self.quota)
oprot.writeFieldEnd()
if self.spaceConsumed is not None:
oprot.writeFieldBegin('spaceConsumed', TType.I64, 4)
oprot.writeI64(self.spaceConsumed)
oprot.writeFieldEnd()
if self.spaceQuota is not None:
oprot.writeFieldBegin('spaceQuota', TType.I64, 5)
oprot.writeI64(self.spaceQuota)
oprot.writeFieldEnd()
if self.path is not None:
oprot.writeFieldBegin('path', TType.STRING, 6)
oprot.writeString(self.path)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class UpgradeStatusReport(object):
"""
Attributes:
- version
- percentComplete
- finalized
- statusText: The informative text that is the same as is shown on the NN web UI
"""
thrift_spec = (
None, # 0
(1, TType.I32, 'version', None, None, ), # 1
(2, TType.I16, 'percentComplete', None, None, ), # 2
(3, TType.BOOL, 'finalized', None, None, ), # 3
(4, TType.STRING, 'statusText', None, None, ), # 4
)
def __init__(self, version=None, percentComplete=None, finalized=None, statusText=None,):
self.version = version
self.percentComplete = percentComplete
self.finalized = finalized
self.statusText = statusText
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I32:
self.version = iprot.readI32();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.I16:
self.percentComplete = iprot.readI16();
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.BOOL:
self.finalized = iprot.readBool();
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.STRING:
self.statusText = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('UpgradeStatusReport')
if self.version is not None:
oprot.writeFieldBegin('version', TType.I32, 1)
oprot.writeI32(self.version)
oprot.writeFieldEnd()
if self.percentComplete is not None:
oprot.writeFieldBegin('percentComplete', TType.I16, 2)
oprot.writeI16(self.percentComplete)
oprot.writeFieldEnd()
if self.finalized is not None:
oprot.writeFieldBegin('finalized', TType.BOOL, 3)
oprot.writeBool(self.finalized)
oprot.writeFieldEnd()
if self.statusText is not None:
oprot.writeFieldBegin('statusText', TType.STRING, 4)
oprot.writeString(self.statusText)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class DFSHealthReport(object):
"""
Information that mirrors the "health report" information available on the
NameNode web UI
Attributes:
- bytesTotal
- bytesUsed
- bytesRemaining
- bytesNonDfs
- numLiveDataNodes: How many datanodes are considered live
- numDeadDataNodes: How many datanodes are considered dead
- upgradeStatus: Status of the current running upgrade. If no upgrade
is running, this will be null.
- httpPort: The http port that the NameNode is listening on for its web UI
- this isn't really health, but it's related and handy
"""
thrift_spec = (
None, # 0
(1, TType.I64, 'bytesTotal', None, None, ), # 1
(2, TType.I64, 'bytesUsed', None, None, ), # 2
(3, TType.I64, 'bytesRemaining', None, None, ), # 3
(4, TType.I64, 'bytesNonDfs', None, None, ), # 4
(5, TType.I32, 'numLiveDataNodes', None, None, ), # 5
(6, TType.I32, 'numDeadDataNodes', None, None, ), # 6
(7, TType.STRUCT, 'upgradeStatus', (UpgradeStatusReport, UpgradeStatusReport.thrift_spec), None, ), # 7
(8, TType.I32, 'httpPort', None, None, ), # 8
)
def __init__(self, bytesTotal=None, bytesUsed=None, bytesRemaining=None, bytesNonDfs=None, numLiveDataNodes=None, numDeadDataNodes=None, upgradeStatus=None, httpPort=None,):
self.bytesTotal = bytesTotal
self.bytesUsed = bytesUsed
self.bytesRemaining = bytesRemaining
self.bytesNonDfs = bytesNonDfs
self.numLiveDataNodes = numLiveDataNodes
self.numDeadDataNodes = numDeadDataNodes
self.upgradeStatus = upgradeStatus
self.httpPort = httpPort
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I64:
self.bytesTotal = iprot.readI64();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.I64:
self.bytesUsed = iprot.readI64();
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.I64:
self.bytesRemaining = iprot.readI64();
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.I64:
self.bytesNonDfs = iprot.readI64();
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.I32:
self.numLiveDataNodes = iprot.readI32();
else:
iprot.skip(ftype)
elif fid == 6:
if ftype == TType.I32:
self.numDeadDataNodes = iprot.readI32();
else:
iprot.skip(ftype)
elif fid == 7:
if ftype == TType.STRUCT:
self.upgradeStatus = UpgradeStatusReport()
self.upgradeStatus.read(iprot)
else:
iprot.skip(ftype)
elif fid == 8:
if ftype == TType.I32:
self.httpPort = iprot.readI32();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('DFSHealthReport')
if self.bytesTotal is not None:
oprot.writeFieldBegin('bytesTotal', TType.I64, 1)
oprot.writeI64(self.bytesTotal)
oprot.writeFieldEnd()
if self.bytesUsed is not None:
oprot.writeFieldBegin('bytesUsed', TType.I64, 2)
oprot.writeI64(self.bytesUsed)
oprot.writeFieldEnd()
if self.bytesRemaining is not None:
oprot.writeFieldBegin('bytesRemaining', TType.I64, 3)
oprot.writeI64(self.bytesRemaining)
oprot.writeFieldEnd()
if self.bytesNonDfs is not None:
oprot.writeFieldBegin('bytesNonDfs', TType.I64, 4)
oprot.writeI64(self.bytesNonDfs)
oprot.writeFieldEnd()
if self.numLiveDataNodes is not None:
oprot.writeFieldBegin('numLiveDataNodes', TType.I32, 5)
oprot.writeI32(self.numLiveDataNodes)
oprot.writeFieldEnd()
if self.numDeadDataNodes is not None:
oprot.writeFieldBegin('numDeadDataNodes', TType.I32, 6)
oprot.writeI32(self.numDeadDataNodes)
oprot.writeFieldEnd()
if self.upgradeStatus is not None:
oprot.writeFieldBegin('upgradeStatus', TType.STRUCT, 7)
self.upgradeStatus.write(oprot)
oprot.writeFieldEnd()
if self.httpPort is not None:
oprot.writeFieldBegin('httpPort', TType.I32, 8)
oprot.writeI32(self.httpPort)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class QuotaException(Exception):
"""
Quota-related error
Attributes:
- msg: Error message.
- stack: Textual representation of the call stack.
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'msg', None, None, ), # 1
(2, TType.STRING, 'stack', None, None, ), # 2
)
def __init__(self, msg=None, stack=None,):
self.msg = msg
self.stack = stack
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.msg = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.stack = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('QuotaException')
if self.msg is not None:
oprot.writeFieldBegin('msg', TType.STRING, 1)
oprot.writeString(self.msg)
oprot.writeFieldEnd()
if self.stack is not None:
oprot.writeFieldBegin('stack', TType.STRING, 2)
oprot.writeString(self.stack)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __str__(self):
return repr(self)
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class BlockData(object):
"""
Encapsulates a block data transfer with its CRC
Attributes:
- crc: CRC32 of the data being transfered
- length: Length of the data being transfered
  - data: The data itself
"""
thrift_spec = (
None, # 0
(1, TType.I32, 'crc', None, None, ), # 1
(2, TType.I32, 'length', None, None, ), # 2
(3, TType.STRING, 'data', None, None, ), # 3
)
def __init__(self, crc=None, length=None, data=None,):
self.crc = crc
self.length = length
self.data = data
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I32:
self.crc = iprot.readI32();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.I32:
self.length = iprot.readI32();
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRING:
self.data = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('BlockData')
if self.crc is not None:
oprot.writeFieldBegin('crc', TType.I32, 1)
oprot.writeI32(self.crc)
oprot.writeFieldEnd()
if self.length is not None:
oprot.writeFieldBegin('length', TType.I32, 2)
oprot.writeI32(self.length)
oprot.writeFieldEnd()
if self.data is not None:
oprot.writeFieldBegin('data', TType.STRING, 3)
oprot.writeString(self.data)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
| 32.617566
| 192
| 0.636448
|
be0832b301fa9d4bb3e4afc2693b711e4cad10f1
| 6,620
|
py
|
Python
|
venv/share/pyshared/pyx/dvi/tfmfile.py
|
pengwu/scapy_env
|
3db9c5dea2e219048a2387649d6d89be342903d9
|
[
"MIT"
] | null | null | null |
venv/share/pyshared/pyx/dvi/tfmfile.py
|
pengwu/scapy_env
|
3db9c5dea2e219048a2387649d6d89be342903d9
|
[
"MIT"
] | null | null | null |
venv/share/pyshared/pyx/dvi/tfmfile.py
|
pengwu/scapy_env
|
3db9c5dea2e219048a2387649d6d89be342903d9
|
[
"MIT"
] | null | null | null |
# -*- encoding: utf-8 -*-
#
#
# Copyright (C) 2007-2011 Jörg Lehmann <joergl@users.sourceforge.net>
# Copyright (C) 2007-2011 André Wobst <wobsta@users.sourceforge.net>
#
# This file is part of PyX (http://pyx.sourceforge.net/).
#
# PyX is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# PyX is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PyX; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
from pyx import reader
class char_info_word:
def __init__(self, word):
self.width_index = int((word & 0xFF000000L) >> 24) #make sign-safe
self.height_index = (word & 0x00F00000) >> 20
self.depth_index = (word & 0x000F0000) >> 16
self.italic_index = (word & 0x0000FC00) >> 10
self.tag = (word & 0x00000300) >> 8
self.remainder = (word & 0x000000FF)
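# Worked example (hypothetical word, for illustration): 0x12345678 decodes to
# width_index=0x12, height_index=0x3, depth_index=0x4, italic_index=0x15,
# tag=2, remainder=0x78.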
class TFMfile:
def __init__(self, file, debug=0):
self.file = reader.stringreader(file.read())
#
# read pre header
#
self.lf = self.file.readint16()
self.lh = self.file.readint16()
self.bc = self.file.readint16()
self.ec = self.file.readint16()
self.nw = self.file.readint16()
self.nh = self.file.readint16()
self.nd = self.file.readint16()
self.ni = self.file.readint16()
self.nl = self.file.readint16()
self.nk = self.file.readint16()
self.ne = self.file.readint16()
self.np = self.file.readint16()
if not (self.bc-1 <= self.ec <= 255 and
self.ne <= 256 and
self.lf == 6+self.lh+(self.ec-self.bc+1)+self.nw+self.nh+self.nd
+self.ni+self.nl+self.nk+self.ne+self.np):
raise RuntimeError("error in TFM pre-header")
if debug:
print "lh=%d" % self.lh
#
# read header
#
self.checksum = self.file.readint32()
self.designsize = self.file.readint32()
        assert self.designsize > 0, "invalid design size"
if self.lh > 2:
assert self.lh > 11, "inconsistency in TFM file: incomplete field"
self.charcoding = self.file.readstring(40)
else:
self.charcoding = None
if self.lh > 12:
assert self.lh > 16, "inconsistency in TFM file: incomplete field"
self.fontfamily = self.file.readstring(20)
else:
self.fontfamily = None
if debug:
print "(FAMILY %s)" % self.fontfamily
print "(CODINGSCHEME %s)" % self.charcoding
print "(DESINGSIZE R %f)" % (16.0*self.designsize/16777216L)
if self.lh > 17:
self.sevenbitsave = self.file.readuchar()
# ignore the following two bytes
self.file.readint16()
facechar = self.file.readuchar()
# decode ugly face specification into the Knuth suggested string
if facechar < 18:
if facechar >= 12:
self.face = "E"
facechar -= 12
elif facechar >= 6:
self.face = "C"
facechar -= 6
else:
self.face = "R"
if facechar >= 4:
self.face = "L" + self.face
facechar -= 4
elif facechar >= 2:
self.face = "B" + self.face
facechar -= 2
else:
self.face = "M" + self.face
if facechar == 1:
self.face = self.face[0] + "I" + self.face[1]
else:
self.face = self.face[0] + "R" + self.face[1]
else:
self.face = None
else:
self.sevenbitsave = self.face = None
if self.lh > 18:
# just ignore the rest
print self.file.read((self.lh-18)*4)
#
# read char_info
#
self.char_info = [None]*(self.ec+1)
for charcode in range(self.bc, self.ec+1):
self.char_info[charcode] = char_info_word(self.file.readint32())
if self.char_info[charcode].width_index == 0:
# disable character if width_index is zero
self.char_info[charcode] = None
#
# read widths
#
self.width = [None for width_index in range(self.nw)]
for width_index in range(self.nw):
self.width[width_index] = self.file.readint32()
#
# read heights
#
self.height = [None for height_index in range(self.nh)]
for height_index in range(self.nh):
self.height[height_index] = self.file.readint32()
#
# read depths
#
self.depth = [None for depth_index in range(self.nd)]
for depth_index in range(self.nd):
self.depth[depth_index] = self.file.readint32()
#
# read italic
#
self.italic = [None for italic_index in range(self.ni)]
for italic_index in range(self.ni):
self.italic[italic_index] = self.file.readint32()
#
# read lig_kern
#
# XXX decode to lig_kern_command
self.lig_kern = [None for lig_kern_index in range(self.nl)]
for lig_kern_index in range(self.nl):
self.lig_kern[lig_kern_index] = self.file.readint32()
#
# read kern
#
self.kern = [None for kern_index in range(self.nk)]
for kern_index in range(self.nk):
self.kern[kern_index] = self.file.readint32()
#
# read exten
#
# XXX decode to extensible_recipe
self.exten = [None for exten_index in range(self.ne)]
for exten_index in range(self.ne):
self.exten[exten_index] = self.file.readint32()
#
# read param
#
# XXX decode
self.param = [None for param_index in range(self.np)]
for param_index in range(self.np):
self.param[param_index] = self.file.readint32()
self.file.close()
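# A minimal usage sketch, assuming a TFM file such as "cmr10.tfm" is available
# locally (the file name and debug flag are illustrative, not from the module):
#
#   with open("cmr10.tfm", "rb") as f:
#       tfm = TFMfile(f, debug=1)
#   print(tfm.designsize / 2**20)  # design size in points (fix_word scaling)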
| 31.079812
| 80
| 0.550302
|
87821f0341bd64940ede1f02184eb2f9b6e99b08
| 3,645
|
py
|
Python
|
site_scons/site_tools/auto_install_binaries.py
|
GeertBosch/mongo
|
884d232473dca72e0872f0e540d4c3108c1e0b3d
|
[
"Apache-2.0"
] | 1
|
2018-03-07T22:12:35.000Z
|
2018-03-07T22:12:35.000Z
|
site_scons/site_tools/auto_install_binaries.py
|
GeertBosch/mongo
|
884d232473dca72e0872f0e540d4c3108c1e0b3d
|
[
"Apache-2.0"
] | 4
|
2019-02-22T10:05:59.000Z
|
2021-03-26T00:20:23.000Z
|
site_scons/site_tools/auto_install_binaries.py
|
GeertBosch/mongo
|
884d232473dca72e0872f0e540d4c3108c1e0b3d
|
[
"Apache-2.0"
] | 10
|
2018-11-29T07:17:30.000Z
|
2022-03-07T01:33:41.000Z
|
import SCons
def exists(env):
return True
def generate(env):
env.Tool('install')
suffix_map = {
env.subst('$PROGSUFFIX') : 'bin',
'.dylib' : 'lib',
'.so' : 'lib',
}
def auto_install(env, target, source, **kwargs):
prefixDir = env.Dir('$INSTALL_DIR')
actions = []
targetDir = prefixDir.Dir(target)
actions = SCons.Script.Install(
target=targetDir,
source=source,
)
for s in map(env.Entry, env.Flatten(source)):
setattr(s.attributes, "aib_install_actions", actions)
        # Get the tags. If no tags were set, or a truthy value was set,
        # interpret that as a request for normal tagging. Auto-include
        # the 'all' tag and generate aliases. If the user explicitly set
        # INSTALL_ALIAS to something falsy, interpret that as meaning no
        # tags at all, so that we have a way to exempt targets from auto
        # installation.
tags = kwargs.get('INSTALL_ALIAS', None)
if tags is None or tags:
tags = set(tags or [])
tags.add('all')
if 'default' in tags:
tags.remove('default')
env.Alias('install', actions)
env.Default('install')
env.Alias(['install-' + tag for tag in tags], actions)
return actions
env.AddMethod(auto_install, 'AutoInstall')
def auto_install_emitter(target, source, env):
for t in target:
tentry = env.Entry(t)
# We want to make sure that the executor information stays
# persisted for this node after it is built so that we can
# access it in our install emitter below.
tentry.attributes.keep_targetinfo = 1
tsuf = tentry.get_suffix()
auto_install_location = suffix_map.get(tsuf)
if auto_install_location:
tentry_install_tags = env.get('INSTALL_ALIAS', [])
setattr(tentry.attributes, 'INSTALL_ALIAS', tentry_install_tags)
install = env.AutoInstall(auto_install_location, tentry, INSTALL_ALIAS=tentry_install_tags)
return (target, source)
def add_emitter(builder):
base_emitter = builder.emitter
new_emitter = SCons.Builder.ListEmitter([base_emitter, auto_install_emitter])
builder.emitter = new_emitter
target_builders = ['Program', 'SharedLibrary', 'LoadableModule', 'StaticLibrary']
for builder in target_builders:
builder = env['BUILDERS'][builder]
add_emitter(builder)
def scan_for_transitive_install(node, env, path=()):
results = []
install_sources = node.sources
for install_source in install_sources:
is_executor = install_source.get_executor()
if not is_executor:
continue
is_targets = is_executor.get_all_targets()
for is_target in (is_targets or []):
grandchildren = is_target.children()
for grandchild in grandchildren:
actions = getattr(grandchild.attributes, "aib_install_actions", None)
if actions:
results.extend(actions)
results = sorted(results, key=lambda t: str(t))
return results
from SCons.Tool import install
base_install_builder = install.BaseInstallBuilder
    assert base_install_builder.target_scanner is None
base_install_builder.target_scanner = SCons.Scanner.Scanner(
function=scan_for_transitive_install,
path_function=None,
)
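# A minimal SConstruct sketch using this tool (target and alias names are
# illustrative, not part of the original tool):
#
#   env = Environment(tools=['default', 'auto_install_binaries'])
#   env['INSTALL_DIR'] = '#install'
#   env.Program('hello', 'hello.c', INSTALL_ALIAS=['tools'])
#   # Building the 'install-tools' (or 'install-all') alias now copies the
#   # program into #install/bin, and the transitive scanner pulls in any
#   # installed libraries it depends on.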
| 36.45
| 107
| 0.611797
|
558e8d15dabb273639912ccb592f3299fb92f89d
| 168
|
py
|
Python
|
todoist_oauth2/urls.py
|
travisjungroth/todoer
|
af5cae5b63db685086d2237a6ffa4be704de3f08
|
[
"MIT"
] | 2
|
2020-11-18T21:20:58.000Z
|
2020-11-19T14:40:25.000Z
|
todoist_oauth2/urls.py
|
travisjungroth/todoer
|
af5cae5b63db685086d2237a6ffa4be704de3f08
|
[
"MIT"
] | 6
|
2021-02-26T21:48:17.000Z
|
2022-02-26T15:32:47.000Z
|
todoist_oauth2/urls.py
|
travisjungroth/todoer
|
af5cae5b63db685086d2237a6ffa4be704de3f08
|
[
"MIT"
] | null | null | null |
from allauth.socialaccount.providers.oauth2.urls import default_urlpatterns
from .provider import TodoistProvider
urlpatterns = default_urlpatterns(TodoistProvider)
| 24
| 75
| 0.869048
|
095fb2696eb5705c6090ae50d5ccbd7c25954256
| 1,275
|
py
|
Python
|
src/langumo/parsers/jsonstring.py
|
fossabot/langumo
|
2d8b30979878bb27fb07cc31879c13c5c186582c
|
[
"Apache-2.0"
] | null | null | null |
src/langumo/parsers/jsonstring.py
|
fossabot/langumo
|
2d8b30979878bb27fb07cc31879c13c5c186582c
|
[
"Apache-2.0"
] | null | null | null |
src/langumo/parsers/jsonstring.py
|
fossabot/langumo
|
2d8b30979878bb27fb07cc31879c13c5c186582c
|
[
"Apache-2.0"
] | null | null | null |
import re
import json
from langumo.building import Parser
from langumo.utils import AuxiliaryFile
from typing import Iterable
class EscapedJSONStringParser(Parser):
single_quotes_pattern = re.compile('[\x60\xb4\u2018\u2019]')
double_quotes_pattern = re.compile('[\u201c\u201d]')
def extract(self, raw: AuxiliaryFile) -> Iterable[str]:
with raw.open('r') as fp:
for line in fp:
if not line.strip():
continue
yield line
def parse(self, text: str) -> str:
text, _ = json.decoder.scanstring(text, 1)
filtered = []
for line in text.strip().splitlines():
if not line:
continue
# Remove duplicated spaces.
line = line.replace('\n', ' ').replace('\t', ' ')
while ' ' in line:
line = line.replace(' ', ' ')
# Normalize the quotes by replacing unusual ones to the standard
# ones.
line = (EscapedJSONStringParser
.single_quotes_pattern.sub('\'', line))
line = (EscapedJSONStringParser
.double_quotes_pattern.sub('"', line))
filtered.append(line)
return '\n'.join(filtered)
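# A quick illustration of the parser on a hand-made escaped JSON string
# (the input is illustrative):
#
#   parser = EscapedJSONStringParser()
#   raw = '"first  line\\n\\u201cquoted\\u201d  text"'
#   parser.parse(raw)  # -> 'first line\n"quoted" text'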
| 29.651163
| 76
| 0.556078
|
be93f88f07e46567573384b03414bb4f1353de8a
| 2,448
|
py
|
Python
|
models/memnet.py
|
Subrata-Chattopadhyay/memnet
|
bb382ce0cc3d0a4af29a704d090532b5d2ee00eb
|
[
"MIT"
] | null | null | null |
models/memnet.py
|
Subrata-Chattopadhyay/memnet
|
bb382ce0cc3d0a4af29a704d090532b5d2ee00eb
|
[
"MIT"
] | null | null | null |
models/memnet.py
|
Subrata-Chattopadhyay/memnet
|
bb382ce0cc3d0a4af29a704d090532b5d2ee00eb
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# file: memnet.py
# author: songyouwei <youwei0314@gmail.com>
# Copyright (C) 2018. All Rights Reserved.
from layers.attention import Attention
import torch
import torch.nn as nn
from numpy.random import seed
#torch.manual_seed(10)
#seed(4)
from layers.squeeze_embedding import SqueezeEmbedding
class MemNet(nn.Module):
def locationed_memory(self, memory, memory_len):
# here we just simply calculate the location vector in Model2's manner
batch_size = memory.shape[0]
seq_len = memory.shape[1]
memory_len = memory_len.cpu().numpy()
weight = [[] for i in range(batch_size)]
for i in range(batch_size):
for idx in range(memory_len[i]):
weight[i].append(1-float(idx+1)/memory_len[i])
for idx in range(memory_len[i], seq_len):
weight[i].append(1)
weight = torch.tensor(weight).to(self.opt.device)
memory = weight.unsqueeze(2)*memory
return memory
def __init__(self, embedding_matrix, opt):
super(MemNet, self).__init__()
self.opt = opt
self.embed = nn.Embedding.from_pretrained(torch.tensor(embedding_matrix, dtype=torch.float))
self.squeeze_embedding = SqueezeEmbedding(batch_first=True)
self.attention = Attention(opt.embed_dim, score_function='mlp')
self.x_linear = nn.Linear(opt.embed_dim, opt.embed_dim)
self.dense = nn.Linear(opt.embed_dim, opt.polarities_dim)
def forward(self, inputs):
text_raw_without_aspect_indices, aspect_indices = inputs[0], inputs[1]
memory_len = torch.sum(text_raw_without_aspect_indices != 0, dim=-1)
aspect_len = torch.sum(aspect_indices != 0, dim=-1)
        nonzeros_aspect = aspect_len.float().to(self.opt.device)
memory = self.embed(text_raw_without_aspect_indices)
memory = self.squeeze_embedding(memory, memory_len)
# memory = self.locationed_memory(memory, memory_len)
aspect = self.embed(aspect_indices)
aspect = torch.sum(aspect, dim=1)
aspect = torch.div(aspect, nonzeros_aspect.view(nonzeros_aspect.size(0), 1))
x = aspect.unsqueeze(dim=1)
for _ in range(self.opt.hops):
x = self.x_linear(x)
out_at, _ = self.attention(memory, x)
x = out_at + x
x = x.view(x.size(0), -1)
out = self.dense(x)
return out
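# A minimal instantiation sketch; the opt fields and embedding matrix below
# are illustrative placeholders, not values from the original project:
#
#   from types import SimpleNamespace
#   import numpy as np
#   opt = SimpleNamespace(device='cpu', embed_dim=300, polarities_dim=3, hops=3)
#   model = MemNet(np.random.rand(5000, 300), opt)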
| 40.131148
| 100
| 0.655229
|
0d715a64f66f713efa7be3b8d5cbc093c2440ff1
| 180
|
py
|
Python
|
privagal/wsgi.py
|
ychab/privagal
|
118197c8fdeb7e32c95cf9672b87fb0350a5a874
|
[
"BSD-3-Clause"
] | 6
|
2016-06-06T15:27:24.000Z
|
2016-07-14T01:26:42.000Z
|
privagal/wsgi.py
|
ychab/privagal
|
118197c8fdeb7e32c95cf9672b87fb0350a5a874
|
[
"BSD-3-Clause"
] | null | null | null |
privagal/wsgi.py
|
ychab/privagal
|
118197c8fdeb7e32c95cf9672b87fb0350a5a874
|
[
"BSD-3-Clause"
] | null | null | null |
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "privagal.settings.production")
application = get_wsgi_application()
| 22.5
| 79
| 0.833333
|
90882091ba1ae3b8dd024dff01eb7701d68d6fee
| 1,852
|
py
|
Python
|
code/denoising_event_lm/training/metrics/squad_em_and_f1.py
|
Willyoung2017/event-process-tracie
|
cc582facea64c096aa7f4d14156e43a19136151e
|
[
"Apache-2.0"
] | 6
|
2021-04-19T12:18:16.000Z
|
2021-10-20T06:47:40.000Z
|
denoising_event_lm/training/metrics/squad_em_and_f1.py
|
allenai/TemporalBART
|
180bdd1b86a1cad361158f5e78b5901498471d78
|
[
"MIT"
] | null | null | null |
denoising_event_lm/training/metrics/squad_em_and_f1.py
|
allenai/TemporalBART
|
180bdd1b86a1cad361158f5e78b5901498471d78
|
[
"MIT"
] | 2
|
2021-01-27T18:31:29.000Z
|
2021-08-30T15:57:22.000Z
|
from typing import Tuple
from allennlp.training.metrics.metric import Metric
from overrides import overrides
from denoising_event_lm.utils import squad_eval
@Metric.register("squad")
class SquadEmAndF1(Metric):
"""
This :class:`Metric` takes the best span string computed by a model, along with the answer
strings labeled in the data, and computed exact match and F1 score using the official SQuAD
evaluation script.
"""
def __init__(self) -> None:
self._total_em = 0.0
self._total_f1 = 0.0
self._count = 0
@overrides
def __call__(self, best_span_string, answer_strings):
"""
Parameters
----------
value : ``float``
The value to average.
"""
exact_match = squad_eval.metric_max_over_ground_truths(
squad_eval.exact_match_score, best_span_string, answer_strings
)
f1_score = squad_eval.metric_max_over_ground_truths(
squad_eval.f1_score, best_span_string, answer_strings
)
self._total_em += exact_match
self._total_f1 += f1_score
self._count += 1
@overrides
def get_metric(self, reset: bool = False) -> Tuple[float, float]:
"""
Returns
-------
Average exact match and F1 score (in that order) as computed by the official SQuAD script
over all inputs.
"""
exact_match = self._total_em / self._count if self._count > 0 else 0
f1_score = self._total_f1 / self._count if self._count > 0 else 0
if reset:
self.reset()
return exact_match, f1_score
@overrides
def reset(self):
self._total_em = 0.0
self._total_f1 = 0.0
self._count = 0
def __str__(self):
return f"SquadEmAndF1(em={self._total_em}, f1={self._total_f1})"
| 29.870968
| 97
| 0.62851
|
e3f704bd1acaccfe477d4709a8349da52ed15697
| 160
|
py
|
Python
|
abinit/__init__.py
|
wirawan0/pyqmc
|
8d641ba2b91d1d7a05a90574d0787fb991ee15e2
|
[
"Apache-2.0"
] | null | null | null |
abinit/__init__.py
|
wirawan0/pyqmc
|
8d641ba2b91d1d7a05a90574d0787fb991ee15e2
|
[
"Apache-2.0"
] | null | null | null |
abinit/__init__.py
|
wirawan0/pyqmc
|
8d641ba2b91d1d7a05a90574d0787fb991ee15e2
|
[
"Apache-2.0"
] | null | null | null |
# $Id: __init__.py,v 1.1 2010-02-10 23:11:54 wirawan Exp $
#
# pyqmc.abinit module
# Created: 20100208
# Wirawan Purwanto
#
# Namespace for abinit stuff.
pass
| 16
| 58
| 0.70625
|
5b013f989ac46eadc39660c67d3ab3edb8ea2235
| 609
|
py
|
Python
|
tests/pyre.pkg/calc/node_instance.py
|
avalentino/pyre
|
7e1f0287eb7eba1c6d1ef385e5160079283ac363
|
[
"BSD-3-Clause"
] | 25
|
2018-04-23T01:45:39.000Z
|
2021-12-10T06:01:23.000Z
|
tests/pyre.pkg/calc/node_instance.py
|
avalentino/pyre
|
7e1f0287eb7eba1c6d1ef385e5160079283ac363
|
[
"BSD-3-Clause"
] | 53
|
2018-05-31T04:55:00.000Z
|
2021-10-07T21:41:32.000Z
|
tests/pyre.pkg/calc/node_instance.py
|
avalentino/pyre
|
7e1f0287eb7eba1c6d1ef385e5160079283ac363
|
[
"BSD-3-Clause"
] | 12
|
2018-04-23T22:50:40.000Z
|
2022-02-20T17:27:23.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# michael a.g. aïvázis
# orthologue
# (c) 1998-2021 all rights reserved
#
"""
Check that the refcount is zero after all nodes have gone out of scope
"""
def test():
# get the package
import pyre.calc
# make a node class
class node(metaclass=pyre.calc.calculator): pass
# make a couple
n1 = node.variable()
n2 = node.variable()
# all done
return
# main
if __name__ == "__main__":
# skip pyre initialization since we don't rely on the executive
pyre_noboot = True
# run the test
test()
# end of file
| 16.026316
| 70
| 0.633826
|
b18ed9e86528dcb1cc0318dfd780806dba36fe0e
| 2,067
|
py
|
Python
|
zun/network/network.py
|
PradeepSingh1988/zun
|
0e691ad6d6499bcfa7bbd03030415485396ff2d1
|
[
"Apache-2.0"
] | null | null | null |
zun/network/network.py
|
PradeepSingh1988/zun
|
0e691ad6d6499bcfa7bbd03030415485396ff2d1
|
[
"Apache-2.0"
] | null | null | null |
zun/network/network.py
|
PradeepSingh1988/zun
|
0e691ad6d6499bcfa7bbd03030415485396ff2d1
|
[
"Apache-2.0"
] | null | null | null |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
from stevedore import driver
import zun.conf
CONF = zun.conf.CONF
def api(*args, **kwargs):
network_driver = CONF.network.driver
network_api = driver.DriverManager(
"zun.network.driver",
network_driver,
invoke_on_load=True).driver
network_api.init(*args, **kwargs)
return network_api
class Network(object, metaclass=abc.ABCMeta):
"""The base class that all Network classes should inherit from."""
def init(self, context, *args, **kwargs):
raise NotImplementedError()
def create_network(self, *args, **kwargs):
raise NotImplementedError()
def remove_network(self, network_name, **kwargs):
raise NotImplementedError()
def inspect_network(self, network_name, **kwargs):
raise NotImplementedError()
def list_networks(self, **kwargs):
raise NotImplementedError()
def connect_container_to_network(self, container, network_name, **kwargs):
raise NotImplementedError()
def disconnect_container_from_network(self, container, network_name,
**kwargs):
raise NotImplementedError()
def add_security_groups_to_ports(self, container, security_group_ids,
**kwargs):
raise NotImplementedError()
def remove_security_groups_from_ports(self, container, security_group_ids,
**kwargs):
raise NotImplementedError()
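# Usage sketch: api() loads whatever concrete driver is registered under the
# 'zun.network.driver' entry-point namespace with the name CONF.network.driver
# and initializes it in one call (the arguments below are illustrative):
#
#   network_api = api(context)
#   network_api.connect_container_to_network(container, 'net1')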
| 31.318182
| 78
| 0.671505
|
b1165a3b105680ddabff43a47abfd4dc3e4e73f4
| 1,219
|
py
|
Python
|
python_fundamentals/Filter by Type/index.py
|
justnclrk/Python
|
0922961cbd94694a69ae8132a5c33baf552d8d89
|
[
"MIT"
] | null | null | null |
python_fundamentals/Filter by Type/index.py
|
justnclrk/Python
|
0922961cbd94694a69ae8132a5c33baf552d8d89
|
[
"MIT"
] | 8
|
2020-06-06T01:02:06.000Z
|
2022-03-12T00:24:13.000Z
|
python_fundamentals/Filter by Type/index.py
|
justnclrk/Python
|
0922961cbd94694a69ae8132a5c33baf552d8d89
|
[
"MIT"
] | null | null | null |
# Write a program that, given some value, tests that value for its type. Here's what you should do for each type:
# Integer
# If the integer is greater than or equal to 100, print "That's a big number!" If the integer is less than 100, print "That's a small number"
sI = 45
bI = 200
if sI < 100:
print("That's a small number!")
else:
print("That's a big number!")
if bI >= 100:
print("That's a big number!")
else:
print("That's a small number!")
# String
# If the string is greater than or equal to 50 characters print "Long sentence." If the string is shorter than 50 characters print "Short sentence."
sS = "Rubber baby buggy bumpers"
bS = "Experience is simply the name we give our mistakes"
if len(sS) < 50:
print("Short sentence.")
else:
print("Long sentence.")
if len(bS) >= 50:
print("Long sentence.")
else:
print("Short sentence.")
# List
# If the length of the list is greater than or equal to 10 print "Big list!" If the list has fewer than 10 values print "Short list."
aL = [1, 7, 4, 21]
bL = [3, 5, 7, 34, 3, 2, 113, 65, 8, 89]
if len(aL) < 10:
print("Short list.")
else:
print("Big list!")
if len(bL) >= 10:
print("Big list!")
else:
print("Small list.")
| 32.078947
| 148
| 0.658737
|
bf51ac30439e7d7693d5ef5e22556d98a77ba3fe
| 1,514
|
py
|
Python
|
app/bin/TA_mts_federal_reserve_rh_fred_series.py
|
dmuegge/TA_mts_federal_reserve
|
6ea8dca8906ba69042b9070cc0c37b5df2674f71
|
[
"Apache-2.0"
] | null | null | null |
app/bin/TA_mts_federal_reserve_rh_fred_series.py
|
dmuegge/TA_mts_federal_reserve
|
6ea8dca8906ba69042b9070cc0c37b5df2674f71
|
[
"Apache-2.0"
] | null | null | null |
app/bin/TA_mts_federal_reserve_rh_fred_series.py
|
dmuegge/TA_mts_federal_reserve
|
6ea8dca8906ba69042b9070cc0c37b5df2674f71
|
[
"Apache-2.0"
] | null | null | null |
import ta_mts_federal_reserve_declare
from splunktaucclib.rest_handler.endpoint import (
field,
validator,
RestModel,
DataInputModel,
)
from splunktaucclib.rest_handler import admin_external, util
from splunk_aoblib.rest_migration import ConfigMigrationHandler
util.remove_http_proxy_env_vars()
fields = [
field.RestField(
'interval',
required=True,
encrypted=False,
default=None,
validator=validator.Pattern(
regex=r"""^\-[1-9]\d*$|^\d*$""",
)
),
field.RestField(
'index',
required=True,
encrypted=False,
default='default',
validator=validator.String(
min_len=1,
max_len=80,
)
),
field.RestField(
'series_id',
required=True,
encrypted=False,
default=None,
validator=validator.String(
min_len=0,
max_len=8192,
)
),
field.RestField(
'seed_date',
required=True,
encrypted=False,
default='1970-01-02',
validator=validator.String(
min_len=0,
max_len=8192,
)
),
field.RestField(
'disabled',
required=False,
validator=None
)
]
model = RestModel(fields, name=None)
endpoint = DataInputModel(
'fred_series',
model,
)
if __name__ == '__main__':
admin_external.handle(
endpoint,
handler=ConfigMigrationHandler,
)
| 19.164557
| 63
| 0.567371
|
268f40e9cbabcb0bb82d44060b0a129eb50cb3cc
| 3,190
|
py
|
Python
|
src/main/python/afp_resource_maker/rolemaker.py
|
ImmobilienScout24/add_aws_roles
|
1bd9233ea091646312d64c7453b28a3f7647c408
|
[
"Apache-2.0"
] | 1
|
2015-10-10T09:57:34.000Z
|
2015-10-10T09:57:34.000Z
|
src/main/python/afp_resource_maker/rolemaker.py
|
ImmobilienScout24/add_aws_roles
|
1bd9233ea091646312d64c7453b28a3f7647c408
|
[
"Apache-2.0"
] | null | null | null |
src/main/python/afp_resource_maker/rolemaker.py
|
ImmobilienScout24/add_aws_roles
|
1bd9233ea091646312d64c7453b28a3f7647c408
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import traceback
import boto
class LimitExceededException(Exception):
pass
class CanNotContinueException(Exception):
pass
class InvalidClientTokenIdException(Exception):
pass
class RoleMaker(object):
"""Create a role with policies to allow cross account operations"""
def __init__(self, configuration):
self.prefix = configuration['role']['prefix']
self.trust_policy_document = \
configuration['role']['trust_policy_document']
self.policy_name = configuration['role']['policy_name']
self.policy_document = configuration['role']['policy_document']
access_key_id = configuration['access_key_id']
secret_access_key = configuration['secret_access_key']
self.boto_connection = self._boto_connect(access_key_id,
secret_access_key)
def _boto_connect(self, access_key_id, secret_access_key):
"""Establish a boto iam connection and return the connection object"""
try:
return boto.connect_iam(
aws_access_key_id=access_key_id,
aws_secret_access_key=secret_access_key)
except boto.exception.NoAuthHandlerFound as exc:
raise CanNotContinueException(exc)
def _add_policy(self, role_name):
"""Add policy document to given role"""
try:
self.boto_connection.put_role_policy(role_name,
self.policy_name,
self.policy_document)
except boto.exception.BotoServerError as exc:
message = "Cannot add inline policy to role: %s" % exc.message
raise CanNotContinueException(message)
def _add_trust_relationship(self, role_name):
"""Add trust relationship to given role"""
try:
self.boto_connection.update_assume_role_policy(
role_name, self.trust_policy_document)
except boto.exception.BotoServerError as exc:
message = "Cannot add trust relationship to role: %s" % exc.message
raise CanNotContinueException(message)
def _create_role(self, role_name):
"""Add Role to AWS"""
try:
self.boto_connection.create_role(role_name)
except boto.exception.BotoServerError as exc:
message = "Failed to create role: '{0}'".format(role_name)
if exc.error_code == "EntityAlreadyExists":
return
elif exc.error_code == "LimitExceeded":
raise LimitExceededException(message)
elif exc.error_code == "InvalidClientTokenId":
raise InvalidClientTokenIdException(message)
else:
raise CanNotContinueException(traceback.format_exc())
def make_role(self, role_name):
"""Generate Role with Trust relationship and policy"""
prefixed_role_name = '{0}{1}'.format(self.prefix, role_name)
self._create_role(prefixed_role_name)
self._add_trust_relationship(prefixed_role_name)
self._add_policy(prefixed_role_name)
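# A minimal usage sketch; every value below is a placeholder, the keys mirror
# those read in RoleMaker.__init__:
#
#   configuration = {
#       'access_key_id': '<AWS access key id>',
#       'secret_access_key': '<AWS secret key>',
#       'role': {
#           'prefix': 'afp-',
#           'trust_policy_document': '<JSON trust policy>',
#           'policy_name': 'cross-account-policy',
#           'policy_document': '<JSON policy>',
#       },
#   }
#   RoleMaker(configuration).make_role('my-team')  # creates role 'afp-my-team'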
| 37.97619
| 79
| 0.638871
|
f00e3d6aefd2f1264257053e5d3464a819960035
| 19,011
|
py
|
Python
|
dataLoader/fused_utils.py
|
NVIDIA-AI-IOT/centernet_kinect
|
92438e9f1469d1d8deffb33569068d2b635f99b6
|
[
"MIT"
] | 17
|
2020-12-21T04:10:16.000Z
|
2022-02-03T18:12:42.000Z
|
dataLoader/fused_utils.py
|
NVIDIA-AI-IOT/centernet_kinect
|
92438e9f1469d1d8deffb33569068d2b635f99b6
|
[
"MIT"
] | null | null | null |
dataLoader/fused_utils.py
|
NVIDIA-AI-IOT/centernet_kinect
|
92438e9f1469d1d8deffb33569068d2b635f99b6
|
[
"MIT"
] | 3
|
2020-12-24T02:55:31.000Z
|
2021-05-22T02:27:21.000Z
|
'''
Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
'''
import os
import sys
import cv2
import json
import torch
import random
import numpy as np
from math import sqrt
from torch.utils.data import Dataset
# Adding Project Path
DIR_PATH = os.path.dirname(os.path.abspath(__file__))
PROJ_PATH = os.path.join(DIR_PATH, os.path.pardir)
sys.path.append(PROJ_PATH)
# Importing Project Libraries
import pipeline.constants as const
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# random.seed(42) # Answer to everything
def Transform(depth_image: np.array, ir_image: np.array, boxes: np.array, labels: np.array, train: bool):
"""
Apply transformation.
:param depth_image: np.array(uint16), Depth image
:param ir_image: np.array(uint16), IR image
:param boxes: np.array(np.float), a list of N boxes of this image
    :param labels: np.array(np.uint8), a list of N labels for the boxes
    :param train: bool, whether the item is for training or testing
:return: Transformed image, transformed Boxes, transformed labels
"""
new_depth_img = depth_image
new_ir_image = ir_image
new_boxes = boxes
new_labels = labels
if train and random.random() > 0.05:
if random.random() > 0.7:
new_depth_img, new_ir_image = distort_image(new_depth_img, new_ir_image)
if random.random() > 0.7:
new_depth_img, new_ir_image = random_magnification(new_depth_img, new_ir_image, new_boxes)
if random.random() > 0.5:
new_depth_img, new_ir_image, new_boxes = zoom_out(new_depth_img, new_ir_image, new_boxes)
new_depth_img, new_ir_image, new_boxes, new_labels = random_crop(new_depth_img, new_ir_image, new_boxes, new_labels)
elif train:
if random.random() > 0.5:
new_depth_img, new_ir_image, new_boxes = flip(new_depth_img, new_ir_image, new_boxes)
new_depth_img, new_ir_image = new_depth_img.astype(np.float32), new_ir_image.astype(np.float32)
return new_depth_img, new_ir_image, new_boxes, new_labels
def CreateHeatMap(depth_image: np.array, ir_image: np.array, bboxes:np.array, labels: np.array, img_shape=const.IMG_SHAPE,\
num_classes=const.NUM_CLASSES, stride=const.STRIDE):
"""
    Create the HeatMap of the corresponding image with the given bounding boxes at the given size img_shape
:param depth_image: np.array(uint16), Depth image
:param ir_image: np.array(uint16), IR image
:param bboxes: np.array (float32), the bounding boxes of the input image
:param labels: np.array(np.uint8), a list of N labels of the boxes
    :param img_shape: tuple (w, h), the shape of the input image
    :param num_classes: int, number of classes in the dataset
    :param stride: int, the downsampling factor of the model
"""
    # Resize the original image to input dimensions
input_depth_image = cv2.resize(depth_image, img_shape, interpolation=cv2.INTER_NEAREST)
input_depth_image = normalize(input_depth_image)
input_ir_image = cv2.resize(ir_image, img_shape, interpolation=cv2.INTER_NEAREST)
input_ir_image = normalize(input_ir_image)
    # Normalize the bounding boxes to output dimensions
h = depth_image.shape[0]
w = depth_image.shape[1]
old_dims = np.expand_dims(np.array([w, h, w, h], dtype=np.float32), axis=0)
    output_shape = (img_shape[0]//const.STRIDE, img_shape[1]//const.STRIDE)
output_h = output_shape[0]
output_w = output_shape[1]
output_dims = np.expand_dims(np.array([output_w, output_h, output_w, output_h], dtype=np.float32), axis=0)
new_bboxes = bboxes / old_dims * output_dims
# Create an output tensor containing heatmap, box sizes, and offsets
output_tensor = np.zeros((num_classes+4, output_shape[0], output_shape[1]), dtype=np.float32)
# Create a X,Y axis grid to compute the exponentials
out_x = np.arange(output_shape[1]) + 0.5
out_y = np.arange(output_shape[0]) + 0.5
xy_grid = np.stack(np.meshgrid(out_x, out_y))
for new_bbox, label in zip(new_bboxes, labels):
x_min = new_bbox[0]
y_min = new_bbox[1]
x_max = new_bbox[2]
y_max = new_bbox[3]
        center_x = min((x_min + x_max) / 2, output_w-1)
        center_y = min((y_min + y_max) / 2, output_h-1)
width = abs(x_max - x_min)
height = abs(y_max - y_min)
sigma = sqrt(width**2 + height**2)
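        # Per-object Gaussian peak: heatmap(x, y) = exp(-((x - cx)^2 + (y - cy)^2)
        # / (sigma / 10 * stride)); sigma scales with the box diagonal, so larger
        # objects get proportionally wider peaks before the 0.02 cutoff below.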
heatmap = np.zeros((output_shape[0], output_shape[1]), dtype=np.float32)
heatmap += (xy_grid[0] - int(center_x))**2
heatmap += (xy_grid[1] - int(center_y))**2
heatmap /= (sigma/10*const.STRIDE)
heatmap *= -1
heatmap = np.exp(heatmap)
heatmap[heatmap < 0.02] = 0
heatmap[int(center_y), int(center_x)] = 1
# Heatmap
output_tensor[label-1] = np.where(output_tensor[label-1] > heatmap, output_tensor[label-1], heatmap)
# Size
output_tensor[-4:-2, int(center_y), int(center_x)] = np.array([height*const.STRIDE, width*const.STRIDE])
# offset
output_tensor[-2:, int(center_y), int(center_x)] = np.array([center_y, center_x]) - np.floor(np.array([center_y, center_x]))
input_depth_image = np.expand_dims(input_depth_image, 0)
input_ir_image = np.expand_dims(input_ir_image, 0)
return input_depth_image, input_ir_image, output_tensor, new_bboxes # (Comment for test)
############################
### Internal Functions ###
############################
def distort_image(depth_image: np.array, ir_image: np.array):
"""
    Distort brightness and contrast
    :param depth_image: np.array(uint16), Depth image
    :param ir_image: np.array(uint16), IR image
    :return: distorted Depth and IR images
"""
def adjust_contrast(depth_image: np.array, ir_image: np.array):
adjust_factor_depth = random.uniform(0.1, 5) # (0.1, 5)
adjust_factor_depth_normalized = (adjust_factor_depth - 0.1)/4.9
adjust_factor_ir = (adjust_factor_depth_normalized * 0.8) + 0.8 # (0.8, 1.6)
min_val = min(1/adjust_factor_ir, adjust_factor_ir)
max_val = max(1/adjust_factor_ir, adjust_factor_ir)
adjust_factor_ir = random.uniform(min_val, max_val)
return np.clip(adjust_factor_depth * depth_image, 0, 65535).astype(np.uint16),\
np.clip(adjust_factor_ir * ir_image, 0, 65535).astype(np.uint16)
def adjust_brightness(depth_image: np.array, ir_image: np.array):
adjust_factor = random.randint(-100, 900)
return np.clip(depth_image + adjust_factor , 0, 65535).astype(np.uint16),\
np.clip(ir_image + -1*adjust_factor/5 , 0, 65535).astype(np.uint16)
depth_image, ir_image = adjust_contrast(depth_image, ir_image)
depth_image, ir_image = adjust_brightness(depth_image, ir_image)
return depth_image, ir_image
def random_magnification(depth_image: np.array, ir_image: np.array, boxes: np.array):
"""
    Perform hand magnification in an image by multiplying the pixel values in
    the hand region by a random factor in range [0.3, MAX_DEPTH_MAGNIFICATION]
    (with a coupled factor for the IR image)
    Helps learning further-away values
    :param depth_image: np.array(uint16), Depth image
    :param ir_image: np.array(uint16), IR image
    :param boxes: np.array, bounding boxes of the objects
    :return: magnified depth and IR images
"""
depth_image = depth_image.astype(np.float32)
ir_image = ir_image.astype(np.float32)
for box in boxes:
depth_factor = random.uniform(0.3, const.MAX_DEPTH_MAGNIFICATION)
depth_factor_normalized = (depth_factor - 0.3) / (const.MAX_DEPTH_MAGNIFICATION-0.3)
ir_factor = (depth_factor_normalized * (const.MAX_IR_MAGNIFICATION - 0.9)) + 0.9
min_val = min(1/ir_factor, ir_factor)
max_val = max(1/ir_factor, ir_factor)
ir_factor = random.uniform(min_val, max_val)
xmin = int(box[0])
ymin = int(box[1])
xmax = int(box[2])
ymax = int(box[3])
depth_image[ymin:ymax, xmin:xmax] *= depth_factor
ir_image[ymin:ymax, xmin:xmax] *= ir_factor
return depth_image, ir_image
def zoom_out(depth_image: np.array, ir_image: np.array, boxes: np.array):
"""
Perform zooming out of an image by placing the image in a larger canvas
of filler values.
filler will be the mean of the image
Helps learning smaller values
:param depth_image: np.array(uint16), Depth image
:param ir_image: np.array(uint16), IR image
:param boxes: np.array, bounding boxes of the objects
:return: expanded image, updated coordinates of bounding box
"""
h = depth_image.shape[0]
w = depth_image.shape[1]
max_scale = const.MAX_ZOOM_OUT
scale = random.uniform(1, max_scale)
new_h = int(h*scale)
new_w = int(w*scale)
depth_filler = depth_image.mean()
    new_depth_image = np.ones((new_h, new_w), dtype=float) * depth_filler
ir_filler = ir_image.mean()
    new_ir_image = np.ones((new_h, new_w), dtype=float) * ir_filler
left = random.randint(0, new_w - w)
right = left + w
top = random.randint(0, new_h - h)
bottom = top + h
new_depth_image[top:bottom, left:right] = depth_image
new_ir_image[top:bottom, left:right] = ir_image
new_boxes = boxes + np.array([left, top, left, top], dtype=np.float32)
return new_depth_image, new_ir_image, new_boxes
def random_crop(depth_image: np.array, ir_image: np.array, boxes: np.array, labels: np.array):
"""
Performs a random crop in the manner stated in the paper.
Helps detecting partial objects
:param depth_image: np.array(uint16), Depth image
:param ir_image: np.array(uint16), IR image
    :param boxes: numpy array, bounding boxes of the objects
    :param labels: numpy array, a list of N labels for the boxes
    :return: cropped image, boxes, and the remaining labels in the image
"""
h = depth_image.shape[0]
w = depth_image.shape[1]
while True:
# Randomly draw a value for min_overlap
min_overlap = random.choice([0., .3, .5, .7, .9, None])
if min_overlap is None:
return depth_image, ir_image, boxes, labels
        # Try 50 times for this choice of min_overlap
max_trials = 50
for _ in range(max_trials):
            # Crop dimensions must be in range [0.3, 1] of the original image
            # Note - it's [0.1, 1] in the paper
min_scale = 0.3
scale_h = random.uniform(min_scale, 1)
scale_w = random.uniform(min_scale, 1)
new_h = h*scale_h
new_w = w*scale_w
# Aspect ratio must be in range [0.5, 2]
aspect_ratio = new_h / new_w
if not (0.5 < aspect_ratio < 2):
continue
# Crop coordinates
left = random.randint(0, int(w - new_w))
right = int(left + new_w)
top = random.randint(0, int(h - new_h))
            bottom = int(top + new_h)
crop = np.expand_dims(np.array([left, top, right, bottom], dtype=np.float32), axis=0)
# Calculate IoU for crop and bounding boxs
overlap_crop = torch.FloatTensor(crop)
overlap_boxes = torch.FloatTensor(boxes)
overlap = find_jaccard_overlap(overlap_crop, overlap_boxes) # (1, n_objects)
overlap = overlap.squeeze(0) # (n_objects)
# If not a single bounding box satisfies the min overlap try again
if overlap.max().item() < min_overlap:
continue
new_depth_image = depth_image[top:bottom, left:right] # (1, new_h, new_w)
new_ir_image = ir_image[top:bottom, left:right] # (1, new_h, new_w)
# Find center of original bounding boxes
bb_centers = (boxes[:, :2] + boxes[:, 2:]) / 2. # (n_objects, 2)
# Find Bounding Boxes whos center is in the crop
centers_in_crop = (bb_centers[:, 0] > left) * (bb_centers[:, 0] < right) \
* (bb_centers[:, 1] > top) * (bb_centers[:, 1] < bottom)
# If not a single bounding box has its center in the crop, try again
if not centers_in_crop.any():
continue
# Discard bounding boxes that are not in the cropped image
new_boxes = boxes[centers_in_crop, :]
new_labels = labels[centers_in_crop]
# Calculate new bounding box coordinates
# crop : (left, top, right, bottom)
new_boxes[:, :2] = np.maximum(new_boxes[:, :2], crop[:, :2])
new_boxes[:, :2] -= crop[:, :2]
new_boxes[:, 2:] = np.minimum(new_boxes[:, 2:], crop[:, 2:])
new_boxes[:, 2:] -= crop[:, :2]
return new_depth_image, new_ir_image, new_boxes, new_labels
def flip(depth_image: np.array, ir_image: np.array, boxes: np.array):
"""
    Rotate the image by a random right angle (90/180/270 degrees) for better augmentation
:param depth_image: np.array(uint16), Depth image
:param ir_image: np.array(uint16), IR image
:param boxes: numpy array, (n_objects, 4)
    :return: rotated image and bounding boxes.
"""
h = depth_image.shape[0]
w = depth_image.shape[1]
random_rotate = random.choice([90, 180, 270])
matrix = cv2.getRotationMatrix2D((h/2, w/2), random_rotate, 1)
new_depth_image = cv2.warpAffine(depth_image, matrix, (h, w))
new_ir_image = cv2.warpAffine(ir_image, matrix, (h, w))
new_boxes = np.ones( (boxes.shape[0]*2, 3) )
new_boxes[:,:2] = boxes.reshape((boxes.shape[0]*2, 2))
new_boxes = np.matmul(matrix, new_boxes.transpose())
new_boxes = new_boxes.transpose()
new_boxes = new_boxes.reshape(boxes.shape)
for i, box in enumerate(new_boxes):
xmin = min(box[0], box[2])
ymin = min(box[1], box[3])
xmax = max(box[0], box[2])
ymax = max(box[1], box[3])
new_boxes[i] = np.array([xmin, ymin, xmax, ymax], dtype=np.float32)
return new_depth_image, new_ir_image, new_boxes
def normalize(image: np.array):
"""
    Normalize an image to zero mean and unit standard deviation
:param image: np.array(uint16), image
:return: normalized image
"""
mean = np.mean(image)
std = image.std()
if std==0:
std = 1
new_image = (image - mean) / std
return new_image
def find_intersection(set_1: torch.tensor, set_2: torch.tensor):
"""
    Find the intersection of every box combination between 2 sets of boxes that are in boundary coordinates.
:param set_1: set_1 (n1, 4)
:param set_2: set 2 (n2, 4)
:return: intersection of each of the boxes in set 1 with respect to each of the set 2 (n1, n2)
"""
DEVICE = set_1.device
lower_bounds = torch.max(set_1[:, :2].unsqueeze(1).to(DEVICE), set_2[:, :2].unsqueeze(0).to(DEVICE)) # (n1, n2, 2)
upper_bounds = torch.min(set_1[:, 2:].unsqueeze(1).to(DEVICE), set_2[:, 2:].unsqueeze(0).to(DEVICE)) # (n1, n2, 2)
intersection_dims = torch.clamp(upper_bounds - lower_bounds, min=0) # (n1, n2, 2)
return intersection_dims[:, :, 0] * intersection_dims[:, :, 1] # (n1, n2)
def find_jaccard_overlap(set_1: torch.tensor, set_2: torch.tensor):
"""
Find IoU of every box combination in between the 2 sets (boxes in boundary coordinates)
:param set_1: set 1 (n1, 4)
    :param set_2: set 2 (n2, 4)
:return: Jaccard overlap of each of the boxes in the set 1 with respect to set 2 (n1, n2)
"""
DEVICE = set_1.device
intersection = find_intersection(set_1, set_2)
area_set_1 = (set_1[:, 2] - set_1[:, 0]) * (set_1[:, 3] - set_1[:, 1]) # (n1)
    area_set_2 = (set_2[:, 2] - set_2[:, 0]) * (set_2[:, 3] - set_2[:, 1]) # (n2)
union = area_set_1.unsqueeze(1).to(DEVICE) + area_set_2.unsqueeze(0).to(DEVICE) - intersection # (n1, n2)
return intersection / union
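# A tiny worked example (the boxes are illustrative): two 2x2 boxes offset by
# one unit overlap in a 1x1 region, so IoU = 1 / (4 + 4 - 1) = 1/7 ~ 0.143.
#
#   a = torch.FloatTensor([[0., 0., 2., 2.]])
#   b = torch.FloatTensor([[1., 1., 3., 3.]])
#   find_jaccard_overlap(a, b)  # tensor([[0.1429]])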
###################
### Testing ###
###################
"""
import torch
import os
import torchvision.transforms.functional as FT
import matplotlib.patches as patches
import matplotlib.pyplot as plt
from PIL import Image, ImageOps
import json
import cv2
import numpy as np
with open("/home/analog/Desktop/NVIDIA/SSD/preprocess/data/train.json", "r") as jf:
load_dict = json.load(jf)
img_annotation = load_dict[100]
img_num = img_annotation["img_path"].split("/")[-1].split(".")[0]
base_path = "/home/analog/Desktop/NVIDIA/DataCollection/dataset/data/"
depth_img_path = os.path.join(base_path, "depth_image/"+img_num+".png")
ir_img_path = os.path.join(base_path, "ir_image/"+img_num+".png")
boxes = np.array(img_annotation["boxes"], dtype=np.float32) # Boxes need to be casted into Numpy Float Array
labels = np.array(img_annotation["labels"], dtype=np.long) # Labels need to be casted into torch Long tensor
depth_image = cv2.imread(depth_img_path, cv2.COLOR_BGR2GRAY).astype(np.uint16) # 16 bit unsigned integer values for an Depth Image (H, W)
ir_image = cv2.imread(ir_img_path, cv2.COLOR_BGR2GRAY).astype(np.uint16) # 16 bit unsigned integer values for an Depth Image (H, W)
new_depth_img, new_ir_image, new_boxes, new_labels = Transform(depth_image, ir_image, boxes, labels, True)
new_depth_image, new_ir_image, heat_map, new_boxes = CreateHeatMap(new_depth_img, new_ir_image, new_boxes, new_labels)
fig = plt.figure(figsize=(6, 8))
ax1 = fig.add_subplot(2, 1, 1)
ax2 = fig.add_subplot(2, 1, 2)
for i in range(new_boxes.shape[0]):
box_locs = new_boxes[i].tolist()
x, y = box_locs[0], box_locs[1]
width, height = abs(box_locs[0] - box_locs[2]), abs(box_locs[1] - box_locs[3])
rect = patches.Rectangle((x,y),width,height,linewidth=10,edgecolor='r',facecolor='none')
ax1.add_patch(rect)
out_img1 = new_ir_image[0]
out_img2 = new_depth_image[0]
ax1.imshow(out_img1, interpolation='nearest', cmap ='gray')
ax2.imshow(out_img2, interpolation='nearest', cmap ='gray')
plt.show()
"""
| 40.277542
| 460
| 0.662564
|
0acb260fd135efe7de36ab39717d2bed27428776
| 10,117
|
py
|
Python
|
app/tests/pages_tests/test_pages.py
|
comic/comic-django
|
4f534fae2c7d2102e94991667398aef12394e32e
|
[
"Apache-2.0"
] | 7
|
2016-11-05T07:16:30.000Z
|
2017-11-23T03:38:03.000Z
|
app/tests/pages_tests/test_pages.py
|
comic/comic-django
|
4f534fae2c7d2102e94991667398aef12394e32e
|
[
"Apache-2.0"
] | 113
|
2015-05-26T09:27:59.000Z
|
2018-03-21T10:45:56.000Z
|
app/tests/pages_tests/test_pages.py
|
comic/comic-django
|
4f534fae2c7d2102e94991667398aef12394e32e
|
[
"Apache-2.0"
] | 7
|
2015-07-16T20:11:22.000Z
|
2017-06-06T02:41:24.000Z
|
from itertools import chain
import pytest
from django.db.models import BLANK_CHOICE_DASH
from grandchallenge.pages.models import Page
from tests.factories import PageFactory
from tests.utils import get_view_for_user, validate_admin_only_view
@pytest.mark.django_db
@pytest.mark.parametrize(
"view", ["pages:list", "pages:create", "pages:delete"]
)
def test_page_admin_permissions(view, client, two_challenge_sets):
if view == "pages:delete":
PageFactory(
challenge=two_challenge_sets.challenge_set_1.challenge,
display_title="challenge1pagepermtest",
)
reverse_kwargs = {"slug": "challenge1pagepermtest"}
else:
reverse_kwargs = None
validate_admin_only_view(
viewname=view,
two_challenge_set=two_challenge_sets,
client=client,
reverse_kwargs=reverse_kwargs,
)
@pytest.mark.django_db
def test_page_update_permissions(client, two_challenge_sets):
p1 = PageFactory(
challenge=two_challenge_sets.challenge_set_1.challenge,
display_title="challenge1page1permissiontest",
)
validate_admin_only_view(
viewname="pages:update",
two_challenge_set=two_challenge_sets,
client=client,
reverse_kwargs={"slug": p1.slug},
)
@pytest.mark.django_db
def test_page_list_filter(client, two_challenge_sets):
"""Check that only pages related to this challenge are listed."""
p1 = PageFactory(
challenge=two_challenge_sets.challenge_set_1.challenge,
display_title="challenge1page1",
)
p2 = PageFactory(
challenge=two_challenge_sets.challenge_set_2.challenge,
display_title="challenge2page1",
)
response = get_view_for_user(
viewname="pages:list",
client=client,
challenge=two_challenge_sets.challenge_set_1.challenge,
user=two_challenge_sets.admin12,
)
assert p1.display_title in response.rendered_content
assert p2.display_title not in response.rendered_content
response = get_view_for_user(
viewname="pages:list",
client=client,
challenge=two_challenge_sets.challenge_set_2.challenge,
user=two_challenge_sets.admin12,
)
assert p1.display_title not in response.rendered_content
assert p2.display_title in response.rendered_content
@pytest.mark.django_db
def test_page_create(client, two_challenge_sets):
page_html = "<h1>HELLO WORLD</h1>"
page_title = "testpage1"
response = get_view_for_user(
viewname="pages:create",
client=client,
method=client.post,
challenge=two_challenge_sets.challenge_set_1.challenge,
user=two_challenge_sets.challenge_set_1.admin,
data={
"display_title": page_title,
"html": page_html,
"permission_level": Page.ALL,
},
)
assert response.status_code == 302
response = get_view_for_user(url=response.url, client=client)
assert response.status_code == 200
assert page_html in str(response.content)
# Check that it was created in the correct challenge
response = get_view_for_user(
viewname="pages:detail",
client=client,
challenge=two_challenge_sets.challenge_set_1.challenge,
reverse_kwargs={"slug": page_title},
)
assert response.status_code == 200
response = get_view_for_user(
viewname="pages:detail",
client=client,
challenge=two_challenge_sets.challenge_set_2.challenge,
reverse_kwargs={"slug": page_title},
)
assert response.status_code == 404
@pytest.mark.django_db
def test_page_update(client, two_challenge_sets):
p1 = PageFactory(
challenge=two_challenge_sets.challenge_set_1.challenge,
display_title="page1updatetest",
html="oldhtml",
)
# page with the same name in another challenge to check selection
PageFactory(
challenge=two_challenge_sets.challenge_set_2.challenge,
display_title="page1updatetest",
html="oldhtml",
)
response = get_view_for_user(
viewname="pages:update",
client=client,
challenge=two_challenge_sets.challenge_set_1.challenge,
user=two_challenge_sets.admin12,
reverse_kwargs={"slug": p1.slug},
)
assert response.status_code == 200
assert 'value="page1updatetest"' in response.rendered_content
response = get_view_for_user(
viewname="pages:update",
client=client,
method=client.post,
challenge=two_challenge_sets.challenge_set_1.challenge,
user=two_challenge_sets.admin12,
reverse_kwargs={"slug": p1.slug},
data={
"display_title": "editedtitle",
"permission_level": Page.ALL,
"html": "newhtml",
},
)
assert response.status_code == 302
# The slug shouldn't change
response = get_view_for_user(
viewname="pages:detail",
client=client,
challenge=two_challenge_sets.challenge_set_1.challenge,
user=two_challenge_sets.admin12,
reverse_kwargs={"slug": "page1updatetest"},
)
assert response.status_code == 200
assert "newhtml" in str(response.content)
# check that the other page is unaffected
response = get_view_for_user(
viewname="pages:detail",
client=client,
challenge=two_challenge_sets.challenge_set_2.challenge,
user=two_challenge_sets.admin12,
reverse_kwargs={"slug": "page1updatetest"},
)
assert response.status_code == 200
assert "oldhtml" in str(response.content)
@pytest.mark.django_db
def test_page_delete(client, two_challenge_sets):
# Two pages with the same title, make sure the right one is deleted
c1p1 = PageFactory(
challenge=two_challenge_sets.challenge_set_1.challenge,
display_title="page1",
)
c2p1 = PageFactory(
challenge=two_challenge_sets.challenge_set_2.challenge,
display_title="page1",
)
assert Page.objects.filter(pk=c1p1.pk).exists()
assert Page.objects.filter(pk=c2p1.pk).exists()
response = get_view_for_user(
viewname="pages:delete",
client=client,
method=client.post,
challenge=two_challenge_sets.challenge_set_1.challenge,
user=two_challenge_sets.admin12,
reverse_kwargs={"slug": "page1"},
)
assert response.status_code == 302
assert not Page.objects.filter(pk=c1p1.pk).exists()
assert Page.objects.filter(pk=c2p1.pk).exists()
response = get_view_for_user(
url=response.url, client=client, user=two_challenge_sets.admin12
)
assert response.status_code == 200
@pytest.mark.django_db
@pytest.mark.parametrize(
"page_to_move,move_op,expected",
[
(2, Page.UP, [1, 3, 2, 4]),
(1, Page.DOWN, [1, 3, 2, 4]),
(2, Page.FIRST, [2, 3, 1, 4]),
(1, Page.LAST, [1, 4, 2, 3]),
(0, BLANK_CHOICE_DASH[0], [1, 2, 3, 4]),
],
)
def test_page_move(
page_to_move, move_op, expected, client, two_challenge_sets
):
pages = [*two_challenge_sets.challenge_set_1.challenge.page_set.all()]
c2_pages = [*two_challenge_sets.challenge_set_2.challenge.page_set.all()]
for i in range(3):
pages.append(
PageFactory(challenge=two_challenge_sets.challenge_set_1.challenge)
)
# Same page name in challenge 2, make sure that these are unaffected
c2_pages.append(
PageFactory(
challenge=two_challenge_sets.challenge_set_2.challenge,
display_title=pages[i + 1].display_title,
)
)
assert [p.order for p in pages] == [1, 2, 3, 4]
assert [p.order for p in c2_pages] == [1, 2, 3, 4]
response = get_view_for_user(
viewname="pages:update",
client=client,
method=client.post,
challenge=two_challenge_sets.challenge_set_1.challenge,
user=two_challenge_sets.admin12,
reverse_kwargs={"slug": pages[page_to_move].slug},
data={
"display_title": pages[page_to_move].display_title,
"permission_level": pages[page_to_move].permission_level,
"html": pages[page_to_move].html,
"move": move_op,
},
)
for p in chain(pages, c2_pages):
p.refresh_from_db()
assert response.status_code == 302
assert [p.order for p in pages] == expected
assert [p.order for p in c2_pages] == [1, 2, 3, 4]
@pytest.mark.django_db
def test_create_page_with_same_title(client, two_challenge_sets):
PageFactory(
challenge=two_challenge_sets.challenge_set_1.challenge,
display_title="page1",
)
    # Creating a page with the same title should result in a different slug
response = get_view_for_user(
viewname="pages:create",
client=client,
method=client.post,
challenge=two_challenge_sets.challenge_set_1.challenge,
user=two_challenge_sets.challenge_set_1.admin,
data={
"display_title": "page1",
"html": "hello",
"permission_level": Page.ALL,
},
)
assert response.status_code == 302
challenge_pages = Page.objects.filter(
challenge=two_challenge_sets.challenge_set_1.challenge,
display_title="page1",
)
assert len(challenge_pages) == 2
assert challenge_pages[0].slug == "page1"
assert challenge_pages[1].slug == "page1-2"
# Creating one in another challenge should work
response = get_view_for_user(
viewname="pages:create",
client=client,
method=client.post,
challenge=two_challenge_sets.challenge_set_2.challenge,
user=two_challenge_sets.challenge_set_2.admin,
data={
"display_title": "page1",
"html": "hello",
"permission_level": Page.ALL,
},
)
assert response.status_code == 302
assert (
Page.objects.get(
challenge=two_challenge_sets.challenge_set_2.challenge,
display_title="page1",
).slug
== "page1"
)
| 32.847403
| 81
| 0.666008
|
75111b6e9d490b0a49c0024c1dd9ed4a6189b55e
| 4,307
|
py
|
Python
|
tempest/api/object_storage/test_object_version.py
|
rishabh20111990/tempest
|
df15531cd4231000b0da016f5cd8641523ce984e
|
[
"Apache-2.0"
] | 2
|
2015-08-13T00:07:49.000Z
|
2020-08-07T06:38:44.000Z
|
tempest/api/object_storage/test_object_version.py
|
rishabh20111990/tempest
|
df15531cd4231000b0da016f5cd8641523ce984e
|
[
"Apache-2.0"
] | 1
|
2019-08-08T10:36:44.000Z
|
2019-08-09T05:58:23.000Z
|
tempest/api/object_storage/test_object_version.py
|
rishabh20111990/tempest
|
df15531cd4231000b0da016f5cd8641523ce984e
|
[
"Apache-2.0"
] | 3
|
2016-08-30T06:53:54.000Z
|
2021-03-22T16:54:39.000Z
|
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import testtools
from tempest.api.object_storage import base
from tempest import config
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
CONF = config.CONF
class ContainerTest(base.BaseObjectTest):
def assertContainer(self, container, count, byte, versioned):
resp, _ = self.container_client.list_container_metadata(container)
self.assertHeaders(resp, 'Container', 'HEAD')
header_value = resp.get('x-container-object-count', 'Missing Header')
self.assertEqual(header_value, count)
header_value = resp.get('x-container-bytes-used', 'Missing Header')
self.assertEqual(header_value, byte)
header_value = resp.get('x-versions-location', 'Missing Header')
self.assertEqual(header_value, versioned)
@decorators.idempotent_id('a151e158-dcbf-4a1f-a1e7-46cd65895a6f')
@testtools.skipIf(
not CONF.object_storage_feature_enabled.object_versioning,
'Object-versioning is disabled')
def test_versioned_container(self):
# create container
vers_container_name = data_utils.rand_name(name='TestVersionContainer')
resp, _ = self.container_client.update_container(vers_container_name)
self.addCleanup(base.delete_containers,
[vers_container_name],
self.container_client,
self.object_client)
self.assertHeaders(resp, 'Container', 'PUT')
self.assertContainer(vers_container_name, '0', '0', 'Missing Header')
base_container_name = data_utils.rand_name(name='TestBaseContainer')
headers = {'X-versions-Location': vers_container_name}
resp, _ = self.container_client.update_container(
base_container_name,
**headers)
self.addCleanup(base.delete_containers,
[base_container_name],
self.container_client,
self.object_client)
self.assertHeaders(resp, 'Container', 'PUT')
self.assertContainer(base_container_name, '0', '0',
vers_container_name)
object_name = data_utils.rand_name(name='TestObject')
# create object
data_1 = data_utils.random_bytes()
resp, _ = self.object_client.create_object(base_container_name,
object_name, data_1)
# create 2nd version of object
data_2 = data_utils.random_bytes()
resp, _ = self.object_client.create_object(base_container_name,
object_name, data_2)
_, body = self.object_client.get_object(base_container_name,
object_name)
self.assertEqual(body, data_2)
# delete object version 2
resp, _ = self.object_client.delete_object(base_container_name,
object_name)
self.assertContainer(base_container_name, '1', '1024',
vers_container_name)
_, body = self.object_client.get_object(base_container_name,
object_name)
self.assertEqual(body, data_1)
# delete object version 1
self.object_client.delete_object(base_container_name,
object_name)
# containers should be empty
self.assertContainer(base_container_name, '0', '0',
vers_container_name)
self.assertContainer(vers_container_name, '0', '0',
'Missing Header')
| 46.815217
| 79
| 0.630369
|
597581b0fbfea1c9bb630519e6f93d103ff1fd1c
| 2,429
|
py
|
Python
|
Data_Analysis_with_Python/medical_data_visualizer.py
|
gusleak/freeCodeCamp
|
2a9e8ae5c1d8a1c738071f6ce26ef61b64ce5b65
|
[
"MIT"
] | null | null | null |
Data_Analysis_with_Python/medical_data_visualizer.py
|
gusleak/freeCodeCamp
|
2a9e8ae5c1d8a1c738071f6ce26ef61b64ce5b65
|
[
"MIT"
] | null | null | null |
Data_Analysis_with_Python/medical_data_visualizer.py
|
gusleak/freeCodeCamp
|
2a9e8ae5c1d8a1c738071f6ce26ef61b64ce5b65
|
[
"MIT"
] | null | null | null |
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
# Import data
df = pd.read_csv('medical_examination.csv')
# Add 'overweight' column
def overweight_check(row):
if row.weight / (row.height / 100) ** 2 > 25:
return 1
return 0
df['overweight'] = df.apply(overweight_check, axis=1)
# Normalize data by making 0 always good and 1 always bad. If the value of 'cholesterol' or 'gluc' is 1, make the value 0. If the value is more than 1, make the value 1.
def normalize_chol(row):
if row.cholesterol == 1:
return 0
return 1
def normalize_gluc(row):
if row.gluc == 1:
return 0
return 1
df['cholesterol'] = df.apply(normalize_chol, axis=1)
df['gluc'] = df.apply(normalize_gluc, axis=1)
# Draw Categorical Plot
def draw_cat_plot():
# Create DataFrame for cat plot using `pd.melt` using just the values from 'cholesterol', 'gluc', 'smoke', 'alco', 'active', and 'overweight'.
df_cat = df.melt(id_vars='cardio', value_vars=['cholesterol', 'gluc', 'smoke', 'alco', 'active', 'overweight'])
# Group and reformat the data to split it by 'cardio'. Show the counts of each feature.
df_cat['total'] = df_cat.groupby(['cardio', 'variable', 'value'])['cardio'].transform('size')
df_cat = df_cat.drop_duplicates().reset_index()
order_list = sorted(df_cat['variable'].unique().tolist())
# Draw the catplot with 'sns.catplot()'
fig = sns.catplot(x="variable", y="total", order=order_list, col="cardio", col_wrap=2, hue="value", kind="bar", data=df_cat)
fig.savefig('catplot.png')
return fig
# Draw Heat Map
def draw_heat_map():
# Clean the data
df_heat = df[(df['ap_lo'] <= df['ap_hi']) & (df['height'] >= df['height'].quantile(0.025)) & (df['height'] <= df['height'].quantile(0.975)) & (df['weight'] >= df['weight'].quantile(0.025)) & (df['weight'] <= df['weight'].quantile(0.975))]
# Calculate the correlation matrix
corr = df_heat.corr()
# Generate a mask for the upper triangle
mask = np.triu(np.ones_like(corr, dtype=bool))
# Set up the matplotlib figure
fig, ax = plt.subplots(figsize=(11, 9))
# Draw the heatmap with 'sns.heatmap()'
sns.heatmap(corr, annot=True, fmt=".1f", mask=mask, vmax=.3, center=0, square=True, linewidths=.5, cbar_kws={"shrink": .5})
fig.savefig('heatmap.png')
return fig
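# Usage sketch (assumes 'medical_examination.csv' is present, as read above):
#
#   draw_cat_plot()   # writes catplot.png
#   draw_heat_map()   # writes heatmap.png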
| 37.369231
| 242
| 0.660354
|
436a208a64da069c59a2212e61d0de70274c6f26
| 491
|
py
|
Python
|
mp/mp09valuearray.py
|
showa-yojyo/bin
|
8ddd29b3c629634212b3708904cf615c42a6eaf5
|
[
"MIT"
] | 1
|
2017-04-27T19:58:41.000Z
|
2017-04-27T19:58:41.000Z
|
mp/mp09valuearray.py
|
showa-yojyo/bin
|
8ddd29b3c629634212b3708904cf615c42a6eaf5
|
[
"MIT"
] | 8
|
2016-10-30T17:16:45.000Z
|
2018-05-15T15:01:45.000Z
|
mp/mp09valuearray.py
|
showa-yojyo/bin
|
8ddd29b3c629634212b3708904cf615c42a6eaf5
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
"""mp9valuearray.py: Use multiprocessing.Value and multiprocessing.Array.
Usage:
mp9valuearray.py
"""
import multiprocessing as mp
def f(n, a):
n.value = 3.1415927
for i in range(len(a)):
a[i] = -a[i]
def main():
num = mp.Value('d', 0.0)
arr = mp.Array('i', range(10))
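    # 'd' and 'i' are array-module typecodes (double and signed int); both
    # objects live in shared memory, so the child's writes are visible here.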
p = mp.Process(target=f, args=(num, arr))
p.start()
p.join()
print(num.value)
print(arr[:]) # explicitly copy
if __name__ == '__main__':
main()
| 18.185185
| 73
| 0.596741
|
82671cd8e72f84733f5a28acdb4b5fb9d56a0a03
| 454
|
py
|
Python
|
numpy/f2py/tests/test_quoted_character.py
|
andrewkwolek/numpy
|
cbccbe9dee293ff2bf0167e37443ce4975781562
|
[
"BSD-3-Clause"
] | 2
|
2022-01-20T18:13:17.000Z
|
2022-03-25T04:30:01.000Z
|
numpy/f2py/tests/test_quoted_character.py
|
andrewkwolek/numpy
|
cbccbe9dee293ff2bf0167e37443ce4975781562
|
[
"BSD-3-Clause"
] | 60
|
2021-06-14T08:54:35.000Z
|
2022-01-13T01:34:27.000Z
|
numpy/f2py/tests/test_quoted_character.py
|
andrewkwolek/numpy
|
cbccbe9dee293ff2bf0167e37443ce4975781562
|
[
"BSD-3-Clause"
] | 2
|
2021-08-21T13:22:45.000Z
|
2022-01-20T10:08:43.000Z
|
"""See https://github.com/numpy/numpy/pull/10676.
"""
import sys
import pytest
from . import util
class TestQuotedCharacter(util.F2PyTest):
sources = [util.getpath("tests", "src", "quoted_character", "foo.f")]
@pytest.mark.skipif(sys.platform == "win32",
reason="Fails with MinGW64 Gfortran (Issue #9673)")
def test_quoted_character(self):
assert self.module.foo() == (b"'", b'"', b";", b"!", b"(", b")")
| 26.705882
| 75
| 0.610132
|
adef93b65fb1b14aee9661178fab5ba44a198e23
| 342
|
py
|
Python
|
tests/test_scripts.py
|
toots/sinagot
|
f2d274d6475a3cee316ecf3b5d73d051e70e6c1c
|
[
"BSD-3-Clause"
] | 1
|
2020-12-21T16:19:31.000Z
|
2020-12-21T16:19:31.000Z
|
tests/test_scripts.py
|
toots/sinagot
|
f2d274d6475a3cee316ecf3b5d73d051e70e6c1c
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_scripts.py
|
toots/sinagot
|
f2d274d6475a3cee316ecf3b5d73d051e70e6c1c
|
[
"BSD-3-Clause"
] | null | null | null |
"""Test scripts"""
from pathlib import Path
def test_path(dataset, ID):
script = dataset.behavior.get(ID).steps.first().script
assert script.path.input == Path(dataset._data_path, "HDC", ID, "Trial1_report.txt")
assert script.path.output == Path(
dataset._data_path, "PROCESSED", ID, "HDC", "behavior-scores.csv"
)
| 28.5
| 88
| 0.678363
|
d35d0b33e173e654cebfb112a1746d8263ba1217
| 1,906
|
py
|
Python
|
setup.py
|
freddi301/spid-cie-oidc-django
|
7cfa8d775ee46e24efdc5ecfd32f7f39fb018149
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
freddi301/spid-cie-oidc-django
|
7cfa8d775ee46e24efdc5ecfd32f7f39fb018149
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
freddi301/spid-cie-oidc-django
|
7cfa8d775ee46e24efdc5ecfd32f7f39fb018149
|
[
"Apache-2.0"
] | null | null | null |
import os
import re
import sys
from glob import glob
from setuptools import setup, find_packages
SRC_FOLDER = '.'
PKG_NAME = 'spid_cie_oidc'
INSTALL_REQUIRES = [
"Django>=4.0",
"oidcmsg>=1.5.4",
"pydantic>=1.8.2",
"pytz>=2021.3",
"aiohttp",
"requests",
"pydantic",
"pydantic[email]"
]
with open(os.path.join(os.path.dirname(__file__), 'README.md')) as readme:
README = readme.read()
with open(f'{SRC_FOLDER}{os.path.sep}{PKG_NAME}/__init__.py', 'r') as fd:
VERSION = re.search(
r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]',
fd.read(), re.MULTILINE
).group(1)
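# The regex above lifts the bare version string out of a line of the form
#     __version__ = "x.y.z"
# without importing the package (which would require its dependencies).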
LICENSE = "License :: OSI Approved :: Apache Software License"
setup(
name=PKG_NAME,
version=VERSION,
description="SPID/CIE OIDC Federation Entity",
long_description=README,
long_description_content_type='text/markdown',
author='Giuseppe De Marco',
author_email='demarcog83@gmail.com',
license=LICENSE,
url=f"https://github.com/peppelinux/{PKG_NAME.replace('_', '-')}",
packages=[PKG_NAME, ],
package_dir={f"{PKG_NAME}": f"{SRC_FOLDER}/{PKG_NAME}"},
package_data={
f"{PKG_NAME}": [
i.replace(
f'{SRC_FOLDER}{os.path.sep}{PKG_NAME}{os.path.sep}', ''
)
for i in glob(
f'{SRC_FOLDER}{os.path.sep}{PKG_NAME}{os.path.sep}**',
recursive=True
)
if i and '__pycache__' not in i
]
},
classifiers=[
"Development Status :: 5 - Production/Stable",
LICENSE,
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Topic :: Software Development :: Libraries :: Python Modules"],
install_requires=INSTALL_REQUIRES,
zip_safe=False,
)
| 27.228571
| 74
| 0.591291
|
f3a3f049fa223a351236728b1df4656be16e8e0d
| 959
|
py
|
Python
|
pptx/replace.py
|
aleksiej-ostrowski/py_tools
|
d47eef6f9897d3412613e66730c34cd1d24faa84
|
[
"MIT"
] | null | null | null |
pptx/replace.py
|
aleksiej-ostrowski/py_tools
|
d47eef6f9897d3412613e66730c34cd1d24faa84
|
[
"MIT"
] | null | null | null |
pptx/replace.py
|
aleksiej-ostrowski/py_tools
|
d47eef6f9897d3412613e66730c34cd1d24faa84
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
#----------------------------#
# #
# Version 0.0.1 #
# Ostrovsky Alexey, 2016 #
# 0x0all@gmail.com #
# #
#----------------------------#
filename = "ideal.pptx"
out_filename = "ideal_new.pptx"
import os
fn_ = filename.split('.')
import glob2
all_files = glob2.glob("./" + fn_[0] + "/**/*.xml")
all_codes = ["7071%s" % i for i in xrange(1,4279)]
all_id = [str(i) for i in xrange(1,4279)]
dk = { key : value for (key, value) in zip(all_id, all_codes) }
# print all_files
# print all_codes
for f_ in all_files:
lines = []
with open(f_) as infile:
for line in infile:
        for k1, k2 in dk.items():
line = line.replace("%s<" % k2, "%s<" % k1).replace("%s " % k2, "%s " % k1)
lines.append(line)
with open(f_, 'w') as outfile:
for line in lines:
outfile.write(line)
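# A .pptx file is a zip archive; this script assumes it has already been
# extracted into ./<name>/. A sketch of the surrounding unpack/repack steps,
# using only the standard library:
#
#     import zipfile, shutil
#     with zipfile.ZipFile(filename) as z:
#         z.extractall(fn_[0])                 # run before the loop above
#     shutil.make_archive(fn_[0] + "_new", "zip", fn_[0])
#     # then rename the resulting .zip to out_filename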
| 22.833333
| 91
| 0.475495
|
1fac879bd66b4152a1bb065a2bc0c9c5bca14251
| 483
|
py
|
Python
|
python-student/exercise-02/timer.py
|
yahav876/Yahav-DevOps
|
308a10758150824061f14cc2589738355dec91e8
|
[
"CNRI-Python"
] | null | null | null |
python-student/exercise-02/timer.py
|
yahav876/Yahav-DevOps
|
308a10758150824061f14cc2589738355dec91e8
|
[
"CNRI-Python"
] | null | null | null |
python-student/exercise-02/timer.py
|
yahav876/Yahav-DevOps
|
308a10758150824061f14cc2589738355dec91e8
|
[
"CNRI-Python"
] | null | null | null |
#!/usr/bin/env python3
# Import the needed names directly so they can be called without the
# `time.` prefix.
from time import localtime, mktime, strftime
start_time = localtime()
print(f"Timer Starts at {strftime('%X',start_time)}")
# Wait for user to stop timer
input("Press 'Enter' to stop the timer")
stop_time = localtime()
difference = mktime(stop_time) - mktime(start_time)
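# mktime converts a struct_time back to seconds since the epoch, so the
# subtraction yields the elapsed wall-clock time (1-second resolution).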
print(f"Timer Stopped at {strftime('%X',stop_time)}")
print(f"Total time: {difference} seconds.")
| 23
| 77
| 0.73706
|
6eb69ac5ef38dc11ffa93eac4943d62673b81287
| 87
|
py
|
Python
|
liif/datasets/__init__.py
|
NejcHirci/material-addon
|
c08e2081413c3319b712c2f7193ac8013f601382
|
[
"MIT"
] | 4
|
2022-01-31T14:26:39.000Z
|
2022-02-06T06:34:27.000Z
|
liif/datasets/__init__.py
|
NejcHirci/material_addon
|
c08e2081413c3319b712c2f7193ac8013f601382
|
[
"MIT"
] | 2
|
2022-01-30T10:35:04.000Z
|
2022-01-30T10:35:04.000Z
|
liif/datasets/__init__.py
|
NejcHirci/material-addon
|
c08e2081413c3319b712c2f7193ac8013f601382
|
[
"MIT"
] | null | null | null |
from .datasets import register, make
from . import image_folder
from . import wrappers
| 21.75
| 36
| 0.804598
|
13705a521e39d353497e5a9f33c0306d8f5ae803
| 15,333
|
py
|
Python
|
qiskit/pulse/transforms/alignments.py
|
wshanks/qiskit-terra
|
580bc7dd61921c3e784aea5fb86fb3054c1d6a29
|
[
"Apache-2.0"
] | null | null | null |
qiskit/pulse/transforms/alignments.py
|
wshanks/qiskit-terra
|
580bc7dd61921c3e784aea5fb86fb3054c1d6a29
|
[
"Apache-2.0"
] | null | null | null |
qiskit/pulse/transforms/alignments.py
|
wshanks/qiskit-terra
|
580bc7dd61921c3e784aea5fb86fb3054c1d6a29
|
[
"Apache-2.0"
] | null | null | null |
# This code is part of Qiskit.
#
# (C) Copyright IBM 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""A collection of passes to reallocate the timeslots of instructions according to context."""
import abc
from typing import Callable, Dict, Any, Union
import numpy as np
from qiskit.circuit.parameterexpression import ParameterExpression
from qiskit.pulse.exceptions import PulseError
from qiskit.pulse.schedule import Schedule, ScheduleComponent
from qiskit.pulse.utils import instruction_duration_validation, deprecated_functionality
class AlignmentKind(abc.ABC):
"""An abstract class for schedule alignment."""
is_sequential = None
def __init__(self):
"""Create new context."""
self._context_params = tuple()
@abc.abstractmethod
def align(self, schedule: Schedule) -> Schedule:
"""Reallocate instructions according to the policy.
Only top-level sub-schedules are aligned. If sub-schedules are nested,
nested schedules are not recursively aligned.
Args:
schedule: Schedule to align.
Returns:
Schedule with reallocated instructions.
"""
pass
def to_dict(self) -> Dict[str, Any]:
"""Returns dictionary to represent this alignment."""
return {'alignment': self.__class__.__name__}
def __eq__(self, other):
"""Check equality of two transforms."""
return isinstance(other, type(self)) and self.to_dict() == other.to_dict()
def __repr__(self):
name = self.__class__.__name__
opts = self.to_dict()
opts.pop('alignment')
opts_str = ', '.join(f'{key}={val}' for key, val in opts.items())
return f'{name}({opts_str})'
class AlignLeft(AlignmentKind):
"""Align instructions in as-soon-as-possible manner.
Instructions are placed at earliest available timeslots.
"""
is_sequential = False
def align(self, schedule: Schedule) -> Schedule:
"""Reallocate instructions according to the policy.
Only top-level sub-schedules are aligned. If sub-schedules are nested,
nested schedules are not recursively aligned.
Args:
schedule: Schedule to align.
Returns:
Schedule with reallocated instructions.
"""
aligned = Schedule()
for _, child in schedule.children:
self._push_left_append(aligned, child)
return aligned
@staticmethod
def _push_left_append(this: Schedule, other: ScheduleComponent) -> Schedule:
"""Return ``this`` with ``other`` inserted at the maximum time over
all channels shared between ```this`` and ``other``.
Args:
this: Input schedule to which ``other`` will be inserted.
other: Other schedule to insert.
Returns:
Push left appended schedule.
"""
this_channels = set(this.channels)
other_channels = set(other.channels)
shared_channels = list(this_channels & other_channels)
ch_slacks = [this.stop_time - this.ch_stop_time(channel) + other.ch_start_time(channel)
for channel in shared_channels]
if ch_slacks:
slack_chan = shared_channels[np.argmin(ch_slacks)]
shared_insert_time = this.ch_stop_time(slack_chan) - other.ch_start_time(slack_chan)
else:
shared_insert_time = 0
# Handle case where channels not common to both might actually start
# after ``this`` has finished.
other_only_insert_time = other.ch_start_time(*(other_channels - this_channels))
# Choose whichever is greatest.
insert_time = max(shared_insert_time, other_only_insert_time)
return this.insert(insert_time, other, inplace=True)
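# A minimal usage sketch (assuming the qiskit.pulse API of this era:
# Schedule, Play, Constant, DriveChannel):
#
#     from qiskit.pulse import Schedule, Play, Constant, DriveChannel
#     sched = Schedule()
#     sched = sched.insert(0, Play(Constant(100, 0.1), DriveChannel(0)))
#     sched = sched.insert(120, Play(Constant(40, 0.1), DriveChannel(1)))
#     aligned = AlignLeft().align(sched)
#     # each top-level child is pushed to its earliest available timeslot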
class AlignRight(AlignmentKind):
"""Align instructions in as-late-as-possible manner.
Instructions are placed at latest available timeslots.
"""
is_sequential = False
def align(self, schedule: Schedule) -> Schedule:
"""Reallocate instructions according to the policy.
Only top-level sub-schedules are aligned. If sub-schedules are nested,
nested schedules are not recursively aligned.
Args:
schedule: Schedule to align.
Returns:
Schedule with reallocated instructions.
"""
aligned = Schedule()
for _, child in reversed(schedule.children):
aligned = self._push_right_prepend(aligned, child)
return aligned
@staticmethod
def _push_right_prepend(this: ScheduleComponent, other: ScheduleComponent) -> Schedule:
"""Return ``this`` with ``other`` inserted at the latest possible time
such that ``other`` ends before it overlaps with any of ``this``.
If required ``this`` is shifted to start late enough so that there is room
to insert ``other``.
Args:
this: Input schedule to which ``other`` will be inserted.
other: Other schedule to insert.
Returns:
Push right prepended schedule.
"""
this_channels = set(this.channels)
other_channels = set(other.channels)
shared_channels = list(this_channels & other_channels)
ch_slacks = [this.ch_start_time(channel) - other.ch_stop_time(channel)
for channel in shared_channels]
if ch_slacks:
insert_time = min(ch_slacks) + other.start_time
else:
insert_time = this.stop_time - other.stop_time + other.start_time
if insert_time < 0:
this.shift(-insert_time, inplace=True)
this.insert(0, other, inplace=True)
else:
this.insert(insert_time, other, inplace=True)
return this
class AlignSequential(AlignmentKind):
"""Align instructions sequentially.
Instructions played on different channels are also arranged in a sequence.
No buffer time is inserted in between instructions.
"""
is_sequential = True
def align(self, schedule: Schedule) -> Schedule:
"""Reallocate instructions according to the policy.
Only top-level sub-schedules are aligned. If sub-schedules are nested,
nested schedules are not recursively aligned.
Args:
schedule: Schedule to align.
Returns:
Schedule with reallocated instructions.
"""
aligned = Schedule()
for _, child in schedule.children:
aligned.insert(aligned.duration, child, inplace=True)
return aligned
class AlignEquispaced(AlignmentKind):
"""Align instructions with equispaced interval within a specified duration.
Instructions played on different channels are also arranged in a sequence.
This alignment is convenient to create dynamical decoupling sequences such as PDD.
"""
is_sequential = True
def __init__(self,
duration: Union[int, ParameterExpression]):
"""Create new equispaced context.
Args:
duration: Duration of this context. This should be larger than the schedule duration.
If the specified duration is shorter than the schedule duration,
no alignment is performed and the input schedule is just returned.
This duration can be parametrized.
"""
super().__init__()
self._context_params = (duration, )
@property
def duration(self):
"""Return context duration."""
return self._context_params[0]
def align(self, schedule: Schedule) -> Schedule:
"""Reallocate instructions according to the policy.
Only top-level sub-schedules are aligned. If sub-schedules are nested,
nested schedules are not recursively aligned.
Args:
schedule: Schedule to align.
Returns:
Schedule with reallocated instructions.
"""
instruction_duration_validation(self.duration)
total_duration = sum([child.duration for _, child in schedule.children])
if self.duration < total_duration:
return schedule
total_delay = self.duration - total_duration
if len(schedule.children) > 1:
# Calculate the interval in between sub-schedules.
# If the duration cannot be divided by the number of sub-schedules,
# the modulo is appended and prepended to the input schedule.
interval, mod = np.divmod(total_delay, len(schedule.children) - 1)
else:
interval = 0
mod = total_delay
# Calculate pre schedule delay
delay, mod = np.divmod(mod, 2)
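        # Worked example: three children with total duration 70 in a
        # duration-100 context give total_delay = 30, interval = 15,
        # mod = 0 and delay = 0, i.e. a 15-sample gap between children.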
aligned = Schedule()
# Insert sub-schedules with interval
_t0 = int(aligned.stop_time + delay + mod)
for _, child in schedule.children:
aligned.insert(_t0, child, inplace=True)
_t0 = int(aligned.stop_time + interval)
return aligned
def to_dict(self) -> Dict[str, Any]:
"""Returns dictionary to represent this alignment."""
return {'alignment': self.__class__.__name__,
'duration': self.duration}
class AlignFunc(AlignmentKind):
"""Allocate instructions at position specified by callback function.
The position is specified for each instruction of index ``j`` as a
fractional coordinate in [0, 1] within the specified duration.
Instructions played on different channels are also arranged in a sequence.
This alignment is convenient to create dynamical decoupling sequences such as UDD.
For example, UDD sequence with 10 pulses can be specified with following function.
.. code-block:: python
def udd10_pos(j):
return np.sin(np.pi*j/(2*10 + 2))**2
"""
is_sequential = True
def __init__(self, duration: Union[int, ParameterExpression], func: Callable):
"""Create new equispaced context.
Args:
duration: Duration of this context. This should be larger than the schedule duration.
If the specified duration is shorter than the schedule duration,
no alignment is performed and the input schedule is just returned.
This duration can be parametrized.
func: A function that takes an index of sub-schedule and returns the
                fractional coordinate of that sub-schedule. The returned value should be
defined within [0, 1]. The pulse index starts from 1.
"""
super().__init__()
self._context_params = (duration, )
self._func = func
@property
def duration(self):
"""Return context duration."""
return self._context_params[0]
def align(self, schedule: Schedule) -> Schedule:
"""Reallocate instructions according to the policy.
Only top-level sub-schedules are aligned. If sub-schedules are nested,
nested schedules are not recursively aligned.
Args:
schedule: Schedule to align.
Returns:
Schedule with reallocated instructions.
"""
instruction_duration_validation(self.duration)
if self.duration < schedule.duration:
return schedule
aligned = Schedule()
for ind, (_, child) in enumerate(schedule.children):
_t_center = self.duration * self._func(ind + 1)
_t0 = int(_t_center - 0.5 * child.duration)
if _t0 < 0 or _t0 > self.duration:
                raise PulseError('Invalid schedule position t=%d is specified at index=%d' % (_t0, ind))
aligned.insert(_t0, child, inplace=True)
return aligned
def to_dict(self) -> Dict[str, Any]:
"""Returns dictionary to represent this alignment.
.. note:: ``func`` is not presented in this dictionary. Just name.
"""
return {'alignment': self.__class__.__name__,
'duration': self._context_params[0],
'func': self._func.__name__}
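# A usage sketch for the UDD positioning described in the AlignFunc docstring
# (udd10_pos as defined there; dd_sched is assumed to hold the 10 pulses):
#
#     context = AlignFunc(duration=1000, func=udd10_pos)
#     udd_sched = context.align(dd_sched)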
@deprecated_functionality
def align_left(schedule: Schedule) -> Schedule:
"""Align a list of pulse instructions on the left.
Args:
schedule: Input schedule of which top-level sub-schedules will be rescheduled.
Returns:
        New schedule with input ``schedule`` child schedules and instructions
left aligned.
"""
context = AlignLeft()
return context.align(schedule)
@deprecated_functionality
def align_right(schedule: Schedule) -> Schedule:
"""Align a list of pulse instructions on the right.
Args:
schedule: Input schedule of which top-level sub-schedules will be rescheduled.
Returns:
        New schedule with input ``schedule`` child schedules and instructions
right aligned.
"""
context = AlignRight()
return context.align(schedule)
@deprecated_functionality
def align_sequential(schedule: Schedule) -> Schedule:
"""Schedule all top-level nodes in parallel.
Args:
schedule: Input schedule of which top-level sub-schedules will be rescheduled.
Returns:
New schedule with input `schedule`` child schedules and instructions
applied sequentially across channels
"""
context = AlignSequential()
return context.align(schedule)
@deprecated_functionality
def align_equispaced(schedule: Schedule, duration: int) -> Schedule:
"""Schedule a list of pulse instructions with equivalent interval.
Args:
schedule: Input schedule of which top-level sub-schedules will be rescheduled.
duration: Duration of context. This should be larger than the schedule duration.
Returns:
        New schedule with input ``schedule`` child schedules and instructions
aligned with equivalent interval.
Notes:
This context is convenient for writing PDD or Hahn echo sequence for example.
"""
context = AlignEquispaced(duration=duration)
return context.align(schedule)
@deprecated_functionality
def align_func(schedule: Schedule, duration: int, func: Callable[[int], float]) -> Schedule:
"""Schedule a list of pulse instructions with schedule position defined by the
numerical expression.
Args:
schedule: Input schedule of which top-level sub-schedules will be rescheduled.
duration: Duration of context. This should be larger than the schedule duration.
func: A function that takes an index of sub-schedule and returns the
            fractional coordinate of that sub-schedule.
The returned value should be defined within [0, 1].
The pulse index starts from 1.
Returns:
        New schedule with input ``schedule`` child schedules and instructions
        aligned at the positions specified by ``func``.
Notes:
This context is convenient for writing UDD sequence for example.
"""
context = AlignFunc(duration=duration, func=func)
return context.align(schedule)
| 34.378924
| 98
| 0.659101
|
28dd6ea2a4bd4fe854e6d7e4be03cc6988dc88bd
| 9,590
|
py
|
Python
|
openmdao.lib/src/openmdao/lib/doegenerators/optlh.py
|
mjfwest/OpenMDAO-Framework
|
a5521f47ad7686c25b203de74e1c7dff5fd7a52b
|
[
"Apache-2.0"
] | 69
|
2015-01-02T19:10:08.000Z
|
2021-11-14T04:42:28.000Z
|
openmdao.lib/src/openmdao/lib/doegenerators/optlh.py
|
jcchin/OpenMDAO-Framework
|
038e89b06da1c74f00918f4c6fbd8bd365e25657
|
[
"Apache-2.0"
] | 3
|
2015-01-15T23:08:18.000Z
|
2015-03-11T16:57:35.000Z
|
openmdao.lib/src/openmdao/lib/doegenerators/optlh.py
|
jcchin/OpenMDAO-Framework
|
038e89b06da1c74f00918f4c6fbd8bd365e25657
|
[
"Apache-2.0"
] | 31
|
2015-09-16T00:37:35.000Z
|
2022-01-10T06:27:55.000Z
|
# This implementation is based on a matlab implementation that has the
# following license:
#
# Copyright 2007 A Sobester
#
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or any
# later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License and GNU
# Lesser General Public License along with this program. If not, see
# <http://www.gnu.org/licenses/>.
import logging
from random import randint, shuffle, seed
# pylint: disable-msg=E0611,F0401
from numpy import array, size, sum, floor, zeros
from numpy.linalg import norm
from openmdao.main.datatypes.api import Int, Enum
from openmdao.main.interfaces import implements, IDOEgenerator
from openmdao.main.api import Container
def rand_latin_hypercube(n, k, edges=False):
"""
Calculates a random Latin hypercube set of n points in k
dimensions within [0,1]^k hypercube.
n: int
Desired number of points.
k: int
Number of design variables (dimensions).
edges: bool (optional)
If Edges=True, the extreme bins will have their centres on the
edges of the domain; otherwise the bins will be entirely
contained within the domain (default setting).
Returns an n by k numpy array.
"""
    # generate an n x k array in which each column is a random permutation
    # of the integers 1..n
    X = zeros((n, k))
    row = list(range(1, n+1))
for i in range(k):
shuffle(row)
X[:,i] = row
if edges:
return (X-1.0)/float((n-1))
return (X-.5)/float(n)
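# e.g. rand_latin_hypercube(4, 2) returns a 4x2 array in which each column is
# a random permutation of the four bin centres {0.125, 0.375, 0.625, 0.875}.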
def is_latin_hypercube(lh):
"""Returns True if the given array is a Latin hypercube.
The given array is assumed to be a numpy array.
"""
n,k = lh.shape
for j in range(k):
col = lh[:,j]
colset = set(col)
if len(colset) < len(col):
return False # something was duplicated
return True
class LHC_individual(object):
def __init__(self, doe, q=2, p=1):
self.q = q
self.p = p
self.doe = doe
self.phi = None # Morris-Mitchell sampling criterion
@property
def shape(self):
"""Size of the LatinHypercube DOE (rows,cols)."""
return self.doe.shape
def mmphi(self):
"""Returns the Morris-Mitchell sampling criterion for this Latin hypercube."""
if self.phi is None:
n,m = self.doe.shape
distdict = {}
#calculate the norm between each pair of points in the DOE
# TODO: This norm takes up the majority of the computation time. It
# should be converted to C or ShedSkin.
arr = self.doe
for i in range(n):
for j in range(i+1, n):
nrm = norm(arr[i]-arr[j], ord=self.p)
distdict[nrm] = distdict.get(nrm, 0) + 1
            distinct_d = array(list(distdict.keys()))
            # multiplicity array with a count of how many pairs of points have a given distance
            J = array(list(distdict.values()))
self.phi = sum(J*(distinct_d**(-self.q)))**(1.0/self.q)
return self.phi
def perturb(self, mutation_count):
""" Interchanges pairs of randomly chosen elements within randomly chosen
columns of a DOE a number of times. The result of this operation will also
be a Latin hypercube.
"""
new_doe = self.doe.copy()
n,k = self.doe.shape
for count in range(mutation_count):
col = randint(0, k-1)
#choosing two distinct random points
el1 = randint(0, n-1)
el2 = randint(0, n-1)
while el1==el2:
el2 = randint(0, n-1)
new_doe[el1, col] = self.doe[el2, col]
new_doe[el2, col] = self.doe[el1, col]
        return LHC_individual(new_doe, self.q, self.p)
def __iter__(self):
return self._get_rows()
def _get_rows(self):
for row in self.doe:
yield row
def __repr__(self):
return repr(self.doe)
def __str__(self):
return str(self.doe)
def __getitem__(self,*args):
return self.doe.__getitem__(*args)
_norm_map = {"1-norm":1,"2-norm":2}
class LatinHypercube(Container):
"""IDOEgenerator which provides a Latin hypercube DOE sample set.
"""
implements(IDOEgenerator)
num_samples = Int(20, desc="Number of sample points in the DOE sample set.")
num_parameters = Int(2, desc="Number of parameters, or dimensions, for the DOE.")
seed = Int(None, iotype="in",
desc="Random seed for the optimizer. Set to a specific value "
"for repeatable results; otherwise leave as None for truly "
"random seeding.")
def __init__(self, num_samples=None, ):
super(LatinHypercube,self).__init__()
if num_samples is not None:
self.num_samples = num_samples
def __iter__(self):
"""Return an iterator over our sets of input values."""
if self.seed is not None:
seed(self.seed)
return self._get_input_values()
def _get_input_values(self):
rand_doe = rand_latin_hypercube(self.num_samples, self.num_parameters)
for row in rand_doe:
yield row
class OptLatinHypercube(Container):
"""IDOEgenerator which provides a Latin hypercube DOE sample set.
    The Morris-Mitchell sampling criterion of the DOE is optimized
using an evolutionary algorithm.
"""
implements(IDOEgenerator)
num_samples = Int(20, desc="Number of sample points in the DOE sample set.")
num_parameters = Int(2, desc="Number of parameters, or dimensions, for the DOE.")
population = Int(20,
desc="Size of the population used in the evolutionary optimization.")
generations = Int(2,
desc="Number of generations the optimization will evolve over.")
norm_method = Enum(["1-norm","2-norm"],
desc="Vector norm calculation method. '1-norm' is faster but less accurate.")
seed = Int(None, iotype="in",
desc="Random seed for the optimizer. Set to a specific value "
"for repeatable results; otherwise leave as None for truly "
"random seeding.")
def __init__(self, num_samples=None, population=None,generations=None):
super(OptLatinHypercube,self).__init__()
self.qs = [1,2,5,10,20,50,100] #list of qs to try for Phi_q optimization
if num_samples is not None:
self.num_samples = num_samples
if population is not None:
self.population = population
if generations is not None:
self.generations = generations
def __iter__(self):
"""Return an iterator over our sets of input values."""
if self.seed is not None:
seed(self.seed)
return self._get_input_values()
def _get_input_values(self):
rand_doe = rand_latin_hypercube(self.num_samples, self.num_parameters)
        best_lhc = LHC_individual(rand_doe, q=1, p=_norm_map[self.norm_method])
for q in self.qs:
            lh = LHC_individual(rand_doe, q, _norm_map[self.norm_method])
lh_opt = _mmlhs(lh, self.population, self.generations)
if lh_opt.mmphi() < best_lhc.mmphi():
best_lhc = lh_opt
for row in best_lhc:
yield row
def _mmlhs(x_start, population, generations):
"""Evolutionary search for most space filling Latin-Hypercube.
Returns a new LatinHypercube instance with an optimized set of points.
"""
x_best = x_start
phi_best = x_start.mmphi()
n = x_start.shape[1]
level_off = floor(0.85*generations)
for it in range(generations):
if it < level_off and level_off > 1.:
mutations = int(round(1+(0.5*n-1)*(level_off-it)/(level_off-1)))
else:
mutations = 1
x_improved = x_best
phi_improved = phi_best
for offspring in range(population):
x_try = x_best.perturb(mutations)
phi_try = x_try.mmphi()
if phi_try < phi_improved:
x_improved = x_try
phi_improved = phi_try
if phi_improved < phi_best:
phi_best = phi_improved
x_best = x_improved
return x_best
if __name__== "__main__": # pragma no cover
import sys
lh1 = array([[1,2,3],[3,1,2],[2,3,1]])
assert(is_latin_hypercube(lh1))
badlh = array([[1,2,3],[1,3,2],[3,2,1]])
assert(is_latin_hypercube(badlh) is False)
try:
from matplotlib import pyplot
except ImportError:
print "Couldn't find matplotlib"
test = """
x = rand_latin_hypercube(80,2)
lh = LatinHypercube(x,2,1)
print lh.mmphi()
lh_opt = _mmlhs(lh,20,20)
print lh_opt.mmphi()
"""
if '--profile' in sys.argv:
import cProfile
import pstats
cProfile.run(test,"test.prof")
p = pstats.Stats("test.prof")
p.sort_stats('cumulative').print_stats(10)
else:
exec(test)
if 'pyplot' in globals():
pyplot.figure(1)
pyplot.scatter(lh[:,0],lh[:,1])
pyplot.scatter(lh_opt[:,0],lh_opt[:,1],c='r')
pyplot.show()
| 30.935484
| 97
| 0.623462
|
c45a8e5133d6365732f2b9f340248ad629996bab
| 2,093
|
py
|
Python
|
app/backend/wells/migrations/0110_delete_legacy_dupes_20200401_2244.py
|
bcgov/gwells
|
7d69e65e993d37070961e06e6ce9c58a02d79363
|
[
"Apache-2.0"
] | 37
|
2017-06-30T18:08:51.000Z
|
2022-02-13T18:04:10.000Z
|
app/backend/wells/migrations/0110_delete_legacy_dupes_20200401_2244.py
|
bcgov/gwells
|
7d69e65e993d37070961e06e6ce9c58a02d79363
|
[
"Apache-2.0"
] | 544
|
2017-06-21T00:29:20.000Z
|
2022-02-01T21:37:38.000Z
|
app/backend/wells/migrations/0110_delete_legacy_dupes_20200401_2244.py
|
bcgov/gwells
|
7d69e65e993d37070961e06e6ce9c58a02d79363
|
[
"Apache-2.0"
] | 59
|
2017-03-10T17:55:02.000Z
|
2021-11-16T19:20:08.000Z
|
# Generated by Django 2.2.12 on 2020-04-01 22:44
from django.db import migrations, connection, transaction
SELECT_DUPLICATE_LEGACY_RECORDS = """
SELECT MAX(filing_number) FROM activity_submission WHERE well_tag_number IN (
SELECT well_tag_number
FROM activity_submission
WHERE well_activity_code='LEGACY'
GROUP BY well_tag_number
HAVING COUNT(*) > 1
ORDER BY well_tag_number
) AND well_activity_code='LEGACY' GROUP BY well_tag_number ORDER BY well_tag_number;
"""
def delete_legacy_dupes(apps, schema_editor):
ActivitySubmission = apps.get_model('wells', 'ActivitySubmission')
with connection.cursor() as cursor:
cursor.execute(SELECT_DUPLICATE_LEGACY_RECORDS)
ids = [row[0] for row in cursor.fetchall()]
print(f'Found {len(ids)} duplicate Legacy Activity Submissions')
activity_submissions = ActivitySubmission.objects.filter(pk__in=ids)
for activity_submission in activity_submissions:
print(f'Deleting activity submission {activity_submission.filing_number}')
try:
delete_entire_submission(activity_submission)
except Exception as e:
                print(f'Failed to delete activity submission {activity_submission.filing_number}')
print(e)
@transaction.atomic
def delete_entire_submission(activity_submission):
activity_submission.drilling_methods.clear()
activity_submission.development_methods.clear()
activity_submission.water_quality_characteristics.clear()
activity_submission.lithologydescription_set.all().delete()
activity_submission.casing_set.all().delete()
activity_submission.decommission_description_set.all().delete()
activity_submission.screen_set.all().delete()
activity_submission.linerperforation_set.all().delete()
activity_submission.fields_provided.delete()
activity_submission.delete()
class Migration(migrations.Migration):
dependencies = [
('wells', '0109_positive_longitude_20200218_2310'),
]
operations = [
migrations.RunPython(delete_legacy_dupes),
]
| 32.703125
| 88
| 0.732919
|
014ed28749049a65f94796c92518d5c87f7da594
| 2,006
|
py
|
Python
|
src/k8s-extension/setup.py
|
santosh02iiit/azure-cli-extensions
|
24247cfa19e2a5894937f19e17fbdc8308b28ef6
|
[
"MIT"
] | 1
|
2021-08-03T18:32:54.000Z
|
2021-08-03T18:32:54.000Z
|
src/k8s-extension/setup.py
|
santosh02iiit/azure-cli-extensions
|
24247cfa19e2a5894937f19e17fbdc8308b28ef6
|
[
"MIT"
] | 1
|
2020-03-09T22:34:27.000Z
|
2020-03-09T22:34:27.000Z
|
src/k8s-extension/setup.py
|
santosh02iiit/azure-cli-extensions
|
24247cfa19e2a5894937f19e17fbdc8308b28ef6
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from codecs import open
from setuptools import setup, find_packages
try:
from azure_bdist_wheel import cmdclass
except ImportError:
from distutils import log as logger
logger.warn("Wheel is not available, disabling bdist_wheel hook")
# The full list of classifiers is available at
# https://pypi.python.org/pypi?%3Aaction=list_classifiers
CLASSIFIERS = [
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'License :: OSI Approved :: MIT License',
]
# TODO: Add any additional SDK dependencies here
DEPENDENCIES = []
VERSION = "0.6.1"
with open('README.rst', 'r', encoding='utf-8') as f:
README = f.read()
with open('HISTORY.rst', 'r', encoding='utf-8') as f:
HISTORY = f.read()
setup(
name="k8s-extension",
version=VERSION,
description='Microsoft Azure Command-Line Tools K8s-extension Extension',
# TODO: Update author and email, if applicable
author='Microsoft Corporation',
author_email='azpycli@microsoft.com',
# TODO: consider pointing directly to your source code instead of the generic repo
url='https://github.com/Azure/azure-cli-extensions',
long_description=README + '\n\n' + HISTORY,
license='MIT',
classifiers=CLASSIFIERS,
packages=find_packages(),
install_requires=DEPENDENCIES,
package_data={'azext_k8s_extension': ['azext_metadata.json']},
)
| 34.586207
| 94
| 0.638584
|
acc704de739c40fd9ec7920f40d58bbb206596a5
| 351
|
py
|
Python
|
website/website/website/urls.py
|
Ferdous-Al-Imran/Aqualizer
|
5a718f81183c82bf9d82abdd00fe4baeb0bf9c71
|
[
"MIT"
] | null | null | null |
website/website/website/urls.py
|
Ferdous-Al-Imran/Aqualizer
|
5a718f81183c82bf9d82abdd00fe4baeb0bf9c71
|
[
"MIT"
] | null | null | null |
website/website/website/urls.py
|
Ferdous-Al-Imran/Aqualizer
|
5a718f81183c82bf9d82abdd00fe4baeb0bf9c71
|
[
"MIT"
] | null | null | null |
from django.conf.urls import include,url
from django.contrib import admin
urlpatterns = [
url(r'^admin/', admin.site.urls),
#url(r'^sensorValue/', include(sensorValue.urls)),
url(r'^home/',include('home.urls')),
url(r'^userAuthentication/',include('userAuthentication.urls')),
url(r'^sensorValue/',include('sensorValue.urls')),
]
| 31.909091
| 68
| 0.692308
|
8b58ad1ae29f1c734e5020217a6e735802c4505f
| 75,196
|
py
|
Python
|
sympy/printing/mathml.py
|
qcgm1978/sympy
|
cc46047f4449b525b7b0edd4c634bf93d6e7c83d
|
[
"BSD-3-Clause"
] | 2
|
2021-01-09T23:11:25.000Z
|
2021-01-11T15:04:22.000Z
|
sympy/printing/mathml.py
|
qcgm1978/sympy
|
cc46047f4449b525b7b0edd4c634bf93d6e7c83d
|
[
"BSD-3-Clause"
] | 3
|
2021-02-28T03:58:40.000Z
|
2021-03-07T06:12:47.000Z
|
sympy/printing/mathml.py
|
qcgm1978/sympy
|
cc46047f4449b525b7b0edd4c634bf93d6e7c83d
|
[
"BSD-3-Clause"
] | 2
|
2021-01-08T23:03:23.000Z
|
2021-01-13T18:57:02.000Z
|
"""
A MathML printer.
"""
from typing import Any, Dict
from sympy import sympify, S, Mul
from sympy.core.compatibility import default_sort_key
from sympy.core.function import _coeff_isneg
from sympy.printing.conventions import split_super_sub, requires_partial
from sympy.printing.precedence import \
precedence_traditional, PRECEDENCE, PRECEDENCE_TRADITIONAL
from sympy.printing.pretty.pretty_symbology import greek_unicode
from sympy.printing.printer import Printer, print_function
import mpmath.libmp as mlib
from mpmath.libmp import prec_to_dps, repr_dps, to_str as mlib_to_str
class MathMLPrinterBase(Printer):
"""Contains common code required for MathMLContentPrinter and
MathMLPresentationPrinter.
"""
_default_settings = {
"order": None,
"encoding": "utf-8",
"fold_frac_powers": False,
"fold_func_brackets": False,
"fold_short_frac": None,
"inv_trig_style": "abbreviated",
"ln_notation": False,
"long_frac_ratio": None,
"mat_delim": "[",
"mat_symbol_style": "plain",
"mul_symbol": None,
"root_notation": True,
"symbol_names": {},
"mul_symbol_mathml_numbers": '·',
} # type: Dict[str, Any]
def __init__(self, settings=None):
Printer.__init__(self, settings)
from xml.dom.minidom import Document, Text
self.dom = Document()
# Workaround to allow strings to remain unescaped
# Based on
# https://stackoverflow.com/questions/38015864/python-xml-dom-minidom-\
# please-dont-escape-my-strings/38041194
class RawText(Text):
def writexml(self, writer, indent='', addindent='', newl=''):
if self.data:
writer.write('{}{}{}'.format(indent, self.data, newl))
def createRawTextNode(data):
r = RawText()
r.data = data
r.ownerDocument = self.dom
return r
self.dom.createTextNode = createRawTextNode
def doprint(self, expr):
"""
Prints the expression as MathML.
"""
mathML = Printer._print(self, expr)
unistr = mathML.toxml()
xmlbstr = unistr.encode('ascii', 'xmlcharrefreplace')
res = xmlbstr.decode()
return res
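    # A usage sketch: MathMLContentPrinter().doprint(Symbol('x')**2) renders
    # roughly '<apply><power/><ci>x</ci><cn>2</cn></apply>' (exact markup
    # depends on the printer settings).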
def apply_patch(self):
# Applying the patch of xml.dom.minidom bug
# Date: 2011-11-18
# Description: http://ronrothman.com/public/leftbraned/xml-dom-minidom\
# -toprettyxml-and-silly-whitespace/#best-solution
# Issue: http://bugs.python.org/issue4147
# Patch: http://hg.python.org/cpython/rev/7262f8f276ff/
from xml.dom.minidom import Element, Text, Node, _write_data
def writexml(self, writer, indent="", addindent="", newl=""):
# indent = current indentation
# addindent = indentation to add to higher levels
# newl = newline string
writer.write(indent + "<" + self.tagName)
attrs = self._get_attributes()
a_names = list(attrs.keys())
a_names.sort()
for a_name in a_names:
writer.write(" %s=\"" % a_name)
_write_data(writer, attrs[a_name].value)
writer.write("\"")
if self.childNodes:
writer.write(">")
if (len(self.childNodes) == 1 and
self.childNodes[0].nodeType == Node.TEXT_NODE):
self.childNodes[0].writexml(writer, '', '', '')
else:
writer.write(newl)
for node in self.childNodes:
node.writexml(
writer, indent + addindent, addindent, newl)
writer.write(indent)
writer.write("</%s>%s" % (self.tagName, newl))
else:
writer.write("/>%s" % (newl))
self._Element_writexml_old = Element.writexml
Element.writexml = writexml
def writexml(self, writer, indent="", addindent="", newl=""):
_write_data(writer, "%s%s%s" % (indent, self.data, newl))
self._Text_writexml_old = Text.writexml
Text.writexml = writexml
def restore_patch(self):
from xml.dom.minidom import Element, Text
Element.writexml = self._Element_writexml_old
Text.writexml = self._Text_writexml_old
class MathMLContentPrinter(MathMLPrinterBase):
"""Prints an expression to the Content MathML markup language.
References: https://www.w3.org/TR/MathML2/chapter4.html
"""
printmethod = "_mathml_content"
def mathml_tag(self, e):
"""Returns the MathML tag for an expression."""
translate = {
'Add': 'plus',
'Mul': 'times',
'Derivative': 'diff',
'Number': 'cn',
'int': 'cn',
'Pow': 'power',
'Max': 'max',
'Min': 'min',
'Abs': 'abs',
'And': 'and',
'Or': 'or',
'Xor': 'xor',
'Not': 'not',
'Implies': 'implies',
'Symbol': 'ci',
'MatrixSymbol': 'ci',
'RandomSymbol': 'ci',
'Integral': 'int',
'Sum': 'sum',
'sin': 'sin',
'cos': 'cos',
'tan': 'tan',
'cot': 'cot',
'csc': 'csc',
'sec': 'sec',
'sinh': 'sinh',
'cosh': 'cosh',
'tanh': 'tanh',
'coth': 'coth',
'csch': 'csch',
'sech': 'sech',
'asin': 'arcsin',
'asinh': 'arcsinh',
'acos': 'arccos',
'acosh': 'arccosh',
'atan': 'arctan',
'atanh': 'arctanh',
'atan2': 'arctan',
'acot': 'arccot',
'acoth': 'arccoth',
'asec': 'arcsec',
'asech': 'arcsech',
'acsc': 'arccsc',
'acsch': 'arccsch',
'log': 'ln',
'Equality': 'eq',
'Unequality': 'neq',
'GreaterThan': 'geq',
'LessThan': 'leq',
'StrictGreaterThan': 'gt',
'StrictLessThan': 'lt',
'Union': 'union',
'Intersection': 'intersect',
}
for cls in e.__class__.__mro__:
n = cls.__name__
if n in translate:
return translate[n]
# Not found in the MRO set
n = e.__class__.__name__
return n.lower()
def _print_Mul(self, expr):
if _coeff_isneg(expr):
x = self.dom.createElement('apply')
x.appendChild(self.dom.createElement('minus'))
x.appendChild(self._print_Mul(-expr))
return x
from sympy.simplify import fraction
numer, denom = fraction(expr)
if denom is not S.One:
x = self.dom.createElement('apply')
x.appendChild(self.dom.createElement('divide'))
x.appendChild(self._print(numer))
x.appendChild(self._print(denom))
return x
coeff, terms = expr.as_coeff_mul()
if coeff is S.One and len(terms) == 1:
# XXX since the negative coefficient has been handled, I don't
# think a coeff of 1 can remain
return self._print(terms[0])
if self.order != 'old':
terms = Mul._from_args(terms).as_ordered_factors()
x = self.dom.createElement('apply')
x.appendChild(self.dom.createElement('times'))
if coeff != 1:
x.appendChild(self._print(coeff))
for term in terms:
x.appendChild(self._print(term))
return x
def _print_Add(self, expr, order=None):
args = self._as_ordered_terms(expr, order=order)
lastProcessed = self._print(args[0])
plusNodes = []
for arg in args[1:]:
if _coeff_isneg(arg):
# use minus
x = self.dom.createElement('apply')
x.appendChild(self.dom.createElement('minus'))
x.appendChild(lastProcessed)
x.appendChild(self._print(-arg))
# invert expression since this is now minused
lastProcessed = x
if arg == args[-1]:
plusNodes.append(lastProcessed)
else:
plusNodes.append(lastProcessed)
lastProcessed = self._print(arg)
if arg == args[-1]:
plusNodes.append(self._print(arg))
if len(plusNodes) == 1:
return lastProcessed
x = self.dom.createElement('apply')
x.appendChild(self.dom.createElement('plus'))
while plusNodes:
x.appendChild(plusNodes.pop(0))
return x
def _print_Piecewise(self, expr):
if expr.args[-1].cond != True:
# We need the last conditional to be a True, otherwise the resulting
# function may not return a result.
raise ValueError("All Piecewise expressions must contain an "
"(expr, True) statement to be used as a default "
"condition. Without one, the generated "
"expression may not evaluate to anything under "
"some condition.")
root = self.dom.createElement('piecewise')
for i, (e, c) in enumerate(expr.args):
if i == len(expr.args) - 1 and c == True:
piece = self.dom.createElement('otherwise')
piece.appendChild(self._print(e))
else:
piece = self.dom.createElement('piece')
piece.appendChild(self._print(e))
piece.appendChild(self._print(c))
root.appendChild(piece)
return root
def _print_MatrixBase(self, m):
x = self.dom.createElement('matrix')
for i in range(m.rows):
x_r = self.dom.createElement('matrixrow')
for j in range(m.cols):
x_r.appendChild(self._print(m[i, j]))
x.appendChild(x_r)
return x
def _print_Rational(self, e):
if e.q == 1:
# don't divide
x = self.dom.createElement('cn')
x.appendChild(self.dom.createTextNode(str(e.p)))
return x
x = self.dom.createElement('apply')
x.appendChild(self.dom.createElement('divide'))
# numerator
xnum = self.dom.createElement('cn')
xnum.appendChild(self.dom.createTextNode(str(e.p)))
# denominator
xdenom = self.dom.createElement('cn')
xdenom.appendChild(self.dom.createTextNode(str(e.q)))
x.appendChild(xnum)
x.appendChild(xdenom)
return x
def _print_Limit(self, e):
x = self.dom.createElement('apply')
x.appendChild(self.dom.createElement(self.mathml_tag(e)))
x_1 = self.dom.createElement('bvar')
x_2 = self.dom.createElement('lowlimit')
x_1.appendChild(self._print(e.args[1]))
x_2.appendChild(self._print(e.args[2]))
x.appendChild(x_1)
x.appendChild(x_2)
x.appendChild(self._print(e.args[0]))
return x
def _print_ImaginaryUnit(self, e):
return self.dom.createElement('imaginaryi')
def _print_EulerGamma(self, e):
return self.dom.createElement('eulergamma')
def _print_GoldenRatio(self, e):
"""We use unicode #x3c6 for Greek letter phi as defined here
http://www.w3.org/2003/entities/2007doc/isogrk1.html"""
x = self.dom.createElement('cn')
x.appendChild(self.dom.createTextNode("\N{GREEK SMALL LETTER PHI}"))
return x
def _print_Exp1(self, e):
return self.dom.createElement('exponentiale')
def _print_Pi(self, e):
return self.dom.createElement('pi')
def _print_Infinity(self, e):
return self.dom.createElement('infinity')
def _print_NaN(self, e):
return self.dom.createElement('notanumber')
def _print_EmptySet(self, e):
return self.dom.createElement('emptyset')
def _print_BooleanTrue(self, e):
return self.dom.createElement('true')
def _print_BooleanFalse(self, e):
return self.dom.createElement('false')
def _print_NegativeInfinity(self, e):
x = self.dom.createElement('apply')
x.appendChild(self.dom.createElement('minus'))
x.appendChild(self.dom.createElement('infinity'))
return x
def _print_Integral(self, e):
def lime_recur(limits):
x = self.dom.createElement('apply')
x.appendChild(self.dom.createElement(self.mathml_tag(e)))
bvar_elem = self.dom.createElement('bvar')
bvar_elem.appendChild(self._print(limits[0][0]))
x.appendChild(bvar_elem)
if len(limits[0]) == 3:
low_elem = self.dom.createElement('lowlimit')
low_elem.appendChild(self._print(limits[0][1]))
x.appendChild(low_elem)
up_elem = self.dom.createElement('uplimit')
up_elem.appendChild(self._print(limits[0][2]))
x.appendChild(up_elem)
if len(limits[0]) == 2:
up_elem = self.dom.createElement('uplimit')
up_elem.appendChild(self._print(limits[0][1]))
x.appendChild(up_elem)
if len(limits) == 1:
x.appendChild(self._print(e.function))
else:
x.appendChild(lime_recur(limits[1:]))
return x
limits = list(e.limits)
limits.reverse()
return lime_recur(limits)
def _print_Sum(self, e):
# Printer can be shared because Sum and Integral have the
# same internal representation.
return self._print_Integral(e)
def _print_Symbol(self, sym):
ci = self.dom.createElement(self.mathml_tag(sym))
def join(items):
if len(items) > 1:
mrow = self.dom.createElement('mml:mrow')
for i, item in enumerate(items):
if i > 0:
mo = self.dom.createElement('mml:mo')
mo.appendChild(self.dom.createTextNode(" "))
mrow.appendChild(mo)
mi = self.dom.createElement('mml:mi')
mi.appendChild(self.dom.createTextNode(item))
mrow.appendChild(mi)
return mrow
else:
mi = self.dom.createElement('mml:mi')
mi.appendChild(self.dom.createTextNode(items[0]))
return mi
# translate name, supers and subs to unicode characters
def translate(s):
if s in greek_unicode:
return greek_unicode.get(s)
else:
return s
name, supers, subs = split_super_sub(sym.name)
name = translate(name)
supers = [translate(sup) for sup in supers]
subs = [translate(sub) for sub in subs]
mname = self.dom.createElement('mml:mi')
mname.appendChild(self.dom.createTextNode(name))
if not supers:
if not subs:
ci.appendChild(self.dom.createTextNode(name))
else:
msub = self.dom.createElement('mml:msub')
msub.appendChild(mname)
msub.appendChild(join(subs))
ci.appendChild(msub)
else:
if not subs:
msup = self.dom.createElement('mml:msup')
msup.appendChild(mname)
msup.appendChild(join(supers))
ci.appendChild(msup)
else:
msubsup = self.dom.createElement('mml:msubsup')
msubsup.appendChild(mname)
msubsup.appendChild(join(subs))
msubsup.appendChild(join(supers))
ci.appendChild(msubsup)
return ci
_print_MatrixSymbol = _print_Symbol
_print_RandomSymbol = _print_Symbol
def _print_Pow(self, e):
# Here we use root instead of power if the exponent is the reciprocal
# of an integer
if (self._settings['root_notation'] and e.exp.is_Rational
and e.exp.p == 1):
x = self.dom.createElement('apply')
x.appendChild(self.dom.createElement('root'))
if e.exp.q != 2:
xmldeg = self.dom.createElement('degree')
xmlci = self.dom.createElement('ci')
xmlci.appendChild(self.dom.createTextNode(str(e.exp.q)))
xmldeg.appendChild(xmlci)
x.appendChild(xmldeg)
x.appendChild(self._print(e.base))
return x
x = self.dom.createElement('apply')
x_1 = self.dom.createElement(self.mathml_tag(e))
x.appendChild(x_1)
x.appendChild(self._print(e.base))
x.appendChild(self._print(e.exp))
return x
def _print_Number(self, e):
x = self.dom.createElement(self.mathml_tag(e))
x.appendChild(self.dom.createTextNode(str(e)))
return x
def _print_Float(self, e):
x = self.dom.createElement(self.mathml_tag(e))
repr_e = mlib_to_str(e._mpf_, repr_dps(e._prec))
x.appendChild(self.dom.createTextNode(repr_e))
return x
def _print_Derivative(self, e):
x = self.dom.createElement('apply')
diff_symbol = self.mathml_tag(e)
if requires_partial(e.expr):
diff_symbol = 'partialdiff'
x.appendChild(self.dom.createElement(diff_symbol))
x_1 = self.dom.createElement('bvar')
for sym, times in reversed(e.variable_count):
x_1.appendChild(self._print(sym))
if times > 1:
degree = self.dom.createElement('degree')
degree.appendChild(self._print(sympify(times)))
x_1.appendChild(degree)
x.appendChild(x_1)
x.appendChild(self._print(e.expr))
return x
def _print_Function(self, e):
x = self.dom.createElement("apply")
x.appendChild(self.dom.createElement(self.mathml_tag(e)))
for arg in e.args:
x.appendChild(self._print(arg))
return x
def _print_Basic(self, e):
x = self.dom.createElement(self.mathml_tag(e))
for arg in e.args:
x.appendChild(self._print(arg))
return x
def _print_AssocOp(self, e):
x = self.dom.createElement('apply')
x_1 = self.dom.createElement(self.mathml_tag(e))
x.appendChild(x_1)
for arg in e.args:
x.appendChild(self._print(arg))
return x
def _print_Relational(self, e):
x = self.dom.createElement('apply')
x.appendChild(self.dom.createElement(self.mathml_tag(e)))
x.appendChild(self._print(e.lhs))
x.appendChild(self._print(e.rhs))
return x
def _print_list(self, seq):
"""MathML reference for the <list> element:
http://www.w3.org/TR/MathML2/chapter4.html#contm.list"""
dom_element = self.dom.createElement('list')
for item in seq:
dom_element.appendChild(self._print(item))
return dom_element
def _print_int(self, p):
dom_element = self.dom.createElement(self.mathml_tag(p))
dom_element.appendChild(self.dom.createTextNode(str(p)))
return dom_element
_print_Implies = _print_AssocOp
_print_Not = _print_AssocOp
_print_Xor = _print_AssocOp
def _print_FiniteSet(self, e):
x = self.dom.createElement('set')
for arg in e.args:
x.appendChild(self._print(arg))
return x
def _print_Complement(self, e):
x = self.dom.createElement('apply')
x.appendChild(self.dom.createElement('setdiff'))
for arg in e.args:
x.appendChild(self._print(arg))
return x
def _print_ProductSet(self, e):
x = self.dom.createElement('apply')
x.appendChild(self.dom.createElement('cartesianproduct'))
for arg in e.args:
x.appendChild(self._print(arg))
return x
# XXX Symmetric difference is not supported for MathML content printers.
class MathMLPresentationPrinter(MathMLPrinterBase):
"""Prints an expression to the Presentation MathML markup language.
References: https://www.w3.org/TR/MathML2/chapter3.html
"""
printmethod = "_mathml_presentation"
def mathml_tag(self, e):
"""Returns the MathML tag for an expression."""
translate = {
'Number': 'mn',
'Limit': '→',
'Derivative': 'ⅆ',
'int': 'mn',
'Symbol': 'mi',
'Integral': '∫',
'Sum': '∑',
'sin': 'sin',
'cos': 'cos',
'tan': 'tan',
'cot': 'cot',
'asin': 'arcsin',
'asinh': 'arcsinh',
'acos': 'arccos',
'acosh': 'arccosh',
'atan': 'arctan',
'atanh': 'arctanh',
'acot': 'arccot',
'atan2': 'arctan',
'Equality': '=',
'Unequality': '≠',
'GreaterThan': '≥',
'LessThan': '≤',
'StrictGreaterThan': '>',
'StrictLessThan': '<',
'lerchphi': 'Φ',
'zeta': 'ζ',
'dirichlet_eta': 'η',
'elliptic_k': 'Κ',
'lowergamma': 'γ',
'uppergamma': 'Γ',
'gamma': 'Γ',
'totient': 'ϕ',
'reduced_totient': 'λ',
'primenu': 'ν',
'primeomega': 'Ω',
'fresnels': 'S',
'fresnelc': 'C',
'LambertW': 'W',
'Heaviside': 'Θ',
'BooleanTrue': 'True',
'BooleanFalse': 'False',
'NoneType': 'None',
'mathieus': 'S',
'mathieuc': 'C',
'mathieusprime': 'S′',
'mathieucprime': 'C′',
}
def mul_symbol_selection():
if (self._settings["mul_symbol"] is None or
self._settings["mul_symbol"] == 'None'):
return '⁢'
elif self._settings["mul_symbol"] == 'times':
return '×'
elif self._settings["mul_symbol"] == 'dot':
return '·'
elif self._settings["mul_symbol"] == 'ldot':
return '․'
elif not isinstance(self._settings["mul_symbol"], str):
raise TypeError
else:
return self._settings["mul_symbol"]
for cls in e.__class__.__mro__:
n = cls.__name__
if n in translate:
return translate[n]
# Not found in the MRO set
if e.__class__.__name__ == "Mul":
return mul_symbol_selection()
n = e.__class__.__name__
return n.lower()
def parenthesize(self, item, level, strict=False):
prec_val = precedence_traditional(item)
if (prec_val < level) or ((not strict) and prec_val <= level):
brac = self.dom.createElement('mfenced')
brac.appendChild(self._print(item))
return brac
else:
return self._print(item)
def _print_Mul(self, expr):
def multiply(expr, mrow):
from sympy.simplify import fraction
numer, denom = fraction(expr)
if denom is not S.One:
frac = self.dom.createElement('mfrac')
if self._settings["fold_short_frac"] and len(str(expr)) < 7:
frac.setAttribute('bevelled', 'true')
xnum = self._print(numer)
xden = self._print(denom)
frac.appendChild(xnum)
frac.appendChild(xden)
mrow.appendChild(frac)
return mrow
coeff, terms = expr.as_coeff_mul()
if coeff is S.One and len(terms) == 1:
mrow.appendChild(self._print(terms[0]))
return mrow
if self.order != 'old':
terms = Mul._from_args(terms).as_ordered_factors()
if coeff != 1:
x = self._print(coeff)
y = self.dom.createElement('mo')
y.appendChild(self.dom.createTextNode(self.mathml_tag(expr)))
mrow.appendChild(x)
mrow.appendChild(y)
for term in terms:
mrow.appendChild(self.parenthesize(term, PRECEDENCE['Mul']))
                if term != terms[-1]:
y = self.dom.createElement('mo')
y.appendChild(self.dom.createTextNode(self.mathml_tag(expr)))
mrow.appendChild(y)
return mrow
mrow = self.dom.createElement('mrow')
if _coeff_isneg(expr):
x = self.dom.createElement('mo')
x.appendChild(self.dom.createTextNode('-'))
mrow.appendChild(x)
mrow = multiply(-expr, mrow)
else:
mrow = multiply(expr, mrow)
return mrow
def _print_Add(self, expr, order=None):
mrow = self.dom.createElement('mrow')
args = self._as_ordered_terms(expr, order=order)
mrow.appendChild(self._print(args[0]))
for arg in args[1:]:
if _coeff_isneg(arg):
# use minus
x = self.dom.createElement('mo')
x.appendChild(self.dom.createTextNode('-'))
y = self._print(-arg)
# invert expression since this is now minused
else:
x = self.dom.createElement('mo')
x.appendChild(self.dom.createTextNode('+'))
y = self._print(arg)
mrow.appendChild(x)
mrow.appendChild(y)
return mrow
def _print_MatrixBase(self, m):
table = self.dom.createElement('mtable')
for i in range(m.rows):
x = self.dom.createElement('mtr')
for j in range(m.cols):
y = self.dom.createElement('mtd')
y.appendChild(self._print(m[i, j]))
x.appendChild(y)
table.appendChild(x)
if self._settings["mat_delim"] == '':
return table
brac = self.dom.createElement('mfenced')
if self._settings["mat_delim"] == "[":
brac.setAttribute('close', ']')
brac.setAttribute('open', '[')
brac.appendChild(table)
return brac
def _get_printed_Rational(self, e, folded=None):
if e.p < 0:
p = -e.p
else:
p = e.p
x = self.dom.createElement('mfrac')
if folded or self._settings["fold_short_frac"]:
x.setAttribute('bevelled', 'true')
x.appendChild(self._print(p))
x.appendChild(self._print(e.q))
if e.p < 0:
mrow = self.dom.createElement('mrow')
mo = self.dom.createElement('mo')
mo.appendChild(self.dom.createTextNode('-'))
mrow.appendChild(mo)
mrow.appendChild(x)
return mrow
else:
return x
def _print_Rational(self, e):
if e.q == 1:
# don't divide
return self._print(e.p)
return self._get_printed_Rational(e, self._settings["fold_short_frac"])
def _print_Limit(self, e):
mrow = self.dom.createElement('mrow')
munder = self.dom.createElement('munder')
mi = self.dom.createElement('mi')
mi.appendChild(self.dom.createTextNode('lim'))
x = self.dom.createElement('mrow')
x_1 = self._print(e.args[1])
arrow = self.dom.createElement('mo')
arrow.appendChild(self.dom.createTextNode(self.mathml_tag(e)))
x_2 = self._print(e.args[2])
x.appendChild(x_1)
x.appendChild(arrow)
x.appendChild(x_2)
munder.appendChild(mi)
munder.appendChild(x)
mrow.appendChild(munder)
mrow.appendChild(self._print(e.args[0]))
return mrow
def _print_ImaginaryUnit(self, e):
x = self.dom.createElement('mi')
x.appendChild(self.dom.createTextNode('ⅈ'))
return x
def _print_GoldenRatio(self, e):
x = self.dom.createElement('mi')
x.appendChild(self.dom.createTextNode('Φ'))
return x
def _print_Exp1(self, e):
x = self.dom.createElement('mi')
x.appendChild(self.dom.createTextNode('ⅇ'))
return x
def _print_Pi(self, e):
x = self.dom.createElement('mi')
x.appendChild(self.dom.createTextNode('π'))
return x
def _print_Infinity(self, e):
x = self.dom.createElement('mi')
x.appendChild(self.dom.createTextNode('∞'))
return x
def _print_NegativeInfinity(self, e):
mrow = self.dom.createElement('mrow')
y = self.dom.createElement('mo')
y.appendChild(self.dom.createTextNode('-'))
x = self._print_Infinity(e)
mrow.appendChild(y)
mrow.appendChild(x)
return mrow
def _print_HBar(self, e):
x = self.dom.createElement('mi')
x.appendChild(self.dom.createTextNode('ℏ'))
return x
def _print_EulerGamma(self, e):
x = self.dom.createElement('mi')
x.appendChild(self.dom.createTextNode('γ'))
return x
def _print_TribonacciConstant(self, e):
x = self.dom.createElement('mi')
x.appendChild(self.dom.createTextNode('TribonacciConstant'))
return x
def _print_Dagger(self, e):
msup = self.dom.createElement('msup')
msup.appendChild(self._print(e.args[0]))
msup.appendChild(self.dom.createTextNode('†'))
return msup
def _print_Contains(self, e):
mrow = self.dom.createElement('mrow')
mrow.appendChild(self._print(e.args[0]))
mo = self.dom.createElement('mo')
mo.appendChild(self.dom.createTextNode('∈'))
mrow.appendChild(mo)
mrow.appendChild(self._print(e.args[1]))
return mrow
def _print_HilbertSpace(self, e):
x = self.dom.createElement('mi')
x.appendChild(self.dom.createTextNode('ℋ'))
return x
def _print_ComplexSpace(self, e):
msup = self.dom.createElement('msup')
msup.appendChild(self.dom.createTextNode('𝒞'))
msup.appendChild(self._print(e.args[0]))
return msup
def _print_FockSpace(self, e):
x = self.dom.createElement('mi')
x.appendChild(self.dom.createTextNode('ℱ'))
return x
def _print_Integral(self, expr):
intsymbols = {1: "∫", 2: "∬", 3: "∭"}
mrow = self.dom.createElement('mrow')
if len(expr.limits) <= 3 and all(len(lim) == 1 for lim in expr.limits):
# Only single, double, and triple integral signs exist
mo = self.dom.createElement('mo')
mo.appendChild(self.dom.createTextNode(intsymbols[len(expr.limits)]))
mrow.appendChild(mo)
else:
# Either more than three integrals, or explicit limits are provided
for lim in reversed(expr.limits):
mo = self.dom.createElement('mo')
mo.appendChild(self.dom.createTextNode(intsymbols[1]))
if len(lim) == 1:
mrow.appendChild(mo)
if len(lim) == 2:
msup = self.dom.createElement('msup')
msup.appendChild(mo)
msup.appendChild(self._print(lim[1]))
mrow.appendChild(msup)
if len(lim) == 3:
msubsup = self.dom.createElement('msubsup')
msubsup.appendChild(mo)
msubsup.appendChild(self._print(lim[1]))
msubsup.appendChild(self._print(lim[2]))
mrow.appendChild(msubsup)
# print function
mrow.appendChild(self.parenthesize(expr.function, PRECEDENCE["Mul"],
strict=True))
# print integration variables
for lim in reversed(expr.limits):
d = self.dom.createElement('mo')
d.appendChild(self.dom.createTextNode('ⅆ'))
mrow.appendChild(d)
mrow.appendChild(self._print(lim[0]))
return mrow
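# Illustrative examples: Integral(f(x), x) renders as ∫ f(x) ⅆx, while
# Integral(f(x), (x, a, b)) attaches a and b to the sign via msubsup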
def _print_Sum(self, e):
limits = list(e.limits)
subsup = self.dom.createElement('munderover')
low_elem = self._print(limits[0][1])
up_elem = self._print(limits[0][2])
summand = self.dom.createElement('mo')
summand.appendChild(self.dom.createTextNode(self.mathml_tag(e)))
low = self.dom.createElement('mrow')
var = self._print(limits[0][0])
equal = self.dom.createElement('mo')
equal.appendChild(self.dom.createTextNode('='))
low.appendChild(var)
low.appendChild(equal)
low.appendChild(low_elem)
subsup.appendChild(summand)
subsup.appendChild(low)
subsup.appendChild(up_elem)
mrow = self.dom.createElement('mrow')
mrow.appendChild(subsup)
if len(str(e.function)) == 1:
mrow.appendChild(self._print(e.function))
else:
fence = self.dom.createElement('mfenced')
fence.appendChild(self._print(e.function))
mrow.appendChild(fence)
return mrow
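# Illustrative example: Sum(x, (x, 1, 10)) becomes a munderover with
# "x = 1" underneath and "10" above the summation sign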
def _print_Symbol(self, sym, style='plain'):
def join(items):
if len(items) > 1:
mrow = self.dom.createElement('mrow')
for i, item in enumerate(items):
if i > 0:
mo = self.dom.createElement('mo')
mo.appendChild(self.dom.createTextNode(" "))
mrow.appendChild(mo)
mi = self.dom.createElement('mi')
mi.appendChild(self.dom.createTextNode(item))
mrow.appendChild(mi)
return mrow
else:
mi = self.dom.createElement('mi')
mi.appendChild(self.dom.createTextNode(items[0]))
return mi
# translate name, supers and subs to unicode characters
def translate(s):
if s in greek_unicode:
return greek_unicode.get(s)
else:
return s
name, supers, subs = split_super_sub(sym.name)
name = translate(name)
supers = [translate(sup) for sup in supers]
subs = [translate(sub) for sub in subs]
mname = self.dom.createElement('mi')
mname.appendChild(self.dom.createTextNode(name))
if len(supers) == 0:
if len(subs) == 0:
x = mname
else:
x = self.dom.createElement('msub')
x.appendChild(mname)
x.appendChild(join(subs))
else:
if len(subs) == 0:
x = self.dom.createElement('msup')
x.appendChild(mname)
x.appendChild(join(supers))
else:
x = self.dom.createElement('msubsup')
x.appendChild(mname)
x.appendChild(join(subs))
x.appendChild(join(supers))
# Set bold font?
if style == 'bold':
x.setAttribute('mathvariant', 'bold')
return x
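# Illustrative example: Symbol('alpha_1') is split by split_super_sub into the
# name 'alpha' and subscript '1', giving <msub><mi>α</mi><mi>1</mi></msub>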
def _print_MatrixSymbol(self, sym):
return self._print_Symbol(sym,
style=self._settings['mat_symbol_style'])
_print_RandomSymbol = _print_Symbol
def _print_conjugate(self, expr):
enc = self.dom.createElement('menclose')
enc.setAttribute('notation', 'top')
enc.appendChild(self._print(expr.args[0]))
return enc
def _print_operator_after(self, op, expr):
row = self.dom.createElement('mrow')
row.appendChild(self.parenthesize(expr, PRECEDENCE["Func"]))
mo = self.dom.createElement('mo')
mo.appendChild(self.dom.createTextNode(op))
row.appendChild(mo)
return row
def _print_factorial(self, expr):
return self._print_operator_after('!', expr.args[0])
def _print_factorial2(self, expr):
return self._print_operator_after('!!', expr.args[0])
def _print_binomial(self, expr):
brac = self.dom.createElement('mfenced')
frac = self.dom.createElement('mfrac')
frac.setAttribute('linethickness', '0')
frac.appendChild(self._print(expr.args[0]))
frac.appendChild(self._print(expr.args[1]))
brac.appendChild(frac)
return brac
def _print_Pow(self, e):
# Here we use root instead of power if the exponent is the
# reciprocal of an integer
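# e.g. x**Rational(1, 2) becomes an msqrt, x**Rational(1, 3) an mroot, and
# x**Rational(-1, 2) is wrapped in an mfrac as 1 over the root (illustrative,
# derived from the branches below)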
if (e.exp.is_Rational and abs(e.exp.p) == 1 and e.exp.q != 1 and
self._settings['root_notation']):
if e.exp.q == 2:
x = self.dom.createElement('msqrt')
x.appendChild(self._print(e.base))
if e.exp.q != 2:
x = self.dom.createElement('mroot')
x.appendChild(self._print(e.base))
x.appendChild(self._print(e.exp.q))
if e.exp.p == -1:
frac = self.dom.createElement('mfrac')
frac.appendChild(self._print(1))
frac.appendChild(x)
return frac
else:
return x
if e.exp.is_Rational and e.exp.q != 1:
if e.exp.is_negative:
top = self.dom.createElement('mfrac')
top.appendChild(self._print(1))
x = self.dom.createElement('msup')
x.appendChild(self.parenthesize(e.base, PRECEDENCE['Pow']))
x.appendChild(self._get_printed_Rational(-e.exp,
self._settings['fold_frac_powers']))
top.appendChild(x)
return top
else:
x = self.dom.createElement('msup')
x.appendChild(self.parenthesize(e.base, PRECEDENCE['Pow']))
x.appendChild(self._get_printed_Rational(e.exp,
self._settings['fold_frac_powers']))
return x
if e.exp.is_negative:
top = self.dom.createElement('mfrac')
top.appendChild(self._print(1))
if e.exp == -1:
top.appendChild(self._print(e.base))
else:
x = self.dom.createElement('msup')
x.appendChild(self.parenthesize(e.base, PRECEDENCE['Pow']))
x.appendChild(self._print(-e.exp))
top.appendChild(x)
return top
x = self.dom.createElement('msup')
x.appendChild(self.parenthesize(e.base, PRECEDENCE['Pow']))
x.appendChild(self._print(e.exp))
return x
def _print_Number(self, e):
x = self.dom.createElement(self.mathml_tag(e))
x.appendChild(self.dom.createTextNode(str(e)))
return x
def _print_AccumulationBounds(self, i):
brac = self.dom.createElement('mfenced')
brac.setAttribute('close', '\u27e9')
brac.setAttribute('open', '\u27e8')
brac.appendChild(self._print(i.min))
brac.appendChild(self._print(i.max))
return brac
def _print_Derivative(self, e):
if requires_partial(e.expr):
d = '∂'
else:
d = self.mathml_tag(e)
# Determine denominator
m = self.dom.createElement('mrow')
dim = 0 # Total diff dimension, for numerator
for sym, num in reversed(e.variable_count):
dim += num
if num >= 2:
x = self.dom.createElement('msup')
xx = self.dom.createElement('mo')
xx.appendChild(self.dom.createTextNode(d))
x.appendChild(xx)
x.appendChild(self._print(num))
else:
x = self.dom.createElement('mo')
x.appendChild(self.dom.createTextNode(d))
m.appendChild(x)
y = self._print(sym)
m.appendChild(y)
mnum = self.dom.createElement('mrow')
if dim >= 2:
x = self.dom.createElement('msup')
xx = self.dom.createElement('mo')
xx.appendChild(self.dom.createTextNode(d))
x.appendChild(xx)
x.appendChild(self._print(dim))
else:
x = self.dom.createElement('mo')
x.appendChild(self.dom.createTextNode(d))
mnum.appendChild(x)
mrow = self.dom.createElement('mrow')
frac = self.dom.createElement('mfrac')
frac.appendChild(mnum)
frac.appendChild(m)
mrow.appendChild(frac)
# Print function
mrow.appendChild(self._print(e.expr))
return mrow
def _print_Function(self, e):
mrow = self.dom.createElement('mrow')
x = self.dom.createElement('mi')
if self.mathml_tag(e) == 'log' and self._settings["ln_notation"]:
x.appendChild(self.dom.createTextNode('ln'))
else:
x.appendChild(self.dom.createTextNode(self.mathml_tag(e)))
y = self.dom.createElement('mfenced')
for arg in e.args:
y.appendChild(self._print(arg))
mrow.appendChild(x)
mrow.appendChild(y)
return mrow
def _print_Float(self, expr):
# Based off of that in StrPrinter
dps = prec_to_dps(expr._prec)
str_real = mlib.to_str(expr._mpf_, dps, strip_zeros=True)
# Must always have a mul symbol (as 2.5 10^{20} just looks odd)
# thus we use the number separator
separator = self._settings['mul_symbol_mathml_numbers']
mrow = self.dom.createElement('mrow')
if 'e' in str_real:
(mant, exp) = str_real.split('e')
if exp[0] == '+':
exp = exp[1:]
mn = self.dom.createElement('mn')
mn.appendChild(self.dom.createTextNode(mant))
mrow.appendChild(mn)
mo = self.dom.createElement('mo')
mo.appendChild(self.dom.createTextNode(separator))
mrow.appendChild(mo)
msup = self.dom.createElement('msup')
mn = self.dom.createElement('mn')
mn.appendChild(self.dom.createTextNode("10"))
msup.appendChild(mn)
mn = self.dom.createElement('mn')
mn.appendChild(self.dom.createTextNode(exp))
msup.appendChild(mn)
mrow.appendChild(msup)
return mrow
elif str_real == "+inf":
return self._print_Infinity(None)
elif str_real == "-inf":
return self._print_NegativeInfinity(None)
else:
mn = self.dom.createElement('mn')
mn.appendChild(self.dom.createTextNode(str_real))
return mn
def _print_polylog(self, expr):
mrow = self.dom.createElement('mrow')
m = self.dom.createElement('msub')
mi = self.dom.createElement('mi')
mi.appendChild(self.dom.createTextNode('Li'))
m.appendChild(mi)
m.appendChild(self._print(expr.args[0]))
mrow.appendChild(m)
brac = self.dom.createElement('mfenced')
brac.appendChild(self._print(expr.args[1]))
mrow.appendChild(brac)
return mrow
def _print_Basic(self, e):
mrow = self.dom.createElement('mrow')
mi = self.dom.createElement('mi')
mi.appendChild(self.dom.createTextNode(self.mathml_tag(e)))
mrow.appendChild(mi)
brac = self.dom.createElement('mfenced')
for arg in e.args:
brac.appendChild(self._print(arg))
mrow.appendChild(brac)
return mrow
def _print_Tuple(self, e):
mrow = self.dom.createElement('mrow')
x = self.dom.createElement('mfenced')
for arg in e.args:
x.appendChild(self._print(arg))
mrow.appendChild(x)
return mrow
def _print_Interval(self, i):
mrow = self.dom.createElement('mrow')
brac = self.dom.createElement('mfenced')
if i.start == i.end:
# Most often, this type of Interval is converted to a FiniteSet
brac.setAttribute('close', '}')
brac.setAttribute('open', '{')
brac.appendChild(self._print(i.start))
else:
if i.right_open:
brac.setAttribute('close', ')')
else:
brac.setAttribute('close', ']')
if i.left_open:
brac.setAttribute('open', '(')
else:
brac.setAttribute('open', '[')
brac.appendChild(self._print(i.start))
brac.appendChild(self._print(i.end))
mrow.appendChild(brac)
return mrow
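# Illustrative example: Interval(0, 1) renders with brackets as [0, 1],
# Interval.open(0, 1) as (0, 1), and a degenerate Interval(a, a) as {a}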
def _print_Abs(self, expr, exp=None):
mrow = self.dom.createElement('mrow')
x = self.dom.createElement('mfenced')
x.setAttribute('close', '|')
x.setAttribute('open', '|')
x.appendChild(self._print(expr.args[0]))
mrow.appendChild(x)
return mrow
_print_Determinant = _print_Abs
def _print_re_im(self, c, expr):
mrow = self.dom.createElement('mrow')
mi = self.dom.createElement('mi')
mi.setAttribute('mathvariant', 'fraktur')
mi.appendChild(self.dom.createTextNode(c))
mrow.appendChild(mi)
brac = self.dom.createElement('mfenced')
brac.appendChild(self._print(expr))
mrow.appendChild(brac)
return mrow
def _print_re(self, expr, exp=None):
return self._print_re_im('R', expr.args[0])
def _print_im(self, expr, exp=None):
return self._print_re_im('I', expr.args[0])
def _print_AssocOp(self, e):
mrow = self.dom.createElement('mrow')
mi = self.dom.createElement('mi')
mi.appendChild(self.dom.createTextNode(self.mathml_tag(e)))
mrow.appendChild(mi)
for arg in e.args:
mrow.appendChild(self._print(arg))
return mrow
def _print_SetOp(self, expr, symbol, prec):
mrow = self.dom.createElement('mrow')
mrow.appendChild(self.parenthesize(expr.args[0], prec))
for arg in expr.args[1:]:
x = self.dom.createElement('mo')
x.appendChild(self.dom.createTextNode(symbol))
y = self.parenthesize(arg, prec)
mrow.appendChild(x)
mrow.appendChild(y)
return mrow
def _print_Union(self, expr):
prec = PRECEDENCE_TRADITIONAL['Union']
return self._print_SetOp(expr, '∪', prec)
def _print_Intersection(self, expr):
prec = PRECEDENCE_TRADITIONAL['Intersection']
return self._print_SetOp(expr, '∩', prec)
def _print_Complement(self, expr):
prec = PRECEDENCE_TRADITIONAL['Complement']
return self._print_SetOp(expr, '∖', prec)
def _print_SymmetricDifference(self, expr):
prec = PRECEDENCE_TRADITIONAL['SymmetricDifference']
return self._print_SetOp(expr, '∆', prec)
def _print_ProductSet(self, expr):
prec = PRECEDENCE_TRADITIONAL['ProductSet']
return self._print_SetOp(expr, '×', prec)
def _print_FiniteSet(self, s):
return self._print_set(s.args)
def _print_set(self, s):
items = sorted(s, key=default_sort_key)
brac = self.dom.createElement('mfenced')
brac.setAttribute('close', '}')
brac.setAttribute('open', '{')
for item in items:
brac.appendChild(self._print(item))
return brac
_print_frozenset = _print_set
def _print_LogOp(self, args, symbol):
mrow = self.dom.createElement('mrow')
if args[0].is_Boolean and not args[0].is_Not:
brac = self.dom.createElement('mfenced')
brac.appendChild(self._print(args[0]))
mrow.appendChild(brac)
else:
mrow.appendChild(self._print(args[0]))
for arg in args[1:]:
x = self.dom.createElement('mo')
x.appendChild(self.dom.createTextNode(symbol))
if arg.is_Boolean and not arg.is_Not:
y = self.dom.createElement('mfenced')
y.appendChild(self._print(arg))
else:
y = self._print(arg)
mrow.appendChild(x)
mrow.appendChild(y)
return mrow
def _print_BasisDependent(self, expr):
from sympy.vector import Vector
if expr == expr.zero:
# Not clear if this is ever called
return self._print(expr.zero)
if isinstance(expr, Vector):
items = expr.separate().items()
else:
items = [(0, expr)]
mrow = self.dom.createElement('mrow')
for system, vect in items:
inneritems = list(vect.components.items())
inneritems.sort(key=lambda x: str(x[0]))
for i, (k, v) in enumerate(inneritems):
if v == 1:
if i: # No + for first item
mo = self.dom.createElement('mo')
mo.appendChild(self.dom.createTextNode('+'))
mrow.appendChild(mo)
mrow.appendChild(self._print(k))
elif v == -1:
mo = self.dom.createElement('mo')
mo.appendChild(self.dom.createTextNode('-'))
mrow.appendChild(mo)
mrow.appendChild(self._print(k))
else:
if i: # No + for first item
mo = self.dom.createElement('mo')
mo.appendChild(self.dom.createTextNode('+'))
mrow.appendChild(mo)
mbrac = self.dom.createElement('mfenced')
mbrac.appendChild(self._print(v))
mrow.appendChild(mbrac)
mo = self.dom.createElement('mo')
mo.appendChild(self.dom.createTextNode('⁢'))
mrow.appendChild(mo)
mrow.appendChild(self._print(k))
return mrow
def _print_And(self, expr):
args = sorted(expr.args, key=default_sort_key)
return self._print_LogOp(args, '∧')
def _print_Or(self, expr):
args = sorted(expr.args, key=default_sort_key)
return self._print_LogOp(args, '∨')
def _print_Xor(self, expr):
args = sorted(expr.args, key=default_sort_key)
return self._print_LogOp(args, '⊻')
def _print_Implies(self, expr):
return self._print_LogOp(expr.args, '⇒')
def _print_Equivalent(self, expr):
args = sorted(expr.args, key=default_sort_key)
return self._print_LogOp(args, '⇔')
def _print_Not(self, e):
mrow = self.dom.createElement('mrow')
mo = self.dom.createElement('mo')
mo.appendChild(self.dom.createTextNode('¬'))
mrow.appendChild(mo)
if (e.args[0].is_Boolean):
x = self.dom.createElement('mfenced')
x.appendChild(self._print(e.args[0]))
else:
x = self._print(e.args[0])
mrow.appendChild(x)
return mrow
def _print_bool(self, e):
mi = self.dom.createElement('mi')
mi.appendChild(self.dom.createTextNode(self.mathml_tag(e)))
return mi
_print_BooleanTrue = _print_bool
_print_BooleanFalse = _print_bool
def _print_NoneType(self, e):
mi = self.dom.createElement('mi')
mi.appendChild(self.dom.createTextNode(self.mathml_tag(e)))
return mi
def _print_Range(self, s):
dots = "\u2026"
brac = self.dom.createElement('mfenced')
brac.setAttribute('close', '}')
brac.setAttribute('open', '{')
if s.start.is_infinite and s.stop.is_infinite:
if s.step.is_positive:
printset = dots, -1, 0, 1, dots
else:
printset = dots, 1, 0, -1, dots
elif s.start.is_infinite:
printset = dots, s[-1] - s.step, s[-1]
elif s.stop.is_infinite:
it = iter(s)
printset = next(it), next(it), dots
elif len(s) > 4:
it = iter(s)
printset = next(it), next(it), dots, s[-1]
else:
printset = tuple(s)
for el in printset:
if el == dots:
mi = self.dom.createElement('mi')
mi.appendChild(self.dom.createTextNode(dots))
brac.appendChild(mi)
else:
brac.appendChild(self._print(el))
return brac
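# Illustrative example: Range(1, 100) is abbreviated as {1, 2, …, 99}, since
# only ranges with at most four elements are printed in full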
def _hprint_variadic_function(self, expr):
args = sorted(expr.args, key=default_sort_key)
mrow = self.dom.createElement('mrow')
mo = self.dom.createElement('mo')
mo.appendChild(self.dom.createTextNode((str(expr.func)).lower()))
mrow.appendChild(mo)
brac = self.dom.createElement('mfenced')
for symbol in args:
brac.appendChild(self._print(symbol))
mrow.appendChild(brac)
return mrow
_print_Min = _print_Max = _hprint_variadic_function
def _print_exp(self, expr):
msup = self.dom.createElement('msup')
msup.appendChild(self._print_Exp1(None))
msup.appendChild(self._print(expr.args[0]))
return msup
def _print_Relational(self, e):
mrow = self.dom.createElement('mrow')
mrow.appendChild(self._print(e.lhs))
x = self.dom.createElement('mo')
x.appendChild(self.dom.createTextNode(self.mathml_tag(e)))
mrow.appendChild(x)
mrow.appendChild(self._print(e.rhs))
return mrow
def _print_int(self, p):
dom_element = self.dom.createElement(self.mathml_tag(p))
dom_element.appendChild(self.dom.createTextNode(str(p)))
return dom_element
def _print_BaseScalar(self, e):
msub = self.dom.createElement('msub')
index, system = e._id
mi = self.dom.createElement('mi')
mi.setAttribute('mathvariant', 'bold')
mi.appendChild(self.dom.createTextNode(system._variable_names[index]))
msub.appendChild(mi)
mi = self.dom.createElement('mi')
mi.setAttribute('mathvariant', 'bold')
mi.appendChild(self.dom.createTextNode(system._name))
msub.appendChild(mi)
return msub
def _print_BaseVector(self, e):
msub = self.dom.createElement('msub')
index, system = e._id
mover = self.dom.createElement('mover')
mi = self.dom.createElement('mi')
mi.setAttribute('mathvariant', 'bold')
mi.appendChild(self.dom.createTextNode(system._vector_names[index]))
mover.appendChild(mi)
mo = self.dom.createElement('mo')
mo.appendChild(self.dom.createTextNode('^'))
mover.appendChild(mo)
msub.appendChild(mover)
mi = self.dom.createElement('mi')
mi.setAttribute('mathvariant', 'bold')
mi.appendChild(self.dom.createTextNode(system._name))
msub.appendChild(mi)
return msub
def _print_VectorZero(self, e):
mover = self.dom.createElement('mover')
mi = self.dom.createElement('mi')
mi.setAttribute('mathvariant', 'bold')
mi.appendChild(self.dom.createTextNode("0"))
mover.appendChild(mi)
mo = self.dom.createElement('mo')
mo.appendChild(self.dom.createTextNode('^'))
mover.appendChild(mo)
return mover
def _print_Cross(self, expr):
mrow = self.dom.createElement('mrow')
vec1 = expr._expr1
vec2 = expr._expr2
mrow.appendChild(self.parenthesize(vec1, PRECEDENCE['Mul']))
mo = self.dom.createElement('mo')
mo.appendChild(self.dom.createTextNode('×'))
mrow.appendChild(mo)
mrow.appendChild(self.parenthesize(vec2, PRECEDENCE['Mul']))
return mrow
def _print_Curl(self, expr):
mrow = self.dom.createElement('mrow')
mo = self.dom.createElement('mo')
mo.appendChild(self.dom.createTextNode('∇'))
mrow.appendChild(mo)
mo = self.dom.createElement('mo')
mo.appendChild(self.dom.createTextNode('×'))
mrow.appendChild(mo)
mrow.appendChild(self.parenthesize(expr._expr, PRECEDENCE['Mul']))
return mrow
def _print_Divergence(self, expr):
mrow = self.dom.createElement('mrow')
mo = self.dom.createElement('mo')
mo.appendChild(self.dom.createTextNode('∇'))
mrow.appendChild(mo)
mo = self.dom.createElement('mo')
mo.appendChild(self.dom.createTextNode('·'))
mrow.appendChild(mo)
mrow.appendChild(self.parenthesize(expr._expr, PRECEDENCE['Mul']))
return mrow
def _print_Dot(self, expr):
mrow = self.dom.createElement('mrow')
vec1 = expr._expr1
vec2 = expr._expr2
mrow.appendChild(self.parenthesize(vec1, PRECEDENCE['Mul']))
mo = self.dom.createElement('mo')
mo.appendChild(self.dom.createTextNode('·'))
mrow.appendChild(mo)
mrow.appendChild(self.parenthesize(vec2, PRECEDENCE['Mul']))
return mrow
def _print_Gradient(self, expr):
mrow = self.dom.createElement('mrow')
mo = self.dom.createElement('mo')
mo.appendChild(self.dom.createTextNode('∇'))
mrow.appendChild(mo)
mrow.appendChild(self.parenthesize(expr._expr, PRECEDENCE['Mul']))
return mrow
def _print_Laplacian(self, expr):
mrow = self.dom.createElement('mrow')
mo = self.dom.createElement('mo')
mo.appendChild(self.dom.createTextNode('∆'))
mrow.appendChild(mo)
mrow.appendChild(self.parenthesize(expr._expr, PRECEDENCE['Mul']))
return mrow
def _print_Integers(self, e):
x = self.dom.createElement('mi')
x.setAttribute('mathvariant', 'normal')
x.appendChild(self.dom.createTextNode('ℤ'))
return x
def _print_Complexes(self, e):
x = self.dom.createElement('mi')
x.setAttribute('mathvariant', 'normal')
x.appendChild(self.dom.createTextNode('ℂ'))
return x
def _print_Reals(self, e):
x = self.dom.createElement('mi')
x.setAttribute('mathvariant', 'normal')
x.appendChild(self.dom.createTextNode('ℝ'))
return x
def _print_Naturals(self, e):
x = self.dom.createElement('mi')
x.setAttribute('mathvariant', 'normal')
x.appendChild(self.dom.createTextNode('ℕ'))
return x
def _print_Naturals0(self, e):
sub = self.dom.createElement('msub')
x = self.dom.createElement('mi')
x.setAttribute('mathvariant', 'normal')
x.appendChild(self.dom.createTextNode('ℕ'))
sub.appendChild(x)
sub.appendChild(self._print(S.Zero))
return sub
def _print_SingularityFunction(self, expr):
shift = expr.args[0] - expr.args[1]
power = expr.args[2]
sup = self.dom.createElement('msup')
brac = self.dom.createElement('mfenced')
brac.setAttribute('close', '\u27e9')
brac.setAttribute('open', '\u27e8')
brac.appendChild(self._print(shift))
sup.appendChild(brac)
sup.appendChild(self._print(power))
return sup
def _print_NaN(self, e):
x = self.dom.createElement('mi')
x.appendChild(self.dom.createTextNode('NaN'))
return x
def _print_number_function(self, e, name):
# Print name with args[0] as a subscript for one argument, or
# name_{args[0]}(args[1], ...) for more than one argument
sub = self.dom.createElement('msub')
mi = self.dom.createElement('mi')
mi.appendChild(self.dom.createTextNode(name))
sub.appendChild(mi)
sub.appendChild(self._print(e.args[0]))
if len(e.args) == 1:
return sub
# TODO: copy-pasted from _print_Function: can we do better?
mrow = self.dom.createElement('mrow')
y = self.dom.createElement('mfenced')
for arg in e.args[1:]:
y.appendChild(self._print(arg))
mrow.appendChild(sub)
mrow.appendChild(y)
return mrow
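# Illustrative examples: bernoulli(n) renders as B with subscript n, while
# euler(n, x) renders as E subscript n followed by (x) in an mfenced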
def _print_bernoulli(self, e):
return self._print_number_function(e, 'B')
_print_bell = _print_bernoulli
def _print_catalan(self, e):
return self._print_number_function(e, 'C')
def _print_euler(self, e):
return self._print_number_function(e, 'E')
def _print_fibonacci(self, e):
return self._print_number_function(e, 'F')
def _print_lucas(self, e):
return self._print_number_function(e, 'L')
def _print_stieltjes(self, e):
return self._print_number_function(e, 'γ')
def _print_tribonacci(self, e):
return self._print_number_function(e, 'T')
def _print_ComplexInfinity(self, e):
x = self.dom.createElement('mover')
mo = self.dom.createElement('mo')
mo.appendChild(self.dom.createTextNode('∞'))
x.appendChild(mo)
mo = self.dom.createElement('mo')
mo.appendChild(self.dom.createTextNode('~'))
x.appendChild(mo)
return x
def _print_EmptySet(self, e):
x = self.dom.createElement('mo')
x.appendChild(self.dom.createTextNode('∅'))
return x
def _print_UniversalSet(self, e):
x = self.dom.createElement('mo')
x.appendChild(self.dom.createTextNode('𝕌'))
return x
def _print_Adjoint(self, expr):
from sympy.matrices import MatrixSymbol
mat = expr.arg
sup = self.dom.createElement('msup')
if not isinstance(mat, MatrixSymbol):
brac = self.dom.createElement('mfenced')
brac.appendChild(self._print(mat))
sup.appendChild(brac)
else:
sup.appendChild(self._print(mat))
mo = self.dom.createElement('mo')
mo.appendChild(self.dom.createTextNode('†'))
sup.appendChild(mo)
return sup
def _print_Transpose(self, expr):
from sympy.matrices import MatrixSymbol
mat = expr.arg
sup = self.dom.createElement('msup')
if not isinstance(mat, MatrixSymbol):
brac = self.dom.createElement('mfenced')
brac.appendChild(self._print(mat))
sup.appendChild(brac)
else:
sup.appendChild(self._print(mat))
mo = self.dom.createElement('mo')
mo.appendChild(self.dom.createTextNode('T'))
sup.appendChild(mo)
return sup
def _print_Inverse(self, expr):
from sympy.matrices import MatrixSymbol
mat = expr.arg
sup = self.dom.createElement('msup')
if not isinstance(mat, MatrixSymbol):
brac = self.dom.createElement('mfenced')
brac.appendChild(self._print(mat))
sup.appendChild(brac)
else:
sup.appendChild(self._print(mat))
sup.appendChild(self._print(-1))
return sup
def _print_MatMul(self, expr):
from sympy import MatMul
x = self.dom.createElement('mrow')
args = expr.args
if isinstance(args[0], Mul):
args = args[0].as_ordered_factors() + list(args[1:])
else:
args = list(args)
if isinstance(expr, MatMul) and _coeff_isneg(expr):
if args[0] == -1:
args = args[1:]
else:
args[0] = -args[0]
mo = self.dom.createElement('mo')
mo.appendChild(self.dom.createTextNode('-'))
x.appendChild(mo)
for arg in args[:-1]:
x.appendChild(self.parenthesize(arg, precedence_traditional(expr),
False))
mo = self.dom.createElement('mo')
mo.appendChild(self.dom.createTextNode('⁢'))
x.appendChild(mo)
x.appendChild(self.parenthesize(args[-1], precedence_traditional(expr),
False))
return x
def _print_MatPow(self, expr):
from sympy.matrices import MatrixSymbol
base, exp = expr.base, expr.exp
sup = self.dom.createElement('msup')
if not isinstance(base, MatrixSymbol):
brac = self.dom.createElement('mfenced')
brac.appendChild(self._print(base))
sup.appendChild(brac)
else:
sup.appendChild(self._print(base))
sup.appendChild(self._print(exp))
return sup
def _print_HadamardProduct(self, expr):
x = self.dom.createElement('mrow')
args = expr.args
for arg in args[:-1]:
x.appendChild(
self.parenthesize(arg, precedence_traditional(expr), False))
mo = self.dom.createElement('mo')
mo.appendChild(self.dom.createTextNode('∘'))
x.appendChild(mo)
x.appendChild(
self.parenthesize(args[-1], precedence_traditional(expr), False))
return x
def _print_ZeroMatrix(self, Z):
x = self.dom.createElement('mn')
x.appendChild(self.dom.createTextNode('𝟘'))
return x
def _print_OneMatrix(self, Z):
x = self.dom.createElement('mn')
x.appendChild(self.dom.createTextNode('𝟙'))
return x
def _print_Identity(self, I):
x = self.dom.createElement('mi')
x.appendChild(self.dom.createTextNode('𝕀'))
return x
def _print_floor(self, e):
mrow = self.dom.createElement('mrow')
x = self.dom.createElement('mfenced')
x.setAttribute('close', '\u230B')
x.setAttribute('open', '\u230A')
x.appendChild(self._print(e.args[0]))
mrow.appendChild(x)
return mrow
def _print_ceiling(self, e):
mrow = self.dom.createElement('mrow')
x = self.dom.createElement('mfenced')
x.setAttribute('close', '\u2309')
x.setAttribute('open', '\u2308')
x.appendChild(self._print(e.args[0]))
mrow.appendChild(x)
return mrow
def _print_Lambda(self, e):
x = self.dom.createElement('mfenced')
mrow = self.dom.createElement('mrow')
symbols = e.args[0]
if len(symbols) == 1:
symbols = self._print(symbols[0])
else:
symbols = self._print(symbols)
mrow.appendChild(symbols)
mo = self.dom.createElement('mo')
mo.appendChild(self.dom.createTextNode('↦'))
mrow.appendChild(mo)
mrow.appendChild(self._print(e.args[1]))
x.appendChild(mrow)
return x
def _print_tuple(self, e):
x = self.dom.createElement('mfenced')
for i in e:
x.appendChild(self._print(i))
return x
def _print_IndexedBase(self, e):
return self._print(e.label)
def _print_Indexed(self, e):
x = self.dom.createElement('msub')
x.appendChild(self._print(e.base))
if len(e.indices) == 1:
x.appendChild(self._print(e.indices[0]))
return x
x.appendChild(self._print(e.indices))
return x
def _print_MatrixElement(self, e):
x = self.dom.createElement('msub')
x.appendChild(self.parenthesize(e.parent, PRECEDENCE["Atom"], strict = True))
brac = self.dom.createElement('mfenced')
brac.setAttribute("close", "")
brac.setAttribute("open", "")
for i in e.indices:
brac.appendChild(self._print(i))
x.appendChild(brac)
return x
def _print_elliptic_f(self, e):
x = self.dom.createElement('mrow')
mi = self.dom.createElement('mi')
mi.appendChild(self.dom.createTextNode('𝖥'))
x.appendChild(mi)
y = self.dom.createElement('mfenced')
y.setAttribute("separators", "|")
for i in e.args:
y.appendChild(self._print(i))
x.appendChild(y)
return x
def _print_elliptic_e(self, e):
x = self.dom.createElement('mrow')
mi = self.dom.createElement('mi')
mi.appendChild(self.dom.createTextNode('𝖤'))
x.appendChild(mi)
y = self.dom.createElement('mfenced')
y.setAttribute("separators", "|")
for i in e.args:
y.appendChild(self._print(i))
x.appendChild(y)
return x
def _print_elliptic_pi(self, e):
x = self.dom.createElement('mrow')
mi = self.dom.createElement('mi')
mi.appendChild(self.dom.createTextNode('𝛱'))
x.appendChild(mi)
y = self.dom.createElement('mfenced')
if len(e.args) == 2:
y.setAttribute("separators", "|")
else:
y.setAttribute("separators", ";|")
for i in e.args:
y.appendChild(self._print(i))
x.appendChild(y)
return x
def _print_Ei(self, e):
x = self.dom.createElement('mrow')
mi = self.dom.createElement('mi')
mi.appendChild(self.dom.createTextNode('Ei'))
x.appendChild(mi)
x.appendChild(self._print(e.args))
return x
def _print_expint(self, e):
x = self.dom.createElement('mrow')
y = self.dom.createElement('msub')
mo = self.dom.createElement('mo')
mo.appendChild(self.dom.createTextNode('E'))
y.appendChild(mo)
y.appendChild(self._print(e.args[0]))
x.appendChild(y)
x.appendChild(self._print(e.args[1:]))
return x
def _print_jacobi(self, e):
x = self.dom.createElement('mrow')
y = self.dom.createElement('msubsup')
mo = self.dom.createElement('mo')
mo.appendChild(self.dom.createTextNode('P'))
y.appendChild(mo)
y.appendChild(self._print(e.args[0]))
y.appendChild(self._print(e.args[1:3]))
x.appendChild(y)
x.appendChild(self._print(e.args[3:]))
return x
def _print_gegenbauer(self, e):
x = self.dom.createElement('mrow')
y = self.dom.createElement('msubsup')
mo = self.dom.createElement('mo')
mo.appendChild(self.dom.createTextNode('C'))
y.appendChild(mo)
y.appendChild(self._print(e.args[0]))
y.appendChild(self._print(e.args[1:2]))
x.appendChild(y)
x.appendChild(self._print(e.args[2:]))
return x
def _print_chebyshevt(self, e):
x = self.dom.createElement('mrow')
y = self.dom.createElement('msub')
mo = self.dom.createElement('mo')
mo.appendChild(self.dom.createTextNode('T'))
y.appendChild(mo)
y.appendChild(self._print(e.args[0]))
x.appendChild(y)
x.appendChild(self._print(e.args[1:]))
return x
def _print_chebyshevu(self, e):
x = self.dom.createElement('mrow')
y = self.dom.createElement('msub')
mo = self.dom.createElement('mo')
mo.appendChild(self.dom.createTextNode('U'))
y.appendChild(mo)
y.appendChild(self._print(e.args[0]))
x.appendChild(y)
x.appendChild(self._print(e.args[1:]))
return x
def _print_legendre(self, e):
x = self.dom.createElement('mrow')
y = self.dom.createElement('msub')
mo = self.dom.createElement('mo')
mo.appendChild(self.dom.createTextNode('P'))
y.appendChild(mo)
y.appendChild(self._print(e.args[0]))
x.appendChild(y)
x.appendChild(self._print(e.args[1:]))
return x
def _print_assoc_legendre(self, e):
x = self.dom.createElement('mrow')
y = self.dom.createElement('msubsup')
mo = self.dom.createElement('mo')
mo.appendChild(self.dom.createTextNode('P'))
y.appendChild(mo)
y.appendChild(self._print(e.args[0]))
y.appendChild(self._print(e.args[1:2]))
x.appendChild(y)
x.appendChild(self._print(e.args[2:]))
return x
def _print_laguerre(self, e):
x = self.dom.createElement('mrow')
y = self.dom.createElement('msub')
mo = self.dom.createElement('mo')
mo.appendChild(self.dom.createTextNode('L'))
y.appendChild(mo)
y.appendChild(self._print(e.args[0]))
x.appendChild(y)
x.appendChild(self._print(e.args[1:]))
return x
def _print_assoc_laguerre(self, e):
x = self.dom.createElement('mrow')
y = self.dom.createElement('msubsup')
mo = self.dom.createElement('mo')
mo.appendChild(self.dom.createTextNode('L'))
y.appendChild(mo)
y.appendChild(self._print(e.args[0]))
y.appendChild(self._print(e.args[1:2]))
x.appendChild(y)
x.appendChild(self._print(e.args[2:]))
return x
def _print_hermite(self, e):
x = self.dom.createElement('mrow')
y = self.dom.createElement('msub')
mo = self.dom.createElement('mo')
mo.appendChild(self.dom.createTextNode('H'))
y.appendChild(mo)
y.appendChild(self._print(e.args[0]))
x.appendChild(y)
x.appendChild(self._print(e.args[1:]))
return x
@print_function(MathMLPrinterBase)
def mathml(expr, printer='content', **settings):
"""Returns the MathML representation of expr. If printer is presentation
then prints Presentation MathML else prints content MathML.
"""
if printer == 'presentation':
return MathMLPresentationPrinter(settings).doprint(expr)
else:
return MathMLContentPrinter(settings).doprint(expr)
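# A minimal usage sketch (not part of the original module; the exact output
# strings assume SymPy's default printer settings):
#
#     >>> from sympy.abc import x
#     >>> mathml(x**2)
#     '<apply><power/><ci>x</ci><cn>2</cn></apply>'
#     >>> mathml(x**2, printer='presentation')
#     '<msup><mi>x</mi><mn>2</mn></msup>'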
def print_mathml(expr, printer='content', **settings):
"""
Prints a pretty-printed representation of the MathML code for expr. If
printer is 'presentation' then Presentation MathML is printed; otherwise
Content MathML is printed.
Examples
========
>>> from sympy.printing.mathml import print_mathml
>>> from sympy.abc import x
>>> print_mathml(x+1) #doctest: +NORMALIZE_WHITESPACE
<apply>
<plus/>
<ci>x</ci>
<cn>1</cn>
</apply>
>>> print_mathml(x+1, printer='presentation')
<mrow>
<mi>x</mi>
<mo>+</mo>
<mn>1</mn>
</mrow>
"""
if printer == 'presentation':
s = MathMLPresentationPrinter(settings)
else:
s = MathMLContentPrinter(settings)
xml = s._print(sympify(expr))
s.apply_patch()
pretty_xml = xml.toprettyxml()
s.restore_patch()
print(pretty_xml)
# For backward compatibility
MathMLPrinter = MathMLContentPrinter
| 35.369708
| 85
| 0.565921
|
127f1b45b02580c60bd4bc64c9528d653bbe5bf2
| 14,545
|
py
|
Python
|
src/synapse/azext_synapse/vendored_sdks/azure_mgmt_synapse/operations/_sql_pool_vulnerability_assessment_scans_operations.py
|
Mannan2812/azure-cli-extensions
|
e2b34efe23795f6db9c59100534a40f0813c3d95
|
[
"MIT"
] | 8
|
2021-01-13T23:44:08.000Z
|
2021-03-17T10:13:36.000Z
|
src/synapse/azext_synapse/vendored_sdks/azure_mgmt_synapse/operations/_sql_pool_vulnerability_assessment_scans_operations.py
|
Mannan2812/azure-cli-extensions
|
e2b34efe23795f6db9c59100534a40f0813c3d95
|
[
"MIT"
] | 226
|
2019-07-24T07:57:21.000Z
|
2019-10-15T01:07:24.000Z
|
src/synapse/azext_synapse/vendored_sdks/azure_mgmt_synapse/operations/_sql_pool_vulnerability_assessment_scans_operations.py
|
Mannan2812/azure-cli-extensions
|
e2b34efe23795f6db9c59100534a40f0813c3d95
|
[
"MIT"
] | 5
|
2020-05-09T17:47:09.000Z
|
2020-10-01T19:52:06.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from msrest.polling import LROPoller, NoPolling
from msrestazure.polling.arm_polling import ARMPolling
from .. import models
class SqlPoolVulnerabilityAssessmentScansOperations(object):
"""SqlPoolVulnerabilityAssessmentScansOperations operations.
You should not instantiate this class directly; instead, create a Client instance that will create it for you and attach it as an attribute.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
:ivar api_version: The API version to use for this operation. Constant value: "2019-06-01-preview".
:ivar vulnerability_assessment_name: The name of the vulnerability assessment. Constant value: "default".
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2019-06-01-preview"
self.vulnerability_assessment_name = "default"
self.config = config
def list(
self, resource_group_name, workspace_name, sql_pool_name, custom_headers=None, raw=False, **operation_config):
"""Lists the vulnerability assessment scans of a SQL pool.
Lists the vulnerability assessment scans of a SQL pool.
:param resource_group_name: The name of the resource group. The name
is case insensitive.
:type resource_group_name: str
:param workspace_name: The name of the workspace
:type workspace_name: str
:param sql_pool_name: SQL pool name
:type sql_pool_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of
VulnerabilityAssessmentScanRecord
:rtype:
~azure.mgmt.synapse.models.VulnerabilityAssessmentScanRecordPaged[~azure.mgmt.synapse.models.VulnerabilityAssessmentScanRecord]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def prepare_request(next_link=None):
if not next_link:
# Construct URL
url = self.list.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str'),
'sqlPoolName': self._serialize.url("sql_pool_name", sql_pool_name, 'str'),
'vulnerabilityAssessmentName': self._serialize.url("self.vulnerability_assessment_name", self.vulnerability_assessment_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str', min_length=1)
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
return request
def internal_paging(next_link=None):
request = prepare_request(next_link)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
header_dict = None
if raw:
header_dict = {}
deserialized = models.VulnerabilityAssessmentScanRecordPaged(internal_paging, self._deserialize.dependencies, header_dict)
return deserialized
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/sqlPools/{sqlPoolName}/vulnerabilityAssessments/{vulnerabilityAssessmentName}/scans'}
def _initiate_scan_initial(
self, resource_group_name, workspace_name, sql_pool_name, scan_id, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = self.initiate_scan.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str'),
'sqlPoolName': self._serialize.url("sql_pool_name", sql_pool_name, 'str'),
'vulnerabilityAssessmentName': self._serialize.url("self.vulnerability_assessment_name", self.vulnerability_assessment_name, 'str'),
'scanId': self._serialize.url("scan_id", scan_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str', min_length=1)
# Construct headers
header_parameters = {}
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.post(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200, 202]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def initiate_scan(
self, resource_group_name, workspace_name, sql_pool_name, scan_id, custom_headers=None, raw=False, polling=True, **operation_config):
"""Executes a Vulnerability Assessment database scan.
:param resource_group_name: The name of the resource group. The name
is case insensitive.
:type resource_group_name: str
:param workspace_name: The name of the workspace
:type workspace_name: str
:param sql_pool_name: SQL pool name
:type sql_pool_name: str
:param scan_id: The vulnerability assessment scan Id of the scan to
retrieve.
:type scan_id: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns None or
ClientRawResponse<None> if raw==True
:rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[None]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._initiate_scan_initial(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
sql_pool_name=sql_pool_name,
scan_id=scan_id,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
initiate_scan.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/sqlPools/{sqlPoolName}/vulnerabilityAssessments/{vulnerabilityAssessmentName}/scans/{scanId}/initiateScan'}
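# Hypothetical usage sketch (client construction elided; the attribute and
# variable names below are assumptions, not defined in this module):
#     poller = client.sql_pool_vulnerability_assessment_scans.initiate_scan(
#         resource_group_name, workspace_name, sql_pool_name, scan_id)
#     poller.result()  # block until the long-running operation completes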
def export(
self, resource_group_name, workspace_name, sql_pool_name, scan_id, custom_headers=None, raw=False, **operation_config):
"""Convert an existing scan result to a human readable format. If already
exists nothing happens.
:param resource_group_name: The name of the resource group. The name
is case insensitive.
:type resource_group_name: str
:param workspace_name: The name of the workspace
:type workspace_name: str
:param sql_pool_name: SQL pool name
:type sql_pool_name: str
:param scan_id: The vulnerability assessment scan Id of the scan to
retrieve.
:type scan_id: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: SqlPoolVulnerabilityAssessmentScansExport or
ClientRawResponse if raw=true
:rtype:
~azure.mgmt.synapse.models.SqlPoolVulnerabilityAssessmentScansExport
or ~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = self.export.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str'),
'sqlPoolName': self._serialize.url("sql_pool_name", sql_pool_name, 'str'),
'vulnerabilityAssessmentName': self._serialize.url("self.vulnerability_assessment_name", self.vulnerability_assessment_name, 'str'),
'scanId': self._serialize.url("scan_id", scan_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str', min_length=1)
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.post(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200, 201]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('SqlPoolVulnerabilityAssessmentScansExport', response)
if response.status_code == 201:
deserialized = self._deserialize('SqlPoolVulnerabilityAssessmentScansExport', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
export.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/sqlPools/{sqlPoolName}/vulnerabilityAssessments/{vulnerabilityAssessmentName}/scans/{scanId}/export'}
| 50.155172
| 267
| 0.67659
|
d6bf84a8f17ebc5e649220e880269fcc7d2204f5
| 2,181
|
py
|
Python
|
tests/metrics/test_ksd.py
|
michaeldeistler/sbibm-1
|
8e9875f79beb828c07fbf4820b30413914d1ceca
|
[
"MIT"
] | 2
|
2021-05-06T06:19:27.000Z
|
2022-02-20T19:49:55.000Z
|
tests/metrics/test_ksd.py
|
mackelab/sbibm
|
b9781c610a1a80d2de014ee46a29cf061fb6074a
|
[
"MIT"
] | null | null | null |
tests/metrics/test_ksd.py
|
mackelab/sbibm
|
b9781c610a1a80d2de014ee46a29cf061fb6074a
|
[
"MIT"
] | 1
|
2022-01-23T15:54:06.000Z
|
2022-01-23T15:54:06.000Z
|
from pathlib import Path
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pyro
import scipy.stats as stats
import torch
import torch.distributions.transforms as transforms
import sbibm
import sbibm.third_party.kgof
import sbibm.third_party.kgof.data as data
import sbibm.third_party.kgof.density as density
import sbibm.third_party.kgof.goftest as gof
import sbibm.third_party.kgof.kernel as kernel
import sbibm.third_party.kgof.util as util
from sbibm.third_party.kgof.density import UnnormalizedDensity
def test_ksd():
"""Test quadratic time KSD
Following the example in:
https://github.com/wittawatj/kernel-gof/blob/master/ipynb/gof_kernel_stein.ipynb
"""
seed = 42
d = 2 # dimensionality
n = 800 # samples
# Density
mean = np.zeros(d)
variance = 1.0
p = density.IsotropicNormal(mean, variance)
# Samples from same density
ds = data.DSIsotropicNormal(mean, variance)
samples = ds.sample(n, seed=seed + 1)
# Gaussian kernel with median heuristic
sig2 = util.meddistance(samples.data(), subsample=1000) ** 2
k = kernel.KGauss(sig2)
print(f"Kernel bandwidth: {sig2}")
# KSD
bootstrapper = gof.bootstrapper_rademacher
kstein = gof.KernelSteinTest(
p, k, bootstrapper=bootstrapper, alpha=0.01, n_simulate=500, seed=seed + 1
)
test_result = kstein.perform_test(
samples, return_simulated_stats=False, return_ustat_gram=False
)
print(test_result)
assert not test_result["h0_rejected"]
# KSD with samples from different density
ds = data.DSLaplace(d=d, loc=0, scale=1.0 / np.sqrt(2))
samples = ds.sample(n, seed=seed + 1)
sig2 = util.meddistance(samples.data(), subsample=1000) ** 2
print(f"Kernel bandwidth: {sig2}")
k = kernel.KGauss(sig2)
bootstrapper = gof.bootstrapper_rademacher
kstein = gof.KernelSteinTest(
p, k, bootstrapper=bootstrapper, alpha=0.01, n_simulate=500, seed=seed + 1
)
test_result = kstein.perform_test(
samples, return_simulated_stats=False, return_ustat_gram=False
)
print(test_result)
assert test_result["h0_rejected"]
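# The two blocks above illustrate the expected behaviour of the quadratic-time
# kernel Stein test: H0 is retained when the samples come from the model
# density, and rejected when they come from a Laplace distribution instead.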
| 30.291667
| 84
| 0.714351
|
74ecc813780ce5550b0d73f4e488a126c35ab3c6
| 1,024
|
py
|
Python
|
harbinger/list_frameworks.py
|
att-comdev/harbinger
|
75371fcb8dfbfab211359f94300f7fb25fb63e9e
|
[
"Apache-2.0"
] | null | null | null |
harbinger/list_frameworks.py
|
att-comdev/harbinger
|
75371fcb8dfbfab211359f94300f7fb25fb63e9e
|
[
"Apache-2.0"
] | 1
|
2018-10-05T16:47:38.000Z
|
2018-10-05T19:15:22.000Z
|
harbinger/list_frameworks.py
|
att-comdev/harbinger
|
75371fcb8dfbfab211359f94300f7fb25fb63e9e
|
[
"Apache-2.0"
] | 4
|
2018-09-19T14:16:29.000Z
|
2018-09-25T18:42:33.000Z
|
"""
ListFrameworks class:
- Lists all the frameworks that have been provided
through the provided yaml file
"""
from oslo_config import cfg
from oslo_log import log as logging
from prettytable import PrettyTable
from harbinger import base
from harbinger.common.utils import Utils
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
class ListFrameworks(base.Base):
description = "lists all supported frameworks"
def get_description(self):
return self.description
def get_parser(self, prog_name):
parser = super(ListFrameworks, self).get_parser(prog_name)
return parser
def take_action(self, parsed_args):
self.framework_tables = PrettyTable(["Frameworks"])
self.supported_frameworks = Utils.get_supported_frameworks()
self.log_frameworks(self.supported_frameworks)
def log_frameworks(self, supported_frameworks):
for key in supported_frameworks:
self.framework_tables.add_row([key])
print(self.framework_tables)
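# Hypothetical invocation sketch (the exact CLI entry-point and command names
# are assumptions, not defined in this file):
#     $ harbinger list-frameworks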
| 26.947368
| 68
| 0.733398