Dataset columns (⌀ marks a column that contains nulls):

| column | type | range |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 4 to 1.02M |
| ext | string | 8 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 4 to 209 |
| max_stars_repo_name | string | length 5 to 121 |
| max_stars_repo_head_hexsha | string | length 40 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count ⌀ | int64 | 1 to 191k |
| max_stars_repo_stars_event_min_datetime ⌀ | string | length 24 |
| max_stars_repo_stars_event_max_datetime ⌀ | string | length 24 |
| max_issues_repo_path | string | length 4 to 209 |
| max_issues_repo_name | string | length 5 to 121 |
| max_issues_repo_head_hexsha | string | length 40 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count ⌀ | int64 | 1 to 67k |
| max_issues_repo_issues_event_min_datetime ⌀ | string | length 24 |
| max_issues_repo_issues_event_max_datetime ⌀ | string | length 24 |
| max_forks_repo_path | string | length 4 to 209 |
| max_forks_repo_name | string | length 5 to 121 |
| max_forks_repo_head_hexsha | string | length 40 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count ⌀ | int64 | 1 to 105k |
| max_forks_repo_forks_event_min_datetime ⌀ | string | length 24 |
| max_forks_repo_forks_event_max_datetime ⌀ | string | length 24 |
| content | string | length 4 to 1.02M |
| avg_line_length | float64 | 1.07 to 66.1k |
| max_line_length | int64 | 4 to 266k |
| alphanum_fraction | float64 | 0.01 to 1 |
---
hexsha: f9b8b2c39935cb613faefcac6d668663e155e303 | size: 2,089 | ext: py | lang: Python
path: transform/color_op.py | repo: Lyken17/Colorize.PyTorch | head_hexsha: 9d352c52dc879988e2eb518261bc45e8216ffed6 | licenses: ["MIT"] (shared across the max_stars / max_issues / max_forks column groups)
max_stars_count: 2 (2018-05-23T20:29:07.000Z to 2018-11-29T04:20:02.000Z) | max_issues_count: null | max_forks_count: null
content:
import math
import numpy as np
import torch
import torch.nn as nn
from torch import autograd
from torch.autograd import Variable
import torch.nn.functional as F
class RGB2GRAY(object):
def __init__(self):
pass
    def __call__(self, tensor):
        assert tensor.size(0) == 3
        import cv2
        # cv2 expects a contiguous HxWxC image, while the tensor is CxHxW
        npimg = np.ascontiguousarray(tensor.numpy().transpose(1, 2, 0))
        npimg = cv2.cvtColor(npimg, cv2.COLOR_RGB2GRAY)
        npimg = cv2.cvtColor(npimg, cv2.COLOR_GRAY2RGB)
        tensor = torch.from_numpy(npimg.transpose(2, 0, 1).copy())
        return tensor
class Gamma(object):
def __init__(self, power=2):
self.power = power
def __call__(self, tensor):
assert tensor.size(0) == 3
return tensor ** self.power
class Linearize(object):
def __init__(self, power=2):
self.power = power
def __call__(self, tensor):
assert tensor.size(0) == 3
alpha = 0.055
tensor[tensor < 0.04045] /= 12.92
tensor[tensor > 0.04045] = ((tensor[tensor > 0.04045] + alpha) / (1 + alpha)) ** 2.4
return tensor
class __color_space_convert(object):
def __init__(self, mat=None):
self.mat = mat
def __call__(self, tensor):
assert tensor.size(0) == 3
if self.mat is None:
raise NotImplementedError
# tensor: 3xHxW
s = tensor.size()
        t = tensor.permute(1, 2, 0).contiguous().view(-1, s[0])
res = torch.mm(t, self.mat).view(s[1], s[2], 3).permute(2, 0, 1)
return res
class SRGB2XYZ(__color_space_convert):
# D65
# https://en.wikipedia.org/wiki/SRGB
def __init__(self):
mat = torch.Tensor(
[[0.4124564, 0.3575761, 0.1804375],
[0.2126729, 0.7151522, 0.0721750],
[0.0193339, 0.1191920, 0.9503041]]
)
super(SRGB2XYZ, self).__init__(mat=mat)
class XYZ2CIE(__color_space_convert):
def __init__(self):
mat = torch.Tensor(
[[0.4002, 0.7076, -0.0808],
[-0.2263, 1.1653, 0.0457],
[0.0, 0.0, 0.9182]]
)
super(XYZ2CIE, self).__init__(mat=mat)
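A minimal usage sketch for the transforms above (my own illustration, not part of the original file): it assumes a 3xHxW float tensor with values in [0, 1], which is the layout the assertions imply, and it needs opencv-python installed for RGB2GRAY.

```python
import torch

img = torch.rand(3, 8, 8)            # stand-in for a normalized RGB image tensor

linear = Linearize()(img.clone())    # Linearize mutates its input, so work on a copy
xyz = SRGB2XYZ()(linear)             # applies the 3x3 matrix per pixel
gray = RGB2GRAY()(img.clone())       # round-trips through OpenCV, stays 3xHxW

print(xyz.shape, gray.shape)         # torch.Size([3, 8, 8]) torch.Size([3, 8, 8])
```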
avg_line_length: 24.290698 | max_line_length: 92 | alphanum_fraction: 0.582097

---
hexsha: 993a666ee970fbb26f6059d717990b56c017a7a5 | size: 8,993 | ext: py | lang: Python
path: a10_octavia/common/data_models.py | repo: sana-a10/a10-octavia | head_hexsha: 28dc85544b06840a43f3b10b205abb78aa24a795 | licenses: ["Apache-2.0"] (shared across the max_stars / max_issues / max_forks column groups)
max_stars_count: null | max_issues_count: null | max_forks_count: null
content:
# Copyright 2019, A10 Networks
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from datetime import datetime
import re
import six
from sqlalchemy.orm import collections
class BaseDataModel(object):
def to_dict(self, calling_classes=None, recurse=False, **kwargs):
"""Converts a data model to a dictionary."""
calling_classes = calling_classes or []
ret = {}
for attr in self.__dict__:
if attr.startswith('_') or not kwargs.get(attr, True):
continue
value = self.__dict__[attr]
if recurse:
if isinstance(getattr(self, attr), list):
ret[attr] = []
for item in value:
if isinstance(item, BaseDataModel):
if type(self) not in calling_classes:
ret[attr].append(
item.to_dict(calling_classes=(
calling_classes + [type(self)])))
else:
ret[attr] = None
else:
ret[attr] = item
elif isinstance(getattr(self, attr), BaseDataModel):
if type(self) not in calling_classes:
ret[attr] = value.to_dict(
calling_classes=calling_classes + [type(self)])
else:
ret[attr] = None
elif six.PY2 and isinstance(value, six.text_type):
ret[attr.encode('utf8')] = value.encode('utf8')
else:
ret[attr] = value
else:
if isinstance(getattr(self, attr), (BaseDataModel, list)):
ret[attr] = None
else:
ret[attr] = value
return ret
def __eq__(self, other):
if isinstance(other, self.__class__):
return self.to_dict() == other.to_dict()
return False
def __ne__(self, other):
return not self.__eq__(other)
@classmethod
def from_dict(cls, dict):
return cls(**dict)
@classmethod
def _name(cls):
"""Returns class name in a more human readable form."""
# Split the class name up by capitalized words
return ' '.join(re.findall('[A-Z][^A-Z]*', cls.__name__))
def _get_unique_key(self, obj=None):
"""Returns a unique key for passed object for data model building."""
obj = obj or self
# First handle all objects with their own ID, then handle subordinate
# objects.
if obj.__class__.__name__ in ['VThunder']:
return obj.__class__.__name__ + obj.id
else:
raise NotImplementedError
def _find_in_graph(self, key, _visited_nodes=None):
"""Locates an object with the given unique key in the current
object graph and returns a reference to it.
"""
_visited_nodes = _visited_nodes or []
mykey = self._get_unique_key()
if mykey in _visited_nodes:
# Seen this node already, don't traverse further
return None
elif mykey == key:
return self
else:
_visited_nodes.append(mykey)
attr_names = [attr_name for attr_name in dir(self)
if not attr_name.startswith('_')]
for attr_name in attr_names:
attr = getattr(self, attr_name)
if isinstance(attr, BaseDataModel):
result = attr._find_in_graph(
key, _visited_nodes=_visited_nodes)
if result is not None:
return result
elif isinstance(attr, (collections.InstrumentedList, list)):
for item in attr:
if isinstance(item, BaseDataModel):
result = item._find_in_graph(
key, _visited_nodes=_visited_nodes)
if result is not None:
return result
# If we are here we didn't find it.
return None
def update(self, update_dict):
"""Generic update method which works for simple,
non-relational attributes.
"""
for key, value in update_dict.items():
setattr(self, key, value)
class Thunder(BaseDataModel):
def __init__(self, id=None, vthunder_id=None, amphora_id=None,
device_name=None, ip_address=None, username=None,
password=None, axapi_version=None, undercloud=None,
loadbalancer_id=None, project_id=None, compute_id=None,
topology="STANDALONE", role="MASTER", last_udp_update=None, status="ACTIVE",
created_at=datetime.utcnow(), updated_at=datetime.utcnow(),
partition_name="shared", hierarchical_multitenancy="disable",
last_write_mem=None, vrid_floating_ip=None,
device_network_map=None, acos_version=None, device_name_as_key=False):
self.id = id
self.vthunder_id = vthunder_id
self.amphora_id = amphora_id
self.device_name = device_name
self.ip_address = ip_address
self.username = username
self.password = password
self.axapi_version = axapi_version
self.undercloud = undercloud
self.loadbalancer_id = loadbalancer_id
self.project_id = project_id
self.compute_id = compute_id
self.topology = topology
self.role = role
self.last_udp_update = last_udp_update
self.status = status
self.created_at = created_at
self.updated_at = updated_at
self.partition_name = partition_name
self.hierarchical_multitenancy = hierarchical_multitenancy
self.last_write_mem = last_write_mem
self.vrid_floating_ip = vrid_floating_ip
self.device_network_map = device_network_map or []
self.acos_version = acos_version
self.device_name_as_key = device_name_as_key
class HardwareThunder(Thunder):
def __init__(self, **kwargs):
Thunder.__init__(self, **kwargs)
class VThunder(Thunder):
def __init__(self, **kwargs):
Thunder.__init__(self, **kwargs)
class Certificate(BaseDataModel):
def __init__(self, cert_filename=None, cert_content=None, key_filename=None,
key_content=None, key_pass=None, template_name=None):
self.cert_filename = cert_filename
self.cert_content = cert_content
self.key_filename = key_filename
self.key_content = key_content
self.key_pass = key_pass
self.template_name = template_name
class VRID(BaseDataModel):
def __init__(self, id=None, project_id=None, vrid=None, vrid_port_id=None,
vrid_floating_ip=None, subnet_id=None):
self.id = id
self.project_id = project_id
self.vrid = vrid
self.vrid_port_id = vrid_port_id
self.vrid_floating_ip = vrid_floating_ip
self.subnet_id = subnet_id
class Interface(BaseDataModel):
def __init__(self, interface_num=None, tags=None, ve_ips=None):
self.interface_num = interface_num
self.tags = tags or []
self.ve_ips = ve_ips or []
class DeviceNetworkMap(BaseDataModel):
def __init__(self, vcs_device_id=None, mgmt_ip_address=None, ethernet_interfaces=None,
trunk_interfaces=None):
self.vcs_device_id = vcs_device_id
self.mgmt_ip_address = mgmt_ip_address
self.ethernet_interfaces = ethernet_interfaces or []
self.trunk_interfaces = trunk_interfaces or []
self.state = 'Unknown'
class NATPool(BaseDataModel):
def __init__(self, id=None, name=None, subnet_id=None, start_address=None,
end_address=None, member_ref_count=None, port_id=None):
self.id = id
self.name = name
self.subnet_id = subnet_id
self.start_address = start_address
self.end_address = end_address
self.member_ref_count = member_ref_count
self.port_id = port_id
class VrrpSet(BaseDataModel):
def __init__(self, mgmt_subnet=None, project_id=None, set_id=None):
self.mgmt_subnet = mgmt_subnet
self.project_id = project_id
self.set_id = set_id
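A small round-trip sketch for the data models above (illustrative only, using nothing beyond this module): it shows how BaseDataModel.to_dict, from_dict and the equality helpers behave for VThunder.

```python
vthunder = VThunder(id='vth-1', ip_address='10.0.0.5', username='admin',
                    project_id='proj-1', partition_name='shared')

flat = vthunder.to_dict()          # nested models and lists are flattened to None
clone = VThunder.from_dict(flat)   # rebuilds an equivalent instance

assert clone == vthunder           # __eq__ compares the to_dict() output
print(VThunder._name())            # "V Thunder"
print(vthunder._get_unique_key())  # "VThundervth-1"
```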
avg_line_length: 37.161157 | max_line_length: 93 | alphanum_fraction: 0.597909

---
hexsha: d415301ff727199547cd2261eba6339678b11441 | size: 20,616 | ext: py | lang: Python
path: qiskit/pulse/transforms.py | repo: renier/qiskit-terra | head_hexsha: 1f5e4c8f6768dfac5d68f39e9d38fdd783ba1346 | licenses: ["Apache-2.0"] (shared across the max_stars / max_issues / max_forks column groups)
max_stars_count: null | max_issues_count: null | max_forks_count: null
content:
# This code is part of Qiskit.
#
# (C) Copyright IBM 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Basic rescheduling functions which take schedules or instructions
(and possibly some arguments) and return new schedules.
"""
import warnings
from collections import defaultdict
from typing import Callable
from typing import List, Optional, Iterable, Union
import numpy as np
from qiskit.pulse import channels as chans, exceptions, instructions
from qiskit.pulse.exceptions import PulseError
from qiskit.pulse.instruction_schedule_map import InstructionScheduleMap
from qiskit.pulse.instructions import directives
from qiskit.pulse.schedule import Schedule
def align_measures(schedules: Iterable[Union['Schedule', instructions.Instruction]],
inst_map: Optional[InstructionScheduleMap] = None,
cal_gate: str = 'u3',
max_calibration_duration: Optional[int] = None,
align_time: Optional[int] = None,
align_all: Optional[bool] = True,
) -> List[Schedule]:
"""Return new schedules where measurements occur at the same physical time.
This transformation will align the first :class:`qiskit.pulse.Acquire` on
every channel to occur at the same time.
Minimum measurement wait time (to allow for calibration pulses) is enforced
and may be set with ``max_calibration_duration``.
    By default (``align_all=True``) every instruction in a schedule is shifted so
    that relative timing is preserved. If you wish to shift only the instructions
    on an :class:`~qiskit.pulse.AcquireChannel` or :class:`~qiskit.pulse.MeasureChannel`,
    set ``align_all=False``.
This method assumes that ``MeasureChannel(i)`` and ``AcquireChannel(i)``
correspond to the same qubit and the acquire/play instructions
should be shifted together on these channels.
.. jupyter-kernel:: python3
:id: align_measures
.. jupyter-execute::
from qiskit import pulse
from qiskit.pulse import transforms
with pulse.build() as sched:
with pulse.align_sequential():
pulse.play(pulse.Constant(10, 0.5), pulse.DriveChannel(0))
pulse.play(pulse.Constant(10, 1.), pulse.MeasureChannel(0))
pulse.acquire(20, pulse.AcquireChannel(0), pulse.MemorySlot(0))
sched_shifted = sched << 20
aligned_sched, aligned_sched_shifted = transforms.align_measures([sched, sched_shifted])
assert aligned_sched == aligned_sched_shifted
    If it is desired to only shift acquisition and measurement stimulus instructions
set the flag ``align_all=False``:
.. jupyter-execute::
aligned_sched, aligned_sched_shifted = transforms.align_measures(
[sched, sched_shifted],
align_all=False,
)
assert aligned_sched != aligned_sched_shifted
Args:
schedules: Collection of schedules to be aligned together
inst_map: Mapping of circuit operations to pulse schedules
cal_gate: The name of the gate to inspect for the calibration time
max_calibration_duration: If provided, inst_map and cal_gate will be ignored
align_time: If provided, this will be used as final align time.
align_all: Shift all instructions in the schedule such that they maintain
            their relative alignment with the shifted acquisition instruction.
            If ``False`` only the acquisition and measurement pulse instructions
will be shifted.
Returns:
The input list of schedules transformed to have their measurements aligned.
Raises:
PulseError: If the provided alignment time is negative.
"""
def get_first_acquire_times(schedules):
"""Return a list of first acquire times for each schedule."""
acquire_times = []
for schedule in schedules:
visited_channels = set()
qubit_first_acquire_times = defaultdict(lambda: None)
for time, inst in schedule.instructions:
if (isinstance(inst, instructions.Acquire) and
inst.channel not in visited_channels):
visited_channels.add(inst.channel)
qubit_first_acquire_times[inst.channel.index] = time
acquire_times.append(qubit_first_acquire_times)
return acquire_times
def get_max_calibration_duration(inst_map, cal_gate):
"""Return the time needed to allow for readout discrimination calibration pulses."""
max_calibration_duration = 0
for qubits in inst_map.qubits_with_instruction(cal_gate):
cmd = inst_map.get(cal_gate, qubits, np.pi, 0, np.pi)
max_calibration_duration = max(cmd.duration, max_calibration_duration)
return max_calibration_duration
if align_time is not None and align_time < 0:
raise exceptions.PulseError("Align time cannot be negative.")
first_acquire_times = get_first_acquire_times(schedules)
# Extract the maximum acquire in every schedule across all acquires in the schedule.
# If there are no acquires in the schedule default to 0.
max_acquire_times = [max(0, *times.values()) for times in first_acquire_times]
if align_time is None:
if max_calibration_duration is None:
if inst_map:
max_calibration_duration = get_max_calibration_duration(inst_map, cal_gate)
else:
max_calibration_duration = 0
align_time = max(max_calibration_duration, *max_acquire_times)
# Shift acquires according to the new scheduled time
new_schedules = []
for sched_idx, schedule in enumerate(schedules):
new_schedule = Schedule(name=schedule.name)
stop_time = schedule.stop_time
if align_all:
if first_acquire_times[sched_idx]:
shift = align_time - max_acquire_times[sched_idx]
else:
shift = align_time - stop_time
else:
shift = 0
for time, inst in schedule.instructions:
measurement_channels = {
chan.index for chan in inst.channels if
isinstance(chan, (chans.MeasureChannel, chans.AcquireChannel))
}
if measurement_channels:
sched_first_acquire_times = first_acquire_times[sched_idx]
max_start_time = max(sched_first_acquire_times[chan]
for chan in measurement_channels if
chan in sched_first_acquire_times)
shift = align_time - max_start_time
if shift < 0:
warnings.warn(
"The provided alignment time is scheduling an acquire instruction "
"earlier than it was scheduled for in the original Schedule. "
"This may result in an instruction being scheduled before t=0 and "
"an error being raised."
)
new_schedule.insert(time+shift, inst, inplace=True)
new_schedules.append(new_schedule)
return new_schedules
def add_implicit_acquires(schedule: Union['Schedule', instructions.Instruction],
meas_map: List[List[int]]
) -> Schedule:
"""Return a new schedule with implicit acquires from the measurement mapping replaced by
explicit ones.
.. warning:: Since new acquires are being added, Memory Slots will be set to match the
qubit index. This may overwrite your specification.
Args:
schedule: Schedule to be aligned.
meas_map: List of lists of qubits that are measured together.
Returns:
A ``Schedule`` with the additional acquisition instructions.
"""
new_schedule = Schedule(name=schedule.name)
acquire_map = dict()
for time, inst in schedule.instructions:
if isinstance(inst, instructions.Acquire):
if inst.mem_slot and inst.mem_slot.index != inst.channel.index:
warnings.warn("One of your acquires was mapped to a memory slot which didn't match"
" the qubit index. I'm relabeling them to match.")
# Get the label of all qubits that are measured with the qubit(s) in this instruction
all_qubits = []
for sublist in meas_map:
if inst.channel.index in sublist:
all_qubits.extend(sublist)
# Replace the old acquire instruction by a new one explicitly acquiring all qubits in
# the measurement group.
for i in all_qubits:
explicit_inst = instructions.Acquire(inst.duration,
chans.AcquireChannel(i),
mem_slot=chans.MemorySlot(i),
kernel=inst.kernel,
discriminator=inst.discriminator)
if time not in acquire_map:
new_schedule.insert(time, explicit_inst, inplace=True)
acquire_map = {time: {i}}
elif i not in acquire_map[time]:
new_schedule.insert(time, explicit_inst, inplace=True)
acquire_map[time].add(i)
else:
new_schedule.insert(time, inst, inplace=True)
return new_schedule
def pad(schedule: Schedule,
channels: Optional[Iterable[chans.Channel]] = None,
until: Optional[int] = None,
inplace: bool = False
) -> Schedule:
r"""Pad the input Schedule with ``Delay``\s on all unoccupied timeslots until
``schedule.duration`` or ``until`` if not ``None``.
Args:
schedule: Schedule to pad.
channels: Channels to pad. Defaults to all channels in
``schedule`` if not provided. If the supplied channel is not a member
of ``schedule`` it will be added.
until: Time to pad until. Defaults to ``schedule.duration`` if not provided.
inplace: Pad this schedule by mutating rather than returning a new schedule.
Returns:
The padded schedule.
"""
until = until or schedule.duration
channels = channels or schedule.channels
for channel in channels:
if channel not in schedule.channels:
schedule |= instructions.Delay(until, channel)
continue
curr_time = 0
# Use the copy of timeslots. When a delay is inserted before the current interval,
# current timeslot is pointed twice and the program crashes with the wrong pointer index.
timeslots = schedule.timeslots[channel].copy()
# TODO: Replace with method of getting instructions on a channel
for interval in timeslots:
if curr_time >= until:
break
if interval[0] != curr_time:
end_time = min(interval[0], until)
schedule = schedule.insert(
curr_time,
instructions.Delay(end_time - curr_time, channel),
inplace=inplace)
curr_time = interval[1]
if curr_time < until:
schedule = schedule.insert(
curr_time,
instructions.Delay(until - curr_time, channel),
inplace=inplace)
return schedule
def compress_pulses(schedules: List[Schedule]) -> List[Schedule]:
"""Optimization pass to replace identical pulses.
Args:
schedules: Schedules to compress.
Returns:
Compressed schedules.
"""
existing_pulses = []
new_schedules = []
for schedule in schedules:
new_schedule = Schedule(name=schedule.name)
for time, inst in schedule.instructions:
if isinstance(inst, instructions.Play):
if inst.pulse in existing_pulses:
idx = existing_pulses.index(inst.pulse)
identical_pulse = existing_pulses[idx]
new_schedule.insert(time,
instructions.Play(identical_pulse,
inst.channel,
inst.name),
inplace=True)
else:
existing_pulses.append(inst.pulse)
new_schedule.insert(time, inst, inplace=True)
else:
new_schedule.insert(time, inst, inplace=True)
new_schedules.append(new_schedule)
return new_schedules
def _push_left_append(this: Schedule,
other: Union['Schedule', instructions.Instruction],
) -> Schedule:
r"""Return ``this`` with ``other`` inserted at the maximum time over
all channels shared between ```this`` and ``other``.
Args:
this: Input schedule to which ``other`` will be inserted.
other: Other schedule to insert.
Returns:
Push left appended schedule.
"""
this_channels = set(this.channels)
other_channels = set(other.channels)
shared_channels = list(this_channels & other_channels)
ch_slacks = [this.stop_time - this.ch_stop_time(channel) + other.ch_start_time(channel)
for channel in shared_channels]
if ch_slacks:
slack_chan = shared_channels[np.argmin(ch_slacks)]
shared_insert_time = this.ch_stop_time(slack_chan) - other.ch_start_time(slack_chan)
else:
shared_insert_time = 0
# Handle case where channels not common to both might actually start
# after ``this`` has finished.
other_only_insert_time = other.ch_start_time(*(other_channels - this_channels))
# Choose whichever is greatest.
insert_time = max(shared_insert_time, other_only_insert_time)
return this.insert(insert_time, other, inplace=True)
def align_left(schedule: Schedule) -> Schedule:
"""Align a list of pulse instructions on the left.
Args:
schedule: Input schedule of which top-level ``child`` nodes will be
            rescheduled.
Returns:
        New schedule with input ``schedule`` child schedules and instructions
left aligned.
"""
aligned = Schedule()
for _, child in schedule._children:
_push_left_append(aligned, child)
return aligned
def _push_right_prepend(this: Union['Schedule', instructions.Instruction],
other: Union['Schedule', instructions.Instruction],
) -> Schedule:
r"""Return ``this`` with ``other`` inserted at the latest possible time
such that ``other`` ends before it overlaps with any of ``this``.
If required ``this`` is shifted to start late enough so that there is room
to insert ``other``.
Args:
this: Input schedule to which ``other`` will be inserted.
other: Other schedule to insert.
Returns:
Push right prepended schedule.
"""
this_channels = set(this.channels)
other_channels = set(other.channels)
shared_channels = list(this_channels & other_channels)
ch_slacks = [this.ch_start_time(channel) - other.ch_stop_time(channel)
for channel in shared_channels]
if ch_slacks:
insert_time = min(ch_slacks) + other.start_time
else:
insert_time = this.stop_time - other.stop_time + other.start_time
if insert_time < 0:
this.shift(-insert_time, inplace=True)
this.insert(0, other, inplace=True)
else:
this.insert(insert_time, other, inplace=True)
return this
def align_right(schedule: Schedule) -> Schedule:
"""Align a list of pulse instructions on the right.
Args:
schedule: Input schedule of which top-level ``child`` nodes will be
            rescheduled.
Returns:
        New schedule with input ``schedule`` child schedules and instructions
right aligned.
"""
aligned = Schedule()
for _, child in reversed(schedule._children):
aligned = _push_right_prepend(aligned, child)
return aligned
def align_sequential(schedule: Schedule) -> Schedule:
"""Schedule all top-level nodes in parallel.
Args:
schedule: Input schedule of which top-level ``child`` nodes will be
            rescheduled.
Returns:
        New schedule with input ``schedule`` child schedules and instructions
applied sequentially across channels
"""
aligned = Schedule()
for _, child in schedule._children:
aligned.insert(aligned.duration, child, inplace=True)
return aligned
def align_equispaced(schedule: Schedule,
duration: int) -> Schedule:
"""Schedule a list of pulse instructions with equivalent interval.
Args:
schedule: Input schedule of which top-level ``child`` nodes will be
            rescheduled.
duration: Duration of context. This should be larger than the schedule duration.
Returns:
        New schedule with input ``schedule`` child schedules and instructions
aligned with equivalent interval.
Notes:
This context is convenient for writing PDD or Hahn echo sequence for example.
"""
if duration and duration < schedule.duration:
return schedule
else:
total_delay = duration - schedule.duration
if len(schedule._children) > 1:
# Calculate the interval in between sub-schedules.
# If the duration cannot be divided by the number of sub-schedules,
# the modulo is appended and prepended to the input schedule.
interval, mod = np.divmod(total_delay, len(schedule._children) - 1)
else:
interval = 0
mod = total_delay
# Calculate pre schedule delay
delay, mod = np.divmod(mod, 2)
aligned = Schedule()
# Insert sub-schedules with interval
_t0 = int(aligned.stop_time + delay + mod)
for _, child in schedule._children:
aligned.insert(_t0, child, inplace=True)
_t0 = int(aligned.stop_time + interval)
return pad(aligned, aligned.channels, until=duration, inplace=True)
def align_func(schedule: Schedule,
duration: int,
func: Callable[[int], float]) -> Schedule:
"""Schedule a list of pulse instructions with schedule position defined by the
numerical expression.
Args:
schedule: Input schedule of which top-level ``child`` nodes will be
            rescheduled.
duration: Duration of context. This should be larger than the schedule duration.
func: A function that takes an index of sub-schedule and returns the
            fractional coordinate of that sub-schedule.
The returned value should be defined within [0, 1].
The pulse index starts from 1.
Returns:
        New schedule with input ``schedule`` child schedules and instructions
aligned with equivalent interval.
Notes:
This context is convenient for writing UDD sequence for example.
"""
if duration < schedule.duration:
return schedule
aligned = Schedule()
for ind, (_, child) in enumerate(schedule._children):
_t_center = duration * func(ind + 1)
_t0 = int(_t_center - 0.5 * child.duration)
if _t0 < 0 or _t0 > duration:
            raise PulseError('Invalid schedule position t=%d is specified at index=%d' % (_t0, ind))
aligned.insert(_t0, child, inplace=True)
return pad(aligned, aligned.channels, until=duration, inplace=True)
def flatten(schedule: Schedule) -> Schedule:
"""Flatten any called nodes into a Schedule tree with no nested children."""
return schedule.flatten()
def remove_directives(schedule: Schedule) -> Schedule:
"""Remove directives."""
return schedule.exclude(instruction_types=[directives.Directive])
def remove_trivial_barriers(schedule: Schedule) -> Schedule:
"""Remove trivial barriers with 0 or 1 channels."""
def filter_func(inst):
return (isinstance(inst[1], directives.RelativeBarrier) and
len(inst[1].channels) < 2)
return schedule.exclude(filter_func)
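A short sketch of the pad transform above, assuming a qiskit-terra version whose pulse API matches this module (Play, Constant and DriveChannel importable from qiskit.pulse); treat it as illustrative rather than canonical.

```python
from qiskit.pulse import Schedule, Play, Constant, DriveChannel

# A schedule whose only instruction starts at t=10, leaving a gap at the front.
sched = Schedule(Play(Constant(5, 0.1), DriveChannel(0))).shift(10)

padded = pad(sched)     # fills [0, 10) on DriveChannel(0) with a Delay
print(padded.duration)  # 15
```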
avg_line_length: 38.534579 | max_line_length: 99 | alphanum_fraction: 0.64057

---
hexsha: 69396c8ac5a886ec2bd896dbb32dbf29ed300351 | size: 1,761 | ext: py | lang: Python
path: trax/tf_numpy/examples/mnist/train_test.py | repo: dedsec-9/trax | head_hexsha: c394f9df7ee9dfe918cd67f4af2217d361f0f733 | licenses: ["Apache-2.0"] (shared across the max_stars / max_issues / max_forks column groups)
max_stars_count: 7,220 (2019-10-07T23:46:53.000Z to 2022-03-31T16:28:05.000Z) | max_issues_count: 301 (2019-10-08T06:42:04.000Z to 2022-03-12T07:03:46.000Z) | max_forks_count: 783 (2019-10-08T06:36:36.000Z to 2022-03-25T02:00:29.000Z)
content:
# coding=utf-8
# Copyright 2021 The Trax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test that the example training script works on fake data."""
import mock
import numpy as np
import tensorflow.compat.v2 as tf
from trax.tf_numpy.examples.mnist import dataset
from trax.tf_numpy.examples.mnist import train
class TFNumpyMnistExampleTest(tf.test.TestCase):
def testRuns(self):
with mock.patch.object(dataset, 'load', new=fake_mnist_data):
train.train(
batch_size=1,
learning_rate=0.1,
num_training_iters=10,
validation_steps=5)
train.train(
batch_size=2,
learning_rate=0.1,
num_training_iters=5,
validation_steps=2)
train.train(
batch_size=10,
learning_rate=0.1,
num_training_iters=1,
validation_steps=1)
def fake_mnist_data():
def gen_examples(num_examples):
x = np.array(
np.random.randn(num_examples, 784), copy=False, dtype=np.float32)
y = np.zeros((num_examples, 10), dtype=np.float32)
    # Label every fake example as class 0 (one-hot).
    y[:, 0] = 1.
return (x, y)
return (gen_examples(100), gen_examples(10), gen_examples(10))
if __name__ == '__main__':
tf.compat.v1.enable_eager_execution()
tf.test.main()
avg_line_length: 28.868852 | max_line_length: 74 | alphanum_fraction: 0.691652

---
hexsha: 721d479f60ca17e1623e43aab35027cafc8c71c9 | size: 2,094 | ext: py | lang: Python
path: aliyun-python-sdk-iot/aliyunsdkiot/request/v20180120/DeleteDeviceFileRequest.py | repo: yndu13/aliyun-openapi-python-sdk | head_hexsha: 12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5 | licenses: ["Apache-2.0"] (shared across the max_stars / max_issues / max_forks column groups)
max_stars_count: 1,001 (2015-07-24T01:32:41.000Z to 2022-03-25T01:28:18.000Z) | max_issues_count: 363 (2015-10-20T03:15:00.000Z to 2022-03-08T12:26:19.000Z) | max_forks_count: 682 (2015-09-22T07:19:02.000Z to 2022-03-22T09:51:46.000Z)
content:
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkiot.endpoint import endpoint_data
class DeleteDeviceFileRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Iot', '2018-01-20', 'DeleteDeviceFile','iot')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_IotId(self):
return self.get_query_params().get('IotId')
def set_IotId(self,IotId):
self.add_query_param('IotId',IotId)
def get_IotInstanceId(self):
return self.get_query_params().get('IotInstanceId')
def set_IotInstanceId(self,IotInstanceId):
self.add_query_param('IotInstanceId',IotInstanceId)
def get_FileId(self):
return self.get_query_params().get('FileId')
def set_FileId(self,FileId):
self.add_query_param('FileId',FileId)
def get_ProductKey(self):
return self.get_query_params().get('ProductKey')
def set_ProductKey(self,ProductKey):
self.add_query_param('ProductKey',ProductKey)
def get_DeviceName(self):
return self.get_query_params().get('DeviceName')
def set_DeviceName(self,DeviceName):
self.add_query_param('DeviceName',DeviceName)
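A hedged usage sketch for the request class above: the usual aliyun-python-sdk pattern is to populate the request and pass it to an AcsClient. The credentials, region and identifiers below are placeholders, not values from this repository.

```python
from aliyunsdkcore.client import AcsClient

client = AcsClient('<access-key-id>', '<access-key-secret>', 'cn-shanghai')

request = DeleteDeviceFileRequest()
request.set_ProductKey('<product-key>')
request.set_DeviceName('<device-name>')
request.set_FileId('<file-id>')

response = client.do_action_with_exception(request)
print(response)
```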
avg_line_length: 33.774194 | max_line_length: 75 | alphanum_fraction: 0.760267

---
hexsha: bfa579575529aad2e823e71f2f0f4e6524b79b9a | size: 1,302 | ext: py | lang: Python
path: scrapers/scrape_fr.py | repo: maekke/covid_19 | head_hexsha: d287a2937f7f528b4ebd8fa1d1ff27b3dc1ad1ec | licenses: ["CC-BY-4.0"] (shared across the max_stars / max_issues / max_forks column groups)
max_stars_count: null | max_issues_count: null | max_forks_count: 1 (2020-08-25T06:45:56.000Z to 2020-08-25T06:45:56.000Z)
content:
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import re
import datetime
import sys
from bs4 import BeautifulSoup
import scrape_common as sc
d = sc.download('https://www.fr.ch/sante/covid-19/coronavirus-statistiques-evolution-de-la-situation-dans-le-canton', silent=True)
soup = BeautifulSoup(d, 'html.parser')
xls_url = soup.find(href=re.compile(r"\.xlsx$")).get('href')
assert xls_url, "URL is empty"
if not xls_url.startswith('http'):
xls_url = f'https://www.fr.ch{xls_url}'
xls = sc.xlsdownload(xls_url, silent=True)
rows = sc.parse_xls(xls, header_row=0, sheet_name='Données sites internet')
is_first = True
for row in rows:
if row['Date'] is None:
continue
if not isinstance(row['Date'], datetime.datetime):
print(f"WARNING: {row['Date']} is not a valid date, skipping.", file=sys.stderr)
continue
if not is_first:
print('-' * 10)
is_first = False
print('FR')
sc.timestamp()
print('Downloading:', xls_url)
print('Date and time:', row['Date'].date().isoformat())
print('Confirmed cases:', row['Total cas avérés'])
print('Hospitalized:', row['Personnes hospitalisées'])
print('ICU:', row['dont soins intensifs'])
print('Deaths:', row['Total décès'])
print('Recovered:', row['Total Sortis de l\'hôpital'])
avg_line_length: 31.756098 | max_line_length: 130 | alphanum_fraction: 0.668971

---
hexsha: 08b54ce3ee8e1cbf8dba40f6cabf29a64b71fe06 | size: 11,185 | ext: py | lang: Python
path: app/lib/ops/tiles.py | repo: matthewzimmer/trajectory-contact-networks | head_hexsha: b70e12052447899cea2e21c1dda85aea2f62a469 | licenses: ["MIT"] (shared across the max_stars / max_issues / max_forks column groups)
max_stars_count: null | max_issues_count: 1 (2019-03-08T02:47:25.000Z to 2019-03-08T02:47:25.000Z) | max_forks_count: null
content:
from math import radians, cos, sin, asin, pi, sqrt
import itertools
import networkx as nx
import numpy as np
from app.lib.datasets import GeolifeData
from app.lib.pipeline_ops import PipelineOp
from app.lib.points import TrajectoryPoint
class GenerateTilesOp(PipelineOp):
EARTH_CIRCUMFERENCE_AT_EQUATOR_METERS = 40075160
EARTH_CIRCUMFERENCE_THROUGH_POLES_METERS = 40008000
"""
Generates a dictionary of tiles where the key is a hash
of lat/lon/time and the value is a set of unique user
ids that have points within that encoded
spaciotemporal tile (cube).
"""
def __init__(self, users, ds, dt, relative_null_point=(39.75872, 116.04142)):
PipelineOp.__init__(self)
self.tiles = {}
self.data_op = GeolifeData()
self.users = np.array(users)
self.ds = ds
self.dt = dt
self.relative_null_lat = relative_null_point[0]
self.relative_null_lon = relative_null_point[1]
def perform(self):
for uid in self.users:
for pt, plot in self.data_op.trajectories(uid):
traj_pt = TrajectoryPoint(pt, uid)
lat, lon = self.meters_for_lat_lon(traj_pt.lat, traj_pt.lon)
t = traj_pt.t
local_lat_meters = int(lat / self.ds) * self.ds
local_lon_meters = int(lon / self.ds) * self.ds
local_lat, local_lon = self.get_lat_lng_from_meters(local_lat_meters, local_lon_meters)
local_t = int(t / self.dt) * self.dt
tile_hash = "lat{}_lon{}_t{}".format(local_lat, local_lon, local_t)
tile = self.hash_tile(tile_hash)
# extract first column (uid)
users = [sub_list[0] for sub_list in tile]
if traj_pt.uid not in users:
tile.append([traj_pt.uid, traj_pt.lat, traj_pt.lon, t, self.ds, self.dt])
return self._apply_output(self.tiles)
def hash_tile(self, tile_hash):
"""
Returns an existing tile based on tile hash if already generated.
Otherwise, generates and returns a new list for the given tile_hash.
"""
tile = self.tiles.get(tile_hash, None)
if tile is None:
tile = []
self.tiles[tile_hash] = tile
return tile
def meters_for_lat_lon(self, lat, lon):
"""
Calculates X and Y distances in meters.
https://stackoverflow.com/a/3024728
"""
delta_latitude = lat - self.relative_null_lat
delta_longitude = lon - self.relative_null_lon
latitude_circumference = self.EARTH_CIRCUMFERENCE_AT_EQUATOR_METERS * cos(self.deg_to_rad(self.relative_null_lat))
result_x = delta_longitude * latitude_circumference / 360
result_y = delta_latitude * self.EARTH_CIRCUMFERENCE_THROUGH_POLES_METERS / 360
return result_x, result_y
def get_lat_lng_from_meters(self, lat, lon):
latitude_circumference = self.EARTH_CIRCUMFERENCE_AT_EQUATOR_METERS * cos(self.deg_to_rad(self.relative_null_lat))
delta_latitude = lon * 360 / self.EARTH_CIRCUMFERENCE_THROUGH_POLES_METERS
delta_longitude = lat * 360 / latitude_circumference
result_lat = delta_latitude + self.relative_null_lat
result_lng = delta_longitude + self.relative_null_lon
return result_lat, result_lng
@staticmethod
def deg_to_rad(degrees):
return degrees * pi / 180
class GraphContactPointsOp(PipelineOp):
def __init__(self, hashed_tiles, weight):
PipelineOp.__init__(self)
self.hashed_tiles = hashed_tiles
self.weight = weight
assert(weight in ['dist_weight', 'count_weight'])
def perform(self):
contact_points = [['uid1', 'uid2', 'ds', 'dt', 'tile_hash', 'dist_apart', 'time_diff', 'lat1', 'lat2', 'lon1', 'lon2', 't1', 't2']]
tiles = self.hashed_tiles.items()
tile_count = len(tiles)
op_count = 0
graph = nx.Graph()
delta = (None, None)
for tile_hash, uids in tiles:
if not tile_hash:
graph_filepath = 'app/data/graphs/no_tiles_from_data.png'
return self._apply_output({"graph_filepath": graph_filepath, "graph_generated": False})
            if delta == (None, None):
                delta = (uids[0][4], uids[0][5])
if len(uids) > 1:
contact_pairs = itertools.combinations(uids, 2)
for user_pair in contact_pairs:
user1, user2 = user_pair
u1_uid, u1_lat, u1_lon, u1_t, u1_ds, u1_dt = user1
u2_uid, u2_lat, u2_lon, u2_t, u2_ds, u2_dt = user2
u1_lat_lon = (u1_lat, u1_lon)
u2_lat_lon = (u2_lat, u2_lon)
distance = dist_apart(u1_lat_lon, u2_lat_lon)
time_difference = abs(u1_t - u2_t)
contact_points.append([u1_uid, u2_uid, u1_ds, u1_dt, tile_hash, distance, time_difference, u1_lat, u2_lat, u1_lon, u2_lon, u1_t, u2_t])
if self.weight == 'dist_weight':
graph = weight_by_distance(graph, user1, user2)
elif self.weight == 'count_weight':
                        graph = weight_by_count(graph, user1, user2)
op_count += 1
print("Remaining Tiles: {}".format(tile_count - op_count))
# graph_filepath = 'app/data/graphs/{}.png'.format(str(delta[0]) + 'ds_' + str(delta[1]) + 'dt')
# nx.draw_circular(graph, with_labels=True) # spectral circular random
# plt.savefig(graph_filepath, bbox_inches='tight')
ds, dt = delta
gml_filepath = 'app/data/graphs/{}.gml'.format(str(ds) + 'ds_' + str(dt) + 'dt_' + str(self.weight))
nx.write_gml(graph, gml_filepath)
# largest_comp = find_largest_component(graph)
# avg_degree = find_average_degree(graph)
# graph_results(largest_comp, avg_degree, deltas)
return self._apply_output({"contact_points": np.asarray(contact_points), "graph_filepath": gml_filepath, "graph_generated": True})
class GraphHottestPointsOp(PipelineOp):
def __init__(self, hashed_tiles, weight):
PipelineOp.__init__(self)
self.hashed_tiles = hashed_tiles
self.weight = weight
def perform(self):
contact_points = [['uid1', 'uid2', 'ds', 'dt', 'tile_hash', 'dist_apart', 'time_diff', 'lat1', 'lat2', 'lon1', 'lon2', 't1', 't2']]
user_count_in_tiles = [len(uids) for tile_hash, uids in self.hashed_tiles.items()]
hot_zone_count = max(user_count_in_tiles)
graph = nx.Graph()
delta = (None, None)
for tile_hash, uids in self.hashed_tiles.items():
            if delta == (None, None):
                delta = (uids[0][4], uids[0][5])
if len(uids) == hot_zone_count:
contact_pairs = itertools.combinations(uids, 2)
for user_pair in contact_pairs:
user1, user2 = user_pair
u1_uid, u1_lat, u1_lon, u1_t, u1_ds, u1_dt = user1
u2_uid, u2_lat, u2_lon, u2_t, u2_ds, u2_dt = user2
u1_lat_lon = (u1_lat, u1_lon)
u2_lat_lon = (u2_lat, u2_lon)
distance = dist_apart(u1_lat_lon, u2_lat_lon)
time_difference = abs(u1_t - u2_t)
contact_points.append([u1_uid, u2_uid, u1_ds, u1_dt, tile_hash, distance, time_difference, u1_lat, u2_lat, u1_lon, u2_lon, u1_t, u2_t])
if self.weight == 'dist_weight':
graph = weight_by_distance(graph, user_pair[0], user_pair[1])
elif self.weight == 'count_weight':
graph = weight_by_count(graph, user_pair[0], user_pair[1])
ds, dt = delta
gml_filepath = 'app/data/graphs/{}.gml'.format(str(ds) + 'ds_' + str(dt) + 'dt_hot_zones')
nx.write_gml(graph, gml_filepath)
return self._apply_output({"contact_points": np.asarray(contact_points), "gml_filepath": gml_filepath, "graph_generated": True})
def weight_by_count(graph, user1, user2):
u1_uid, u1_lat, u1_lon, u1_t, u1_ds, u1_dt = user1
u2_uid, u2_lat, u2_lon, u2_t, u2_ds, u2_dt = user2
u1_lat_lon = (u1_lat, u1_lon)
u2_lat_lon = (u2_lat, u2_lon)
distance = dist_apart(u1_lat_lon, u2_lat_lon)
time_difference = abs(u1_t - u2_t)
if not graph.has_edge(u1_uid, u2_uid):
graph.add_edge(u1_uid, u2_uid, weight=1, ds=time_difference, distance=distance)
else:
graph[u1_uid][u2_uid]['weight'] += 1
graph[u1_uid][u2_uid]['ds'] = time_difference
graph[u1_uid][u2_uid]['dt'] = distance
return graph
def weight_by_distance(graph, user1, user2):
u1_uid, u1_lat, u1_lon, u1_t, u1_ds, u1_dt = user1
u2_uid, u2_lat, u2_lon, u2_t, u2_ds, u2_dt = user2
u1_lat_lon = (u1_lat, u1_lon)
u2_lat_lon = (u2_lat, u2_lon)
distance = dist_apart(u1_lat_lon, u2_lat_lon)
time_difference = abs(u1_t - u2_t)
delta = (u1_ds, u1_dt)
ds, dt = delta
weight = dt - distance
if not graph.has_edge(u1_uid, u2_uid):
graph.add_edge(u1_uid, u2_uid, weight=weight, distance=distance, ds=time_difference, dt=distance)
else:
if graph[u1_uid][u2_uid]['weight'] > weight:
graph[u1_uid][u2_uid]['weight'] = weight
graph[u1_uid][u2_uid]['ds'] = time_difference
graph[u1_uid][u2_uid]['dt'] = distance
return graph
def dist_apart(p1, p2):
# convert decimal degrees to radians
lon1, lat1, lon2, lat2 = map(radians, [p1[1], p1[0], p2[1], p2[0]])
# haversine formula
dlon = lon2 - lon1
dlat = lat2 - lat1
a = sin(dlat / 2) ** 2 + cos(lat1) * cos(lat2) * sin(dlon / 2) ** 2
c = 2 * asin(sqrt(a))
# Radius of earth in kilometers is 6371
km = 6371 * c
return km * 1000
# def find_largest_component(graph):
# component_size = [len(c) for c in sorted(nx.connected_components(graph), key=len, reverse=True)]
# return str(max(component_size))
# # print("Largest Component Size: " + str(max(component_size)))
# # print("Component List: " + str(max(nx.connected_components(Graph), key=len)))
#
#
# def find_average_degree(graph):
# degree_list = []
# for n in graph.nodes():
# degree_list.append(graph.degree(n))
# return str(sum(degree_list) / graph.number_of_nodes())
# # print("Average degree of Nodes " + str(sum(listr)/Graph.number_of_nodes()))
#
#
# def graph_results(largest_comps, avg_degrees, deltas):
# if largest_comps or avg_degrees != 'NULL':
# plt.plot(deltas, largest_comps, label="Largest Component")
# plt.title("Size of Largest Connected Component")
# plt.ylabel("Largest Component Size")
# plt.xlabel("Delta settings")
# plt.savefig('app/viz/Largest_Component_Results.png', bbox_inches='tight')
#
# plt.plot(deltas, avg_degrees, label="Average Degree")
# plt.title("Average Degree of Nodes")
# plt.ylabel("Mean Degree")
# plt.xlabel("Delta settings")
# plt.savefig('app/viz/Avg_Degree_Results.png', bbox_inches='tight')
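A quick sanity check for the haversine helper dist_apart above (illustrative; it assumes this module is importable as app.lib.ops.tiles): one degree of longitude along the equator is about 111.19 km with the 6371 km Earth radius used here.

```python
from app.lib.ops.tiles import dist_apart

# (lat, lon) pairs one degree of longitude apart on the equator.
meters = dist_apart((0.0, 0.0), (0.0, 1.0))
print(round(meters))  # ~111195, i.e. 6371 km * pi / 180
```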
avg_line_length: 42.528517 | max_line_length: 155 | alphanum_fraction: 0.622709

---
hexsha: 29c588ee15469bb8e40e69a0648e06606bed8a52 | size: 671 | ext: py | lang: Python
path: simulator/urls.py | repo: i13-msrg/cidds | head_hexsha: bc212404b1d342ddac4bee220afdb440594f7faa | licenses: ["Apache-2.0"] (shared across the max_stars / max_issues / max_forks column groups)
max_stars_count: 8 (2018-10-27T19:56:38.000Z to 2021-09-23T11:47:52.000Z) | max_issues_count: 7 (2020-02-01T13:48:41.000Z to 2022-03-11T23:35:07.000Z) | max_forks_count: 1 (2019-04-05T08:00:12.000Z to 2019-04-05T08:00:12.000Z)
content:
from django.urls import path, include
from django.views.generic import TemplateView
from simulator.views import StartSim, SimulationHistory, Comparison, Details
urlpatterns = [
path('initialize/', StartSim.as_view(), name='initialize'),
path('history/', SimulationHistory.as_view(), name='history'),
path('start/', include([
path('cidds/', TemplateView.as_view(template_name='start_form_cidds.html'), name='startsim'),
path('cac/', TemplateView.as_view(template_name='start_form_cac.html'), name='startcac'),
])),
path('compare/', Comparison.as_view(), name='compare'),
path('<int:sim_id>/', Details.as_view(), name='detail'),
]
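For context, a project-level urls.py would mount these patterns under a prefix; the 'simulator/' prefix below is an assumption for illustration, not taken from this repository.

```python
from django.urls import include, path

urlpatterns = [
    path('simulator/', include('simulator.urls')),
]
```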
avg_line_length: 41.9375 | max_line_length: 101 | alphanum_fraction: 0.701937

---
hexsha: ea64e092b0377a68569a58b2d31713b062791f28 | size: 20,407 | ext: py | lang: Python
path: configs/common/Options.py | repo: code-lamem/lamem | head_hexsha: c28f72c13a81fbb105c7c83d1b2720a720f3a47f | licenses: ["BSD-3-Clause"] (shared across the max_stars / max_issues / max_forks column groups)
max_stars_count: null | max_issues_count: null | max_forks_count: null
content:
# Copyright (c) 2013 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Copyright (c) 2006-2008 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Lisa Hsu
import m5
from m5.defines import buildEnv
from m5.objects import *
from common.Benchmarks import *
from common import CpuConfig
from common import MemConfig
from common import PlatformConfig
def _listCpuTypes(option, opt, value, parser):
CpuConfig.print_cpu_list()
sys.exit(0)
def _listMemTypes(option, opt, value, parser):
MemConfig.print_mem_list()
sys.exit(0)
def _listPlatformTypes(option, opt, value, parser):
PlatformConfig.print_platform_list()
sys.exit(0)
# Add the very basic options that work also in the case of the no ISA
# being used, and consequently no CPUs, but rather various types of
# testers and traffic generators.
def addNoISAOptions(parser):
# Check for extra nvmain configuration override options
for arg in sys.argv:
if arg[:9] == "--nvmain-":
parser.add_option(arg, type="string", default="NULL",
help="Set NVMain configuration value for a parameter")
parser.add_option("-n", "--num-cpus", type="int", default=1)
parser.add_option("--sys-voltage", action="store", type="string",
default='1.0V',
help = """Top-level voltage for blocks running at system
power supply""")
parser.add_option("--sys-clock", action="store", type="string",
default='1GHz',
help = """Top-level clock for blocks running at system
speed""")
# Memory Options
parser.add_option("--list-mem-types",
action="callback", callback=_listMemTypes,
help="List available memory types")
parser.add_option("--mem-type", type="choice", default="DDR3_1600_8x8",
choices=MemConfig.mem_names(),
help = "type of memory to use")
parser.add_option("--mem-channels", type="int", default=1,
help = "number of memory channels")
parser.add_option("--mem-ranks", type="int", default=None,
help = "number of memory ranks per channel")
parser.add_option("--mem-size", action="store", type="string",
default="512MB",
help="Specify the physical memory size (single memory)")
parser.add_option("--memchecker", action="store_true")
# Cache Options
parser.add_option("--external-memory-system", type="string",
help="use external ports of this port_type for caches")
parser.add_option("--tlm-memory", type="string",
help="use external port for SystemC TLM cosimulation")
parser.add_option("--caches", action="store_true")
parser.add_option("--l2cache", action="store_true")
parser.add_option("--num-dirs", type="int", default=1)
parser.add_option("--num-l2caches", type="int", default=1)
parser.add_option("--num-l3caches", type="int", default=1)
parser.add_option("--l1d_size", type="string", default="64kB")
parser.add_option("--l1i_size", type="string", default="32kB")
parser.add_option("--l2_size", type="string", default="2MB")
parser.add_option("--l3_size", type="string", default="16MB")
parser.add_option("--l1d_assoc", type="int", default=2)
parser.add_option("--l1i_assoc", type="int", default=2)
parser.add_option("--l2_assoc", type="int", default=8)
parser.add_option("--l3_assoc", type="int", default=16)
parser.add_option("--cacheline_size", type="int", default=64)
# Enable Ruby
parser.add_option("--ruby", action="store_true")
# Run duration options
parser.add_option("-m", "--abs-max-tick", type="int", default=m5.MaxTick,
metavar="TICKS", help="Run to absolute simulated tick "
"specified including ticks from a restored checkpoint")
parser.add_option("--rel-max-tick", type="int", default=None,
metavar="TICKS", help="Simulate for specified number of"
" ticks relative to the simulation start tick (e.g. if "
"restoring a checkpoint)")
parser.add_option("--maxtime", type="float", default=None,
help="Run to the specified absolute simulated time in "
"seconds")
# Add common options that assume a non-NULL ISA.
def addCommonOptions(parser):
# start by adding the base options that do not assume an ISA
addNoISAOptions(parser)
# system options
parser.add_option("--list-cpu-types",
action="callback", callback=_listCpuTypes,
help="List available CPU types")
parser.add_option("--cpu-type", type="choice", default="AtomicSimpleCPU",
choices=CpuConfig.cpu_names(),
help = "type of cpu to run with")
parser.add_option("--checker", action="store_true");
parser.add_option("--cpu-clock", action="store", type="string",
default='2GHz',
help="Clock for blocks running at CPU speed")
parser.add_option("--smt", action="store_true", default=False,
help = """
Only used if multiple programs are specified. If true,
then the number of threads per cpu is same as the
number of programs.""")
parser.add_option("--elastic-trace-en", action="store_true",
help="""Enable capture of data dependency and instruction
fetch traces using elastic trace probe.""")
# Trace file paths input to trace probe in a capture simulation and input
# to Trace CPU in a replay simulation
parser.add_option("--inst-trace-file", action="store", type="string",
help="""Instruction fetch trace file input to
Elastic Trace probe in a capture simulation and
Trace CPU in a replay simulation""", default="")
parser.add_option("--data-trace-file", action="store", type="string",
help="""Data dependency trace file input to
Elastic Trace probe in a capture simulation and
Trace CPU in a replay simulation""", default="")
parser.add_option("-l", "--lpae", action="store_true")
parser.add_option("-V", "--virtualisation", action="store_true")
parser.add_option("--fastmem", action="store_true")
# dist-gem5 options
parser.add_option("--dist", action="store_true",
help="Parallel distributed gem5 simulation.")
parser.add_option("--dist-sync-on-pseudo-op", action="store_true",
help="Use a pseudo-op to start dist-gem5 synchronization.")
parser.add_option("--is-switch", action="store_true",
help="Select the network switch simulator process for a"\
"distributed gem5 run")
parser.add_option("--dist-rank", default=0, action="store", type="int",
help="Rank of this system within the dist gem5 run.")
parser.add_option("--dist-size", default=0, action="store", type="int",
help="Number of gem5 processes within the dist gem5 run.")
parser.add_option("--dist-server-name",
default="127.0.0.1",
action="store", type="string",
help="Name of the message server host\nDEFAULT: localhost")
parser.add_option("--dist-server-port",
default=2200,
action="store", type="int",
help="Message server listen port\nDEFAULT: 2200")
parser.add_option("--dist-sync-repeat",
default="0us",
action="store", type="string",
help="Repeat interval for synchronisation barriers among dist-gem5 processes\nDEFAULT: --ethernet-linkdelay")
parser.add_option("--dist-sync-start",
default="5200000000000t",
action="store", type="string",
help="Time to schedule the first dist synchronisation barrier\nDEFAULT:5200000000000t")
parser.add_option("--ethernet-linkspeed", default="10Gbps",
action="store", type="string",
help="Link speed in bps\nDEFAULT: 10Gbps")
parser.add_option("--ethernet-linkdelay", default="10us",
action="store", type="string",
help="Link delay in seconds\nDEFAULT: 10us")
# Run duration options
parser.add_option("-I", "--maxinsts", action="store", type="int",
default=None, help="""Total number of instructions to
simulate (default: run forever)""")
parser.add_option("--work-item-id", action="store", type="int",
help="the specific work id for exit & checkpointing")
parser.add_option("--num-work-ids", action="store", type="int",
help="Number of distinct work item types")
parser.add_option("--work-begin-cpu-id-exit", action="store", type="int",
help="exit when work starts on the specified cpu")
parser.add_option("--work-end-exit-count", action="store", type="int",
help="exit at specified work end count")
parser.add_option("--work-begin-exit-count", action="store", type="int",
help="exit at specified work begin count")
parser.add_option("--init-param", action="store", type="int", default=0,
help="""Parameter available in simulation with m5
initparam""")
parser.add_option("--initialize-only", action="store_true", default=False,
help="""Exit after initialization. Do not simulate time.
Useful when gem5 is run as a library.""")
# Simpoint options
parser.add_option("--simpoint-profile", action="store_true",
help="Enable basic block profiling for SimPoints")
parser.add_option("--simpoint-interval", type="int", default=10000000,
help="SimPoint interval in num of instructions")
parser.add_option("--take-simpoint-checkpoints", action="store", type="string",
help="<simpoint file,weight file,interval-length,warmup-length>")
parser.add_option("--restore-simpoint-checkpoint", action="store_true",
help="restore from a simpoint checkpoint taken with " +
"--take-simpoint-checkpoints")
# Checkpointing options
###Note that performing checkpointing via python script files will override
###checkpoint instructions built into binaries.
parser.add_option("--take-checkpoints", action="store", type="string",
help="<M,N> take checkpoints at tick M and every N ticks thereafter")
parser.add_option("--max-checkpoints", action="store", type="int",
help="the maximum number of checkpoints to drop", default=5)
parser.add_option("--checkpoint-dir", action="store", type="string",
help="Place all checkpoints in this absolute directory")
parser.add_option("-r", "--checkpoint-restore", action="store", type="int",
help="restore from checkpoint <N>")
parser.add_option("--checkpoint-at-end", action="store_true",
help="take a checkpoint at end of run")
parser.add_option("--work-begin-checkpoint-count", action="store", type="int",
help="checkpoint at specified work begin count")
parser.add_option("--work-end-checkpoint-count", action="store", type="int",
help="checkpoint at specified work end count")
parser.add_option("--work-cpus-checkpoint-count", action="store", type="int",
help="checkpoint and exit when active cpu count is reached")
parser.add_option("--restore-with-cpu", action="store", type="choice",
default="AtomicSimpleCPU", choices=CpuConfig.cpu_names(),
help = "cpu type for restoring from a checkpoint")
# CPU Switching - default switch model goes from a checkpoint
# to a timing simple CPU with caches to warm up, then to detailed CPU for
# data measurement
parser.add_option("--repeat-switch", action="store", type="int",
default=None,
help="switch back and forth between CPUs with period <N>")
parser.add_option("-s", "--standard-switch", action="store", type="int",
default=None,
help="switch from timing to Detailed CPU after warmup period of <N>")
parser.add_option("-p", "--prog-interval", type="str",
help="CPU Progress Interval")
# Fastforwarding and simpoint related materials
parser.add_option("-W", "--warmup-insts", action="store", type="int",
default=None,
help="Warmup period in total instructions (requires --standard-switch)")
parser.add_option("--bench", action="store", type="string", default=None,
help="base names for --take-checkpoint and --checkpoint-restore")
parser.add_option("-F", "--fast-forward", action="store", type="string",
default=None,
help="Number of instructions to fast forward before switching")
parser.add_option("-S", "--simpoint", action="store_true", default=False,
help="""Use workload simpoints as an instruction offset for
--checkpoint-restore or --take-checkpoint.""")
parser.add_option("--at-instruction", action="store_true", default=False,
help="""Treat value of --checkpoint-restore or --take-checkpoint as a
number of instructions.""")
parser.add_option("--spec-input", default="ref", type="choice",
choices=["ref", "test", "train", "smred", "mdred",
"lgred"],
help="Input set size for SPEC CPU2000 benchmarks.")
parser.add_option("--arm-iset", default="arm", type="choice",
choices=["arm", "thumb", "aarch64"],
help="ARM instruction set.")
def addSEOptions(parser):
# Benchmark options
parser.add_option("-c", "--cmd", default="",
help="The binary to run in syscall emulation mode.")
parser.add_option("-o", "--options", default="",
help="""The options to pass to the binary, use " "
around the entire string""")
parser.add_option("-e", "--env", default="",
help="Initialize workload environment from text file.")
parser.add_option("-i", "--input", default="",
help="Read stdin from a file.")
parser.add_option("--output", default="",
help="Redirect stdout to a file.")
parser.add_option("--errout", default="",
help="Redirect stderr to a file.")
def addFSOptions(parser):
from FSConfig import os_types
# Simulation options
parser.add_option("--timesync", action="store_true",
help="Prevent simulated time from getting ahead of real time")
# System options
parser.add_option("--kernel", action="store", type="string")
parser.add_option("--os-type", action="store", type="choice",
choices=os_types[buildEnv['TARGET_ISA']], default="linux",
help="Specifies type of OS to boot")
parser.add_option("--script", action="store", type="string")
parser.add_option("--frame-capture", action="store_true",
help="Stores changed frame buffers from the VNC server to compressed "\
"files in the gem5 output directory")
if buildEnv['TARGET_ISA'] == "arm":
parser.add_option("--bare-metal", action="store_true",
help="Provide the raw system without the linux specific bits")
parser.add_option("--list-machine-types",
action="callback", callback=_listPlatformTypes,
help="List available platform types")
parser.add_option("--machine-type", action="store", type="choice",
choices=PlatformConfig.platform_names(),
default="VExpress_EMM")
parser.add_option("--dtb-filename", action="store", type="string",
help="Specifies device tree blob file to use with device-tree-"\
"enabled kernels")
parser.add_option("--enable-security-extensions", action="store_true",
help="Turn on the ARM Security Extensions")
parser.add_option("--enable-context-switch-stats-dump", \
action="store_true", help="Enable stats dump at context "\
"switches and dump tasks file (required for Streamline)")
parser.add_option("--generate-dtb", action="store_true", default=False,
help="Automatically generate a dtb file")
# Benchmark options
parser.add_option("--dual", action="store_true",
help="Simulate two systems attached with an ethernet link")
parser.add_option("-b", "--benchmark", action="store", type="string",
dest="benchmark",
help="Specify the benchmark to run. Available benchmarks: %s"\
% DefinedBenchmarks)
# Metafile options
parser.add_option("--etherdump", action="store", type="string", dest="etherdump",
help="Specify the filename to dump a pcap capture of the" \
"ethernet traffic")
# Disk Image Options
parser.add_option("--disk-image", action="store", type="string", default=None,
help="Path to the disk image to use.")
parser.add_option("--root-device", action="store", type="string", default=None,
help="OS device name for root partition")
# Command line options
parser.add_option("--command-line", action="store", type="string",
default=None,
help="Template for the kernel command line.")
parser.add_option("--command-line-file", action="store",
default=None, type="string",
help="File with a template for the kernel command line")
| 53.56168
| 131
| 0.621796
|
305d6d0d48355bcf49bb2023cf4f462d8b108968
| 8,283
|
py
|
Python
|
pytorch/caffe2/contrib/nccl/nccl_ops_test.py
|
raghavnauhria/whatmt
|
c20483a437c82936cb0fb8080925e37b9c4bba87
|
[
"MIT"
] | 15
|
2019-08-10T02:36:38.000Z
|
2021-07-14T13:45:32.000Z
|
caffe2/contrib/nccl/nccl_ops_test.py
|
wxwoods/mctorch
|
7cd6eb51fdd01fa75ed9245039a4f145ba342de2
|
[
"BSD-3-Clause"
] | 7
|
2019-10-21T03:08:51.000Z
|
2022-03-11T23:54:28.000Z
|
pytorch/caffe2/contrib/nccl/nccl_ops_test.py
|
raghavnauhria/whatmt
|
c20483a437c82936cb0fb8080925e37b9c4bba87
|
[
"MIT"
] | 5
|
2019-09-27T02:41:40.000Z
|
2021-11-05T20:40:49.000Z
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import unittest
import hypothesis.strategies as st
from hypothesis import given, assume
import numpy as np
import time
import os
from caffe2.proto import caffe2_pb2
from caffe2.python import core, workspace, muji, dyndep
import caffe2.python.hypothesis_test_util as hu
np.random.seed(1)
dyndep.InitOpsLibrary('@/caffe2/caffe2/contrib/nccl:nccl_ops')
def gpu_device(i):
device_option = caffe2_pb2.DeviceOption()
device_option.device_type = caffe2_pb2.CUDA
device_option.device_id = i
return device_option
def benchmark(ws, net, warmups=5, iters=100):
for _ in range(warmups):
ws.run(net)
plan = core.Plan("plan")
plan.AddStep(core.ExecutionStep("test-step", net, iters))
before = time.time()
ws.run(plan)
after = time.time()
print("Timing network, time taken per-iteration: {:.6f}ms".format((
after - before) / float(iters) * 1000.0))
return after - before
@unittest.skipIf(not workspace.has_cuda_support, "NCCL only on CUDA GPU")
class NCCLOpsTest(hu.HypothesisTestCase):
@given(n=st.integers(min_value=2, max_value=workspace.NumCudaDevices()),
m=st.integers(min_value=1, max_value=1000),
in_place=st.booleans())
def test_nccl_allreduce(self, n, m, in_place):
xs = [np.random.randn(m).astype(np.float32) for i in range(n)]
inputs = [str("x_{}".format(i)) for i in range(n)]
prefix = "" if in_place else "o"
outputs = [str("{}x_{}".format(prefix, i)) for i in range(n)]
op = core.CreateOperator("NCCLAllreduce", inputs, outputs)
input_device_options = {n: gpu_device(i) for i, n in enumerate(inputs)}
def allreduce(*args):
assert len(args) == n
output = np.sum(args, axis=0)
return [output for _ in range(n)]
outputs = self.assertReferenceChecks(
hu.gpu_do, op, [xs[i] for i, _ in enumerate(inputs)],
allreduce, input_device_options)
for output in outputs:
np.testing.assert_array_equal(outputs[0], output)
self.assertEqual(outputs[0].tobytes(), output.tobytes())
@given(n=st.integers(min_value=2, max_value=workspace.NumCudaDevices()),
m=st.integers(min_value=1, max_value=1000),
root=st.integers(min_value=0,
max_value=workspace.NumCudaDevices() - 1))
def test_nccl_broadcast(self, n, m, root):
assume(root < n)
xs = [np.random.randn(m).astype(np.float32) for i in range(n)]
inputs = [str("x_{}".format(i)) for i in range(n)]
op = core.CreateOperator("NCCLBroadcast", inputs, inputs, root=root)
input_device_options = {n: gpu_device(i) for i, n in enumerate(inputs)}
def broadcast(*args):
assert len(args) == n
return [args[root] for _ in range(n)]
self.assertReferenceChecks(
hu.gpu_do, op, [xs[i] for i, _ in enumerate(inputs)],
broadcast, input_device_options)
@given(n=st.integers(min_value=2, max_value=workspace.NumCudaDevices()),
m=st.integers(min_value=1, max_value=1000),
# NCCL Reduce seems to deadlock for non-zero roots.
root=st.integers(min_value=0, max_value=0),
in_place=st.booleans())
def test_nccl_reduce(self, n, m, root, in_place):
assume(in_place is False or root == 0)
xs = [np.random.randn(m).astype(np.float32) for i in range(n)]
inputs = [str("x_{}".format(i)) for i in range(n)]
op = core.CreateOperator(
"NCCLReduce", inputs,
inputs[root] if in_place else b"o", root=root)
input_device_options = {n: gpu_device(i) for i, n in enumerate(inputs)}
def reduce(*args):
assert len(args) == n
return [np.sum(args, axis=0)]
self.assertReferenceChecks(
hu.gpu_do, op, [xs[i] for i, _ in enumerate(inputs)],
reduce, input_device_options)
@given(n=st.integers(min_value=2, max_value=workspace.NumCudaDevices()),
m=st.integers(min_value=1, max_value=1000))
def test_nccl_allgather(self, n, m):
xs = [np.random.randn(m).astype(np.float32) for i in range(n)]
inputs = [str("x_{}".format(i)) for i in range(n)]
outputs = [str("o_{}".format(i)) for i in range(n)]
op = core.CreateOperator("NCCLAllGather", inputs, outputs)
input_device_options = {n: gpu_device(i) for i, n in enumerate(inputs)}
def allgather(*args):
assert len(args) == n
return [np.stack(args, axis=0) for _ in range(n)]
outputs = self.assertReferenceChecks(
hu.gpu_do, op, [xs[i] for i, _ in enumerate(inputs)],
allgather, input_device_options)
for output in outputs:
np.testing.assert_array_equal(outputs[0], output)
self.assertEqual(outputs[0].tobytes(), output.tobytes())
@given(n=st.integers(min_value=2, max_value=workspace.NumCudaDevices()),
m=st.integers(min_value=1, max_value=1000))
def test_nccl_reduce_scatter(self, n, m):
xs = [np.random.randn(n, m).astype(np.float32) for i in range(n)]
inputs = [str("x_{}".format(i)) for i in range(n)]
outputs = [str("o_{}".format(i)) for i in range(n)]
op = core.CreateOperator("NCCLReduceScatter", inputs, outputs)
input_device_options = {n: gpu_device(i) for i, n in enumerate(inputs)}
def reduce_scatter(*args):
assert len(args) == n
reduced = sum(args)
assert len(reduced.shape) > 1
ref = [reduced[i, :] for i in range(n)]
return ref
self.assertReferenceChecks(
hu.gpu_do, op, [xs[i] for i, _ in enumerate(inputs)],
reduce_scatter, input_device_options)
@given(n=st.integers(min_value=2, max_value=workspace.NumCudaDevices()),
m=st.integers(min_value=100000, max_value=100000),
iters=st.integers(min_value=1, max_value=100),
net_type=st.sampled_from(["dag", "async_dag", "simple"]))
def _test_nccl_sync(self, n, m, iters, net_type):
inputs = [str("x_{}".format(i)) for i in range(n)]
extra_inputs = [str("xe_{}".format(i)) for i in range(n)]
net = core.Net("asdf")
net.Proto().type = net_type
net.Proto().num_workers = n
for i in range(n):
net.ConstantFill([], inputs[i], shape=[m], value=0.0,
device_option=gpu_device(i))
net.ConstantFill([], extra_inputs[i], shape=[m], value=1.0,
device_option=gpu_device(i))
for _ in range(iters):
net.Sum([inputs[i], extra_inputs[i]], [inputs[i]],
device_option=gpu_device(i))
net.NCCLReduce(inputs, [inputs[0]], device_option=gpu_device(0))
self.ws.run(net)
np.testing.assert_array_equal(
self.ws.blobs[inputs[0]].fetch(),
np.full(shape=(m,), fill_value=iters * n, dtype=np.float32))
@unittest.skipIf(not os.environ.get("CAFFE2_BENCHMARK"), "Benchmark")
def test_timings(self):
for n in range(2, workspace.NumCudaDevices()):
for in_place in [False, True]:
                xs = [np.random.randn(int(1e7)).astype(np.float32)
for i in range(n)]
inputs = [str("x_{}".format(i)) for i in range(n)]
prefix = "" if in_place else "o"
outputs = [str("{}x_{}".format(prefix, i)) for i in range(n)]
net = core.Net("test")
net.NCCLAllreduce(inputs, outputs)
net.RunAllOnGPU()
for i in range(n):
self.ws.create_blob(inputs[i]).feed(xs[i], gpu_device(i))
self.ws.run(net)
net_time = benchmark(self.ws, net)
vanilla = core.Net("vanilla")
muji.Allreduce(vanilla, inputs)
vanilla_time = benchmark(self.ws, vanilla)
print("Speedup for NCCL: {:.2f}".format(
vanilla_time / net_time))
| 42.917098
| 79
| 0.602801
|
29beb8039763bb6b297904924ed5c2e46ee21d91
| 42,193
|
py
|
Python
|
MiTiSegmentor.py
|
AIEMMU/MiTSegmentor
|
190dccff3ebc9167c8bd1ea75786626f1e0efcb8
|
[
"MIT"
] | null | null | null |
MiTiSegmentor.py
|
AIEMMU/MiTSegmentor
|
190dccff3ebc9167c8bd1ea75786626f1e0efcb8
|
[
"MIT"
] | null | null | null |
MiTiSegmentor.py
|
AIEMMU/MiTSegmentor
|
190dccff3ebc9167c8bd1ea75786626f1e0efcb8
|
[
"MIT"
] | null | null | null |
from tkinter import filedialog
from tkinter import *
from PIL import Image, ImageTk
from tkinter.messagebox import showinfo
from skimage import measure, morphology
import math
import numpy as np
import cv2 as cv
import os
import open3d as o3d
import pandas as pd
import shutil
import time
# our files
from PopUpClasses import *
####### version info #######
# python 3.6 # tkinter # PIL # numpy = 1.16.2 # cv2 = 4.1.1 # os # open3d = 0.8.0.0 # random
class ScanOBJGenerator(Tk):
# initialisation
def __init__(self):
super().__init__()
self.threshold = 40
self.blobMinSizeVal = 50
self.downsampleFactor = 4
self.cellBase = 40
self.usedThres =(0,0)
self.imgTopSize = (0,0)
self.imgSideSize = (0,0)
self.imgFrontSize = (0,0)
self.maxSize =self.winfo_screenwidth()//3
self.gridSize = (0,0)
self.gridCenter = (0,0)
self.gridRotation = 0
self.viewThresholdVar = IntVar()
self.viewCellVar = IntVar()
self.layers = []
self.traySize = 50
self.trayCSV = []
self.init_GUI()
self.workingPath = ""
self.blobCenterOfMass = []
self.TL = 0
self.TR = 0
self.BL = 0
self.BR = 0
def init_GUI(self):
#main window title and size
self.title("MiTiSegmenter")
self.minsize(self.winfo_screenwidth(),self.winfo_screenheight())
self.imageStack = None
# tool bar
menubar = Menu(self)
self.config(menu=menubar)
fileMenu = Menu(menubar)
fileMenu.add_command(label="Load Images", command=self.loadImages)
fileMenu.add_command(label="Generate Point Cloud", command=self.makeAllPointCloud)
fileMenu.add_command(label="Generate Info File", command=self.generateInfoFile)
fileMenu.add_command(label="Generate Tiff Stacks", command=self.exportTiffStacks)
fileMenu.add_command(label="Export Trays", command=self.exportTrays)
menubar.add_cascade(label="File", menu=fileMenu)
editMenu = Menu(menubar)
editMenu.add_command(label="Flip Trays Horizontal", command=self.flipTrayHor)
editMenu.add_command(label="Flip Trays Vertical", command=self.flipTrayVer)
menubar.add_cascade(label="Edit", menu=editMenu)
# three views front, side, top
self.panalFront = None
self.panalSide = None
self.panalTop = None
self.frontBar = None
self.sideBar = None
self.topBar = None
# thresholding
self.thresholdBar = Scale(self, from_=0, to=255, orient=HORIZONTAL, label="Threshold Value", length=self.winfo_screenwidth()/3.6, sliderlength=self.winfo_screenheight()//100, command=self.adjustThreshold)
self.thresholdBar.grid(row=3,column=0,sticky = W)
self.thresholdBar.set(self.threshold)
self.viewThresholdCheck = Checkbutton(self,text="View Threshold Image", variable = self.viewThresholdVar, command=self.refreshImages)
self.viewThresholdCheck.grid(row=2,column=0,sticky = SE)
self.applyThresholdBtn = Button(self,text="Apply Threshold",command=self.applyThreshold)
self.applyThresholdBtn.grid(row=3,column=0,sticky = E)
# traying
self.listboxValues = Listbox(self)
self.listboxValues.grid(row=2, column = 2, rowspan=2, sticky = W)
self.applyTrayBtn = Button(self, text="Apply Traying",command=self.applyTray)
self.applyTrayBtn.grid(row=2,column=1,sticky = N)
self.removeTrayBtn = Button(self, text="Delete Tray",command=self.deleteTray)
self.removeTrayBtn.grid(row=2, column=1,sticky=W)
self.addTrayBtn = Button(self, text="Add Tray",command=self.addTray)
self.addTrayBtn.grid(row=2, column=1,sticky=E)
self.RotateGridBar = Scale(self, from_=0, to=360, orient=HORIZONTAL, label="Rotate Tray", length=self.winfo_screenwidth()/3, sliderlength=self.winfo_screenheight()//100, command=self.adjustGridRotation)
self.RotateGridBar.grid(row=3,column=1,sticky = NW)
self.RotateGridBar.set(self.gridRotation)
self.ScaleGridBarH = Scale(self, from_=0, to=360, orient=HORIZONTAL, label="Scale Tray Horizontal", length=self.winfo_screenwidth()/6, sliderlength=self.winfo_screenheight()//100, command=self.adjustGridSizeHor)
self.ScaleGridBarH.grid(row=4,column=1,sticky = NW)
self.ScaleGridBarH.set(self.gridSize[0])
self.ScaleGridBarV = Scale(self, from_=0, to=360, orient=HORIZONTAL, label="Scale Tray Vertical", length=self.winfo_screenwidth()/6, sliderlength=self.winfo_screenheight()//100, command=self.adjustGridSizeVert)
self.ScaleGridBarV.grid(row=4,column=1,sticky = NE)
self.ScaleGridBarV.set(self.gridSize[1])
self.GridMidX = Scale(self, from_=0,to=360, orient=HORIZONTAL, label="Grid Center X", length=self.winfo_screenwidth()/6, sliderlength=self.winfo_screenheight()//100, command=self.AdjustGridCentreX)
self.GridMidX.grid(row=5,column=1,sticky = NW)
self.GridMidX.set(self.gridSize[0])
self.GridMidY = Scale(self, from_=0,to=360, orient=HORIZONTAL, label="Grid Center Y", length=self.winfo_screenwidth()/6, sliderlength=self.winfo_screenheight()//100, command=self.AdjustGridCentreY)
self.GridMidY.grid(row=5,column=1,sticky = NE)
self.GridMidY.set(self.gridSize[1])
self.listbox = Listbox(self)
self.listbox.grid(row=2, column = 2, rowspan=2, sticky = E)
self.applyTrayBtn = Button(self, text="Load CSVs",command=self.loadCSV)
self.applyTrayBtn.grid(row=4,column=2,sticky = N)
# blobing
self.removeDensity = Button(self,text="Remove Blob Interior", command=self.removeblobDensity)
self.removeDensity.grid(row=4, column = 0, sticky = NW)
self.blobMinSize = Scale(self, from_=0, to=100, orient=HORIZONTAL, label="Min Blob Size", length=self.winfo_screenwidth()/3.6, sliderlength=self.winfo_screenheight()//100, command=self.minBlobSize)
self.blobMinSize.grid(row=5, column = 0, sticky = W)
self.blobMinSize.set(self.blobMinSizeVal)
self.blobImage = Button(self,text="Seperate the Blobs", command=self.blobDetection)
self.blobImage.grid(row=5, column = 0, sticky= E)
self.cellBar = Scale(self, from_=0, to=255, orient=HORIZONTAL, label="Cel-shade Base Value", length=self.winfo_screenwidth()/3.6, sliderlength=self.winfo_screenheight()//100, command=self.adjustCellBase)
self.cellBar.grid(row=2,column=0,sticky = NW)
self.cellBar.set(self.cellBase)
self.viewCellCheck = Checkbutton(self,text="View Cel Image", variable = self.viewCellVar, command=self.refreshImages)
self.viewCellCheck.grid(row=4, column = 0, sticky = NE)# row=2,column=0,sticky = SE
self.applyCellBtn = Button(self,text="Apply Cel-Shade",command=self.cellShade)
self.applyCellBtn.grid(row=2,column=0,sticky = E)
def flipTrayHor(self):
for i in range(len(self.trayCSV)):
self.trayCSV[i] = np.fliplr(self.trayCSV[i])
self.refreshImages()
def flipTrayVer(self):
for i in range(len(self.trayCSV)):
self.trayCSV[i] = np.flipud(self.trayCSV[i])
self.refreshImages()
def loadCSV(self):
#print("create checkbox that gets the path of each of the csv s, : cannot be in a file name")
if len(self.layers) == 0:
print("no layers created")
self.resTrayPopUp = GetTrayCSVs(self,self.layers)
self.wait_window(self.resTrayPopUp.top)
self.resTrayPopUp = self.resTrayPopUp.value
self.resTrayPopUp = self.resTrayPopUp.split("*")
for i in range(len(self.resTrayPopUp)):
#print(self.resTrayPopUp[i])
if self.resTrayPopUp[i] == ' ':
self.trayCSV.append(None)
elif self.resTrayPopUp[i] == '':
print("blankspace")
else:
tray = pd.read_csv(self.resTrayPopUp[i],header=None)
tray = np.array(tray.values)
self.trayCSV.append(tray)
self.refreshImages()
def deleteTray(self):
if self.listboxValues.size() > 0:
self.layers.pop(self.listboxValues.curselection()[0])
self.listboxValues.delete(self.listboxValues.curselection()[0])
            self.refreshImages()
def addTray(self):
self.listbox.insert(END,"tray part: " + "_" +str(self.topBar.get()))
def exportTrays(self):
items = self.listbox.get(0, END)
numOfOutputs = len(items)
# create the folders
lastOn = 0
for i in range(numOfOutputs):
if os.path.exists(self.workingPath+'/tray' + str(i)) == False:
os.mkdir(self.workingPath+'/tray' + str(i))
numberOfFrames = int(items[i].split('_')[1])
infoFile = open(self.workingPath+'/tray' + str(i) +'/' + "a_info.info","w")
infoFile.write("pixelsize " + str(self.pixelSizeX) + " " + str(self.pixelSizeY) +"\n")
infoFile.write("offset " + str(self.offsetX) + " " + str(self.offsetY) + "\n")
startLast = lastOn
for o in range(lastOn, numberOfFrames):
shutil.copyfile(self.workingPath+'/' + self.imagePaths[o],self.workingPath+'/tray' + str(i)+ '/' +self.imagePaths[o])
infoFile.write('"' + self.imagePaths[o] +'" ' + str(self.imagesHeightSlice[o]-self.imagesHeightSlice[startLast]) +"\n")
lastOn = o
infoFile.close()
def applyTray(self):
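        """Scan the (downsampled) stack for contiguous runs of slices with enough
        foreground pixels; each run is treated as one tray and the slice index with
        the most foreground in that run is stored in self.layers."""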
trayCount = 0
onTray = False
self.layers = []
self.listboxValues.delete(0,self.listboxValues.size())
maxValue = 0
layer = 0
loopRate = self.downsampleFactor
if loopRate == 0:
loopRate = 1
for i in range(0,self.imageStack.shape[0],loopRate):
temp = self.imageStack[i,:,:].astype('uint8')
#print(np.where(temp>0)[0].shape)
if np.where(temp>0)[0].shape[0] > self.blobMinSizeVal*10:
if onTray == False:
onTray = True
trayCount = trayCount + 1
if np.where(temp>0)[0].shape[0] > maxValue:
maxValue = np.where(temp>0)[0].shape[0]
layer = i
elif onTray == True:
self.layers.append(layer)
layer = 0
maxValue = 0
onTray = False
self.gridSize = []
temp = self.imageStack[0,:,:].astype('uint8')
for i in range(len(self.layers)):
self.gridSize.append(( ((temp.shape[0]//10)*9)//2, ((temp.shape[1]//10)*3)//2))
for i in range(len(self.layers)):
self.listboxValues.insert(END,"tray : "+ str(i+1) + "_" +str(self.layers[i]))
self.refreshImages()
def cellShade(self):
self.imageStack = self.imageStack-(self.imageStack%self.cellBase)
self.usedThres = (self.usedThres[0],1)
self.refreshImages()
def removeblobDensity(self):
for i in range(self.imageStack.shape[0]):
print("\rprocessing image : " + str(i) + " of " + str(self.imageStack.shape[0]),end=" ")
img = self.imageStack[i].astype('uint8')
img = cv.Canny(img,self.threshold,255)
            img[img != 0] = 1  # binarise the Canny edge mask (assignment, not comparison)
self.imageStack[i] = self.imageStack[i] * img
self.refreshImages()
def AdjustGridCentreY(self, val):
self.gridCenter = (self.gridCenter[0],int(val))
self.refreshImages()
def AdjustGridCentreX(self, val):
self.gridCenter = (int(val),self.gridCenter[1])
self.refreshImages()
def minBlobSize(self,val):
self.blobMinSizeVal = int(val)
def adjustCellBase(self,val):
self.cellBase = int(val)
self.refreshImages()
def adjustThreshold(self,val):
self.threshold = int(val)
self.refreshImages()
def adjustGridRotation(self,val):
self.gridRotation = int(val)
self.refreshImages()
def adjustGridSizeHor(self, val):
for i in range(len(self.layers)):
if self.layers[i] < self.topBar.get() + self.traySize and self.layers[i] > self.topBar.get() - self.traySize:
self.gridSize[i] = (int(val),self.gridSize[i][1])
#print(self.gridSize[i])
self.refreshImages()
def adjustGridSizeVert(self, val):
for i in range(len(self.layers)):
if self.layers[i] < self.topBar.get() + self.traySize and self.layers[i] > self.topBar.get() - self.traySize:
self.gridSize[i] = (self.gridSize[i][0],int(val))
self.refreshImages()
def applyThreshold(self):
if self.imageStack is None:
return
self.imageStack[self.imageStack <= self.threshold] = 0
self.viewThresholdVar.set(0)
self.usedThres = (1,self.usedThres[1])
self.refreshImages()
def refreshImages(self):
if self.imageStack is None:
return
self.frontSlider(self.frontBar.get())
self.sideSlider(self.sideBar.get())
self.topSlider(self.topBar.get())
def blobDetection(self):
if self.imageStack is None:
return
self.imageStack[self.imageStack >= self.threshold] = 255
self.imageStack = measure.label(self.imageStack)
self.imageStack = morphology.remove_small_objects(self.imageStack, min_size=self.blobMinSizeVal)
self.viewThresholdVar.set(0)
self.viewThresholdCheck.config(state="disabled")
self.refreshImages()
def organiseBlobs(self, unique):
numOn = 0
for i in range(unique.shape[0]):
            self.imageStack[self.imageStack == unique[i]] = numOn  # relabel blob (assignment, not comparison)
numOn = numOn + 1
def generate3DModel(self,img,path):
if img is None:
return
try:
verts, faces, normals, values = measure.marching_cubes_lewiner((img != 0), 0)#fit this into the model from open3d
faces=faces+1
verts = verts- (verts.min(axis=0)+verts.max(axis=0))//2
verts[:,0] = verts[:,0]* self.pixelSizeX # meters to microns
verts[:,1] = verts[:,1]* self.pixelSizeY
verts[:,2] = verts[:,2]* self.pixelSizeZ
thefile = open(os.path.expanduser('~')+'/meshFull.obj', 'w')
for item in verts:
thefile.write("v {0} {1} {2}\n".format(item[0],item[1],item[2]))
for item in normals:
thefile.write("vn {0} {1} {2}\n".format(item[0],item[1],item[2]))
for item in faces:
thefile.write("f {0}//{0} {1}//{1} {2}//{2}\n".format(item[0],item[1],item[2]))
thefile.close()
pcd_load = o3d.io.read_triangle_mesh(os.path.expanduser('~')+'/meshFull.obj')
o3d.io.write_triangle_mesh(path+'/'+"sync.ply", pcd_load)
os.remove(os.path.expanduser('~')+'/meshFull.obj')
#print("file written")
except:
print("file not working properly")
def makeAllPointCloud(self):
if self.imageStack is None:
return
verts, faces, normals, values = measure.marching_cubes_lewiner((self.imageStack != 0), 0)#fit this into the model from open3d
faces=faces+1
thefile = open(os.path.expanduser('~')+'/meshFull.obj', 'w')
for item in verts:
thefile.write("v {0} {1} {2}\n".format(item[0]/self.downsampleFactor,item[1],item[2]))
for item in normals:
thefile.write("vn {0} {1} {2}\n".format(item[0],item[1],item[2]))
for item in faces:
thefile.write("f {0}//{0} {1}//{1} {2}//{2}\n".format(item[0],item[1],item[2]))
thefile.close()
pcd_load = o3d.io.read_triangle_mesh(os.path.expanduser('~')+'/meshFull.obj')
#o3d.visualization.draw_geometries([pcd_load])
o3d.io.write_triangle_mesh(os.path.expanduser('~')+'/sync.ply', pcd_load)
os.remove(os.path.expanduser('~')+'/meshFull.obj')
def WriteStacks(self, i, blobName, bounds, imType):
dirName = "Raw"
if imType == 1: #processed
dirName = "Pro"
elif imType == 2: # segmentation
dirName = "Seg"
if os.path.isdir(self.workingPath + '/'+"blobstacks" + '/' + str(blobName) + '/' + dirName) == False:
os.mkdir(self.workingPath + '/'+"blobstacks"+ '/' + str(blobName) +'/'+dirName)
infoFile = open(self.workingPath + '/' + 'blobstacks'+'/' + str(blobName) +'/'+ dirName +'/' + "a_info.info","w")
infoFile.write("pixelsize " + str(self.pixelSizeX) + " " + str(self.pixelSizeY) +"\n")
infoFile.write("offset " + str(self.offsetX) + " " + str(self.offsetY) + "\n")
p = i
for o in range(bounds[i][0],bounds[i][1]+1):
print("check this +1 doenst break anything")
infoFile.write('"' + dirName + self.imagePaths[o] +'" ' + str(self.imagesHeightSlice[o]-self.imagesHeightSlice[bounds[i][0]]) +"\n")
img = cv.imread(self.workingPath + '/' + self.imagePaths[o],0).astype("uint8")[bounds[p][2]:bounds[p][3], bounds[p][4]:bounds[p][5]]
if imType == 1: #processed
img = self.ViewImagePreviews(img,1,1,False,self.downsampleFactor,self.threshold,self.cellBase)#self.processSingleTray(img)
elif imType == 2: # segmentation
img = self.ViewImagePreviews(img,1,1,False,self.downsampleFactor,self.threshold,self.cellBase)#img * (self.processSingleTray(img)//255)
img[img >= 1] = 255
cv.imwrite(self.workingPath + '/' + 'blobstacks'+'/'+ str(blobName) + '/' + dirName +'/' + dirName + self.imagePaths[o], img)
infoFile.close()
def exportTiffStacks(self):
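        """Crop a sub-stack around every labelled blob, optionally name each blob
        from the nearest tray-grid cell, and write the requested raw/processed/
        segmentation tiff stacks (and 3D models) into per-blob folders under
        'blobstacks'."""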
start = time.perf_counter()
if self.imageStack is None:
return
self.resPopUp = GenerateTiffStackWindow(self)
self.wait_window(self.resPopUp.top)
self.resPopUp.value = self.resPopUp.value.split(';')
generateRaw = int(self.resPopUp.value[0])
generatePro = int(self.resPopUp.value[1])
generateMod = int(self.resPopUp.value[2])
generateSeg = int(self.resPopUp.value[3])
#nt("do the segmentation tiffs")
if os.path.isdir(self.workingPath + '/'+"blobstacks") == False:
os.mkdir(self.workingPath + '/'+"blobstacks")
unique = np.unique(self.imageStack)
bounds = []
blobCenters = []
gridCenters = []
gridNames = []
for i in range(unique.shape[0]):
print("\r Getting blobs : "+str(i),end=" ")
if unique[i] == 0: # background
continue
currentBlob = np.where(self.imageStack == unique[i])
Z = currentBlob[0].reshape((currentBlob[0].shape[0],1))
Y = currentBlob[1].reshape((currentBlob[1].shape[0],1))*self.downsampleFactor
X = currentBlob[2].reshape((currentBlob[2].shape[0],1))*self.downsampleFactor
bounds.append((np.amin(Z),np.amax(Z),np.amin(Y),np.amax(Y),np.amin(X),np.amax(X)))
blobCenters.append( ( (np.amin(Z)+np.amax(Z))//2, (np.amin(Y)+np.amax(Y))//2, (np.amin(X)+np.amax(X))//2 ))
if len(self.layers) > 0:
self.flipTrayVer()
for i in range(len(self.layers)):
topInterp = np.linspace((self.TL[0],self.TL[1]),(self.TR[0],self.TR[1]),num=self.trayCSV[i].shape[0]+1,endpoint=True,dtype=('int32'))
bottomInterp = np.linspace((self.BL[0],self.BL[1]),(self.BR[0],self.BR[1]),num=self.trayCSV[i].shape[0]+1,endpoint=True,dtype=('int32'))
for o in range(self.trayCSV[i].shape[0]):
#interpolate between the top and bottom downward looping to fill the gaps
cols1 = np.linspace(topInterp[o],bottomInterp[o],num=self.trayCSV[i].shape[1]+1,endpoint=True,dtype=('int32'))
cols2 = np.linspace(topInterp[o+1],bottomInterp[o+1],num=self.trayCSV[i].shape[1]+1,endpoint=True,dtype=('int32'))#0+2
for q in range(self.trayCSV[i].shape[1]):#cols1.shape[0]
X = (cols1[q][0] + cols2[q][0])//2
Y = (cols1[q][1] + cols2[q][1])//2
gridCenters.append([self.layers[i],Y,X])
gridNames.append(self.trayCSV[i][o][q])
# create a colleration between blobs and spread sheet
TrayToBlob = []
for i in range(len(blobCenters)):
dist = 999999999
refPoint = 0
# loop round and get the lowest distance
for o in range(len(gridCenters)):
distance = math.sqrt(
(blobCenters[i][0]-gridCenters[o][0])*(blobCenters[i][0]-gridCenters[o][0]) +
(blobCenters[i][1]-gridCenters[o][1])*(blobCenters[i][1]-gridCenters[o][1]) +
(blobCenters[i][2]-gridCenters[o][2])*(blobCenters[i][2]-gridCenters[o][2]))
if dist > distance:
dist = distance
refPoint = o
if refPoint in TrayToBlob:
indx = 1
gotName = True
while(gotName):
if gridNames[refPoint]+str(indx) in gridNames:
indx = indx+1
else:
gridNames.append(gridNames[refPoint]+str(indx))
refPoint = len(gridNames)-1
gotName = False
TrayToBlob.append(refPoint)
# cycle through and create directories
self.flipTrayVer()
for i in range(len(bounds)): # was grid names
if len(self.layers) > 0:
blobName = gridNames[i]
else:
blobName = 'blob'+ str(i)
print("\r making Directories : "+str(i),end=" ")
if os.path.isdir(self.workingPath + '/'+"blobstacks" + '/' + str(blobName) ) == False:
os.mkdir(self.workingPath + '/'+"blobstacks"+ '/' + str(blobName))
if generateRaw == 1:
self.WriteStacks(i, blobName, bounds, 0)
if generatePro == 1:
self.WriteStacks(i, blobName, bounds, 1)
if generateSeg == 1:
self.WriteStacks(i, blobName, bounds, 2)
if generateMod == 1:
blobs = os.listdir(self.workingPath + '/' + 'blobstacks')
for i in range(len(blobs)):
folders = os.listdir(self.workingPath + '/' + 'blobstacks' + '/' + blobs[i])
for o in range(len(folders)):
# folder containing the tiff stacks
stk = self.LoadImageStack(self.workingPath + '/' + 'blobstacks' + '/' + blobs[i]+ '/'+folders[o])
self.generate3DModel(stk,self.workingPath + '/' + 'blobstacks' + '/' + blobs[i]+ '/'+folders[o])
end = time.perf_counter()
print(end - start)
def ViewImagePreviews(self,img, viewThres, viewCell, downSample, downFactor, thres, cell):
#if downSample:
#img = np.delete(img,list(range(0,img.shape[0],downFactor)),axis=0)
#print("no downsample anymore")
if viewCell == 1:
img = img-(img%cell)
if viewThres == 1:
img[img <= thres] = 0
return img
def frontSlider(self,val):
# right image
temp = self.imageStack[:,:,int(val)-1].astype('uint8')
temp = cv.cvtColor(temp,cv.COLOR_GRAY2RGB)
for i in range(len(self.layers)):
temp = cv.line(temp,pt1=(0,self.layers[i]),pt2=(temp.shape[1],self.layers[i]),color=(0,0,255),thickness=3)
temp = self.ViewImagePreviews(temp,self.viewThresholdVar.get(),self.viewCellVar.get(),True,self.downsampleFactor,self.threshold,self.cellBase)
temp = cv.resize(temp,self.imgFrontSize)
temp = Image.fromarray(temp)
self.imgFront = ImageTk.PhotoImage(image=temp)
self.panalFront.configure(image=self.imgFront)
self.panalFront.image = self.imgFront
def sideSlider(self,val):
temp = self.imageStack[:,int(val)-1,:].astype('uint8')
temp = self.ViewImagePreviews(temp,self.viewThresholdVar.get(),self.viewCellVar.get(),True,self.downsampleFactor,self.threshold,self.cellBase)
temp = cv.resize(temp,self.imgSideSize)
temp = Image.fromarray(temp)
self.imgSide = ImageTk.PhotoImage(image=temp)
self.panalSide.configure(image=self.imgSide)
self.panalSide.image = self.imgSide
def rotate(self, origin, point, angle):
"""
Rotate a point counterclockwise by a given angle around a given origin.
"""
angle = math.radians(angle)
ox, oy = origin
px, py = point
qx = ox + math.cos(angle) * (px - ox) - math.sin(angle) * (py - oy)
qy = oy + math.sin(angle) * (px - ox) + math.cos(angle) * (py - oy)
return int(qx),int(qy)
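    # Worked example for rotate() above (illustrative numbers, not used by the
    # tool): rotating the point (10, 0) by 90 degrees about the origin (0, 0):
    #   qx = 0 + cos(90 deg)*10 - sin(90 deg)*0 ~= 0
    #   qy = 0 + sin(90 deg)*10 + cos(90 deg)*0 = 10
    # so rotate((0, 0), (10, 0), 90) returns (0, 10).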
def putGridOnImage(self,temp, val):
for i in range(len(self.layers)):
if self.layers[i] < int(val) + self.traySize and self.layers[i] > int(val) - self.traySize:
self.ScaleGridBarV.set(self.gridSize[i][1])
self.ScaleGridBarH.set(self.gridSize[i][0])
halfTemp = (self.gridCenter[0],self.gridCenter[1])
self.TL = (halfTemp[0]-self.gridSize[i][0],halfTemp[1]-self.gridSize[i][1])
self.TR = (halfTemp[0]+self.gridSize[i][0],halfTemp[1]-self.gridSize[i][1])
self.BL = (halfTemp[0]-self.gridSize[i][0],halfTemp[1]+self.gridSize[i][1])
self.BR = (halfTemp[0]+self.gridSize[i][0],halfTemp[1]+self.gridSize[i][1])
self.TL = self.rotate(halfTemp,self.TL,self.gridRotation)
self.TR = self.rotate(halfTemp,self.TR,self.gridRotation)
self.BL = self.rotate(halfTemp,self.BL,self.gridRotation)
self.BR = self.rotate(halfTemp,self.BR,self.gridRotation)
#print(self.trayCSV[i])
temp = cv.putText(temp,self.trayCSV[i][0][0],self.TL,cv.FONT_HERSHEY_SIMPLEX,1,(0,255,0),2)
temp = cv.putText(temp,self.trayCSV[i][self.trayCSV[i].shape[0]-1][0],self.BL,cv.FONT_HERSHEY_SIMPLEX,1,(0,255,0),2)
temp = cv.putText(temp,self.trayCSV[i][0][self.trayCSV[i].shape[1]-1],self.TR,cv.FONT_HERSHEY_SIMPLEX,1,(0,255,0),2)
temp = cv.putText(temp,self.trayCSV[i][self.trayCSV[i].shape[0]-1][self.trayCSV[i].shape[1]-1],self.BR,cv.FONT_HERSHEY_SIMPLEX,1,(0,255,0),2)
rowsY = np.linspace((self.TL[0],self.TL[1],self.TR[0],self.TR[1]),(self.BL[0],self.BL[1],self.BR[0],self.BR[1]), num=self.trayCSV[i].shape[0]+1, endpoint=True,dtype=('int32'))
rowsX = np.linspace((self.TL[0],self.TL[1],self.BL[0],self.BL[1]),(self.TR[0],self.TR[1],self.BR[0],self.BR[1]), num=self.trayCSV[i].shape[1]+1, endpoint=True,dtype=('int32'))
                for o in range(self.trayCSV[i].shape[0]+ 1): # creates the row lines; +1 because one more line than there are blocks is needed
pnt1 = (rowsY[o][0],rowsY[o][1])
pnt2 = (rowsY[o][2],rowsY[o][3])
temp = cv.line(temp,pt1=pnt1,pt2=pnt2,color=(0,255,0),thickness=1)
for o in range(self.trayCSV[i].shape[1]+1):
pnt1 = (rowsX[o][0],rowsX[o][1])
pnt2 = (rowsX[o][2],rowsX[o][3])
temp = cv.line(temp,pt1=pnt1,pt2=pnt2,color=(0,255,0),thickness=3)
                # get the interpolated positions across the top row and the bottom row
topInterp = np.linspace((self.TL[0],self.TL[1]),(self.TR[0],self.TR[1]),num=self.trayCSV[i].shape[1]+1,endpoint=True,dtype=('int32'))
bottomInterp = np.linspace((self.BL[0],self.BL[1]),(self.BR[0],self.BR[1]),num=self.trayCSV[i].shape[1]+1,endpoint=True,dtype=('int32'))
for o in range(topInterp.shape[0]):# down
#interpolate between the top and bottom downward looping to fill the gaps
cols = np.linspace(topInterp[o],bottomInterp[o],num=self.trayCSV[i].shape[0]+1,endpoint=True,dtype=('int32')) #inter top i and bottom i by the shape
for q in range(cols.shape[0]):
# draw circle at cols
temp = cv.circle(temp,(cols[q][0],cols[q][1]),2,(255,0,0))
return temp
def topSlider(self,val):
# left image
temp = self.imageStack[int(val)-1,:,:].astype('uint8')
temp = cv.cvtColor(temp,cv.COLOR_GRAY2RGB)
temp = self.ViewImagePreviews(temp,self.viewThresholdVar.get(),self.viewCellVar.get(),False,self.downsampleFactor,self.threshold,self.cellBase)
temp = self.putGridOnImage(temp,val)
temp = cv.resize(temp,self.imgTopSize)
temp = Image.fromarray(temp)
self.imgTop = ImageTk.PhotoImage(image=temp)
self.panalTop.configure(image=self.imgTop)
self.panalTop.image = self.imgTop
def image_resize(self,image, width = None, height = None, inter = cv.INTER_AREA):
# by thewaywewere https://stackoverflow.com/questions/44650888/resize-an-image-without-distortion-opencv accessed 11/11/19
# initialize the dimensions of the image to be resized and
# grab the image size
dim = None
(h, w) = image.shape[:2]
# if both the width and height are None, then return the
# original image
if width is None and height is None:
return image
# check to see if the width is None
if width is None:
# calculate the ratio of the height and construct the
# dimensions
r = height / float(h)
dim = (int(w * r), height)
# otherwise, the height is None
else:
# calculate the ratio of the width and construct the
# dimensions
r = width / float(w)
dim = (width, int(h * r))
# resize the image
resized = cv.resize(image, dim, interpolation = inter)
# return the resized image
return resized
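    # Usage sketch for image_resize (hypothetical sizes): resizing an image of
    # height 2000 and width 1000 with height=500 keeps the aspect ratio and
    # yields a 500 x 250 (h x w) result.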
def generateInfoFile(self):
self.resPopUp = InfoWindow(self)
self.wait_window(self.resPopUp.top)
#print(self.resPopUp.value)
resolution = self.resPopUp.value.split(";")
if len(resolution) < 3:
print("do error")
return
path = filedialog.askdirectory(title = "select image dir")
paths = os.listdir(path)
infoFile = open(path+"/a_info.info","w")
infoFile.write("pixelsize " + resolution[0] + " " + resolution[1]+"\n")
infoFile.write("offset 0 0\n")
for i in range(len(paths)):
infoFile.write('"' + paths[i]+'" ' + str(float(resolution[2])*i) +"\n")
infoFile.close()
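    # The a_info.info file written above (and parsed by LoadImageStack below) is
    # plain text with two header lines and one line per slice; the values shown
    # here are illustrative only:
    #   pixelsize 0.05 0.05
    #   offset 0 0
    #   "slice_0000.tif" 0.0
    #   "slice_0001.tif" 0.1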
def LoadImageStack(self, path):
imgstk = None
paths = os.listdir(path)
infoFile = ""
if len(paths) < 1:
showinfo("File Directory empty",path+ " : contains no files!")
return imgstk
for i in range(len(paths)):
if paths[i].endswith(".info"):
infoFile = paths[i]
break
if infoFile == "":
showinfo("No Scanner info file", path + " : contains no .info file from the scanner!")
return imgstk
info = open(path+'/'+infoFile,'r')
info = info.readlines()
imagePaths = []
imagesHeightSlice = []
pixelSizeX = 0
pixelSizeY = 0
pixelSizeZ = 0
offsetX = 0
offsetY = 0
for i in range(len(info)):
temp = info.pop(0)
if temp.startswith('p'):
temp = temp.split(" ")
pixelSizeX = float(temp[1])
pixelSizeY = float(temp[2])
elif temp.startswith('o'):
temp = temp.split(" ")
offsetX = float(temp[1])
offsetY = float(temp[2])
elif temp.startswith('"'):
temp = temp.replace('"','')
temp = temp.split(" ")
imagePaths.append(temp[0])
imagesHeightSlice.append(float(temp[1]))
imgstk = None
if os.path.exists(path + '/' + imagePaths[0]):
temp = cv.imread(path + '/' + imagePaths[0],0).astype("uint8")
imgstk = np.zeros((len(imagePaths),temp.shape[0],temp.shape[1])).astype("uint8")
for i in range(len(imagePaths)):
#print(path + '/' + imagePaths[i])
imgstk[i] = cv.imread(path + '/' + imagePaths[i],0).astype("uint8")
else:
imgstk = np.zeros((10,10,10))
print("this is an error")
pixelSizeZ = imagesHeightSlice[1]
return imgstk
def loadImages(self):
path = filedialog.askdirectory()
if path == "":
# file loading canceled
return
print("loading images")
self.imageStack = None
paths = os.listdir(path)
self.workingPath = path
infoFile = ""
if len(paths) < 1:
showinfo("File Directory empty",path+ " : contains no files!")
return
for i in range(len(paths)):
if paths[i].endswith(".info"):
infoFile = paths[i]
break
if infoFile == "":
showinfo("No Scanner info file", path + " : contains no .info file from the scanner!")
return
info = open(path+'/'+infoFile,'r')
info = info.readlines()
self.imagePaths = []
self.imagesHeightSlice = []
self.pixelSizeX = 0
self.pixelSizeY = 0
self.pixelSizeZ = 0
self.offsetX = 0
self.offsetY = 0
print("this code is a duplicat as load image stack, calle the load stack be change to add if downsampling")
for i in range(len(info)):
temp = info.pop(0)
if temp.startswith('p'):
temp = temp.split(" ")
self.pixelSizeX = float(temp[1])
self.pixelSizeY = float(temp[2])
elif temp.startswith('o'):
temp = temp.split(" ")
self.offsetX = float(temp[1])
self.offsetY = float(temp[2])
elif temp.startswith('"'):
temp = temp.replace('"','')
temp = temp.split(" ")
self.imagePaths.append(temp[0])
self.imagesHeightSlice.append(float(temp[1]))
self.pixelSizeZ = self.imagesHeightSlice[1]
temp = cv.imread(path + '/' + self.imagePaths[0],0).astype("uint8")
self.imageStack = np.zeros((len(self.imagePaths),temp.shape[0]//self.downsampleFactor,temp.shape[1]//self.downsampleFactor)).astype("uint8")
for i in range(len(self.imagePaths)):
print("\rprocessing image : " + str(i) + " of " + str(len(self.imagePaths)),end=" ")
self.imageStack[i] = cv.resize(cv.imread(path + '/' + self.imagePaths[i],0).astype("uint8"),(temp.shape[1]//self.downsampleFactor,temp.shape[0]//self.downsampleFactor))
# get the bottom image (default in the scan)
self.imgTop = self.imageStack[0,:,:].astype('uint8')
self.imgTopSize = self.imgTop.shape
self.gridSize = ( ((self.imgTop.shape[0]//10)*9)//2, ((self.imgTop.shape[1]//10)*3)//2)
self.ScaleGridBarH = Scale(self, from_=0, to=(self.imgTop.shape[0]//2), orient=HORIZONTAL, label="Scale Tray Horizontal", length=self.winfo_screenwidth()/6, sliderlength=self.winfo_screenheight()//100, command=self.adjustGridSizeHor)
self.ScaleGridBarH.grid(row=4,column=1,sticky = NW)
self.ScaleGridBarH.set(self.gridSize[0])
self.ScaleGridBarV = Scale(self, from_=0, to=(self.imgTop.shape[0]//2), orient=HORIZONTAL, label="Scale Tray Vertical", length=self.winfo_screenwidth()/6, sliderlength=self.winfo_screenheight()//100, command=self.adjustGridSizeVert)
self.ScaleGridBarV.grid(row=4,column=1,sticky = NE)
self.ScaleGridBarV.set(self.gridSize[1])
self.GridMidX = Scale(self, from_=0,to=(self.imgTop.shape[0]), orient=HORIZONTAL, label="Grid center X", length=self.winfo_screenwidth()/6, sliderlength=self.winfo_screenheight()//100, command=self.AdjustGridCentreX)
self.GridMidX.grid(row=5,column=1,sticky = NW)
self.GridMidX.set(self.imgTop.shape[0]//2)
self.GridMidY = Scale(self, from_=0,to=(self.imgTop.shape[1]), orient=HORIZONTAL, label="Grid center Y", length=self.winfo_screenwidth()/6, sliderlength=self.winfo_screenheight()//100, command=self.AdjustGridCentreY)
self.GridMidY.grid(row=5,column=1,sticky = NE)
self.GridMidY.set(self.imgTop.shape[1]//2)
self.gridCenter = (self.imgTop.shape[0]//2,self.imgTop.shape[1]//2)
if self.imgTop.shape[0] > self.imgTop.shape[1] :
if self.imgTop.shape[0] > self.maxSize:
size = self.imgTop.shape[1]/(self.imgTop.shape[0]/self.maxSize)
self.imgTopSize = (int(size),self.maxSize)
else:
if self.imgTop.shape[1] > self.maxSize:
size = self.imgTop.shape[0]/(self.imgTop.shape[1]/self.maxSize)
self.imgTopSize = (self.maxSize,int(size))
self.imgTop = cv.resize(self.imgTop,self.imgTopSize)
self.imgTop = cv.cvtColor(self.imgTop,cv.COLOR_GRAY2RGB)
self.imgTop = Image.fromarray(self.imgTop)
self.imgTop = ImageTk.PhotoImage(image=self.imgTop)
self.panalTop = Label(self,image=self.imgTop)
self.panalTop.grid(row=0,column=0)
self.imgSide = self.imageStack[:,0,:].astype('uint8')
#if self.downsampleFactor > 1:
#self.imgSide = np.delete(self.imgSide,list(range(0,self.imgSide.shape[0],self.downsampleFactor)),axis=0)
self.imgSideSize = self.imgSide.shape
if self.imgSideSize[0] > self.imgSideSize[1]:
self.imgSide = self.image_resize(self.imgSide,height=self.maxSize)
else:
self.imgSide = self.image_resize(self.imgSide,width=self.maxSize)
self.imgSideSize = self.imgSide.shape
self.imgSide = cv.cvtColor(self.imgSide,cv.COLOR_GRAY2RGB)
self.imgSide = Image.fromarray(self.imgSide)
self.imgSide = ImageTk.PhotoImage(image=self.imgSide)
self.panalSide = Label(self,image=self.imgSide)
self.panalSide.grid(row=0,column=1)
self.imgFront = self.imageStack[:,:,0].astype('uint8')
#if self.downsampleFactor > 1:
# self.imgFront = np.delete(self.imgFront,list(range(0,self.imgFront.shape[0],self.downsampleFactor)),axis=0)
self.imgFrontSize = self.imgFront.shape
if self.imgFrontSize[0] > self.imgFrontSize[1]:
self.imgFront = self.image_resize(self.imgFront,height=self.maxSize)
else:
self.imgFront = self.image_resize(self.imgFront,width=self.maxSize)
self.imgFrontSize = self.imgFront.shape
self.imgFront = cv.cvtColor(self.imgFront,cv.COLOR_GRAY2RGB)
self.imgFront = Image.fromarray(self.imgFront)
self.imgFront = ImageTk.PhotoImage(image=self.imgFront)
self.panalFront = Label(self,image=self.imgFront)
self.panalFront.grid(row=0,column=2)
# bars for showing scale
self.frontBar = Scale(self, from_=1, to=self.imageStack.shape[2], orient=HORIZONTAL, length=self.winfo_screenwidth()/3, sliderlength=self.winfo_screenheight()//100, command=self.frontSlider)
self.frontBar.grid(row=1,column=2)
self.frontBar.set(self.imageStack.shape[2]//2)
self.sideBar = Scale(self, from_=1, to=self.imageStack.shape[1], orient=HORIZONTAL, length=self.winfo_screenwidth()/3, sliderlength=self.winfo_screenheight()//100, command=self.sideSlider)
self.sideBar.grid(row=1,column=1)
self.sideBar.set(self.imageStack.shape[1]//2)
self.topBar = Scale(self, from_=1, to=self.imageStack.shape[0], orient=HORIZONTAL, length=self.winfo_screenwidth()/3, sliderlength=self.winfo_screenheight()//100, command=self.topSlider)
self.topBar.grid(row=1,column=0)
self.topBar.set(self.imageStack.shape[0]//2)
root = ScanOBJGenerator()
root.mainloop()
| 50.110451
| 242
| 0.56981
|
09351ba17a423bc5408a207e2dde27d57b6edbd9
| 104
|
py
|
Python
|
enthought/mayavi/components/custom_grid_plane.py
|
enthought/etsproxy
|
4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347
|
[
"BSD-3-Clause"
] | 3
|
2016-12-09T06:05:18.000Z
|
2018-03-01T13:00:29.000Z
|
enthought/mayavi/components/custom_grid_plane.py
|
enthought/etsproxy
|
4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347
|
[
"BSD-3-Clause"
] | 1
|
2020-12-02T00:51:32.000Z
|
2020-12-02T08:48:55.000Z
|
enthought/mayavi/components/custom_grid_plane.py
|
enthought/etsproxy
|
4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347
|
[
"BSD-3-Clause"
] | null | null | null |
# proxy module
from __future__ import absolute_import
from mayavi.components.custom_grid_plane import *
| 26
| 49
| 0.855769
|
8d64f6682732e42b076484abf103dfb02262c359
| 10,542
|
py
|
Python
|
txweb2/test/test_metafd.py
|
backwardn/ccs-calendarserver
|
13c706b985fb728b9aab42dc0fef85aae21921c3
|
[
"Apache-2.0"
] | 462
|
2016-08-14T17:43:24.000Z
|
2022-03-17T07:38:16.000Z
|
txweb2/test/test_metafd.py
|
backwardn/ccs-calendarserver
|
13c706b985fb728b9aab42dc0fef85aae21921c3
|
[
"Apache-2.0"
] | 72
|
2016-09-01T23:19:35.000Z
|
2020-02-05T02:09:26.000Z
|
txweb2/test/test_metafd.py
|
backwardn/ccs-calendarserver
|
13c706b985fb728b9aab42dc0fef85aae21921c3
|
[
"Apache-2.0"
] | 171
|
2016-08-16T03:50:30.000Z
|
2022-03-26T11:49:55.000Z
|
##
# Copyright (c) 2011-2017 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
"""
Tests for txweb2.metafd.
"""
from socket import error as SocketError, AF_INET
from errno import ENOTCONN
from twext.internet import sendfdport
from txweb2 import metafd
from txweb2.channel.http import HTTPChannel
from txweb2.metafd import ReportingHTTPService, ConnectionLimiter
from twisted.internet.tcp import Server
from twisted.application.service import Service
from twext.internet.test.test_sendfdport import ReaderAdder
from txweb2.metafd import WorkerStatus
from twisted.trial.unittest import TestCase
class FakeSocket(object):
"""
A fake socket for testing.
"""
def __init__(self, test):
self.test = test
def fileno(self):
return "not a socket"
def setblocking(self, blocking):
return
def getpeername(self):
if self.test.peerNameSucceed:
return ("4.3.2.1", 4321)
else:
raise SocketError(ENOTCONN, "Transport endpoint not connected")
def getsockname(self):
return ("4.3.2.1", 4321)
class InheritedPortForTesting(sendfdport.InheritedPort):
"""
L{sendfdport.InheritedPort} subclass that prevents certain I/O operations
for better unit testing.
"""
def startReading(self):
"Do nothing."
def stopReading(self):
"Do nothing."
def startWriting(self):
"Do nothing."
def stopWriting(self):
"Do nothing."
class ServerTransportForTesting(Server):
"""
tcp.Server replacement for testing purposes.
"""
def startReading(self):
"Do nothing."
def stopReading(self):
"Do nothing."
def startWriting(self):
"Do nothing."
def stopWriting(self):
"Do nothing."
def __init__(self, *a, **kw):
super(ServerTransportForTesting, self).__init__(*a, **kw)
self.reactor = None
class ReportingHTTPServiceTests(TestCase):
"""
Tests for L{ReportingHTTPService}
"""
peerNameSucceed = True
def setUp(self):
def fakefromfd(fd, addressFamily, socketType):
return FakeSocket(self)
def fakerecvfd(fd):
return "not an fd", "not a description"
def fakeclose(fd):
""
def fakegetsockfam(fd):
return AF_INET
self.patch(sendfdport, 'recvfd', fakerecvfd)
self.patch(sendfdport, 'fromfd', fakefromfd)
self.patch(sendfdport, 'close', fakeclose)
self.patch(sendfdport, 'getsockfam', fakegetsockfam)
self.patch(metafd, 'InheritedPort', InheritedPortForTesting)
self.patch(metafd, 'Server', ServerTransportForTesting)
# This last stubbed out just to prevent dirty reactor warnings.
self.patch(HTTPChannel, "callLater", lambda *a, **k: None)
self.svc = ReportingHTTPService(None, None, None)
self.svc.startService()
def test_quickClosedSocket(self):
"""
If a socket is closed very quickly after being {accept()}ed, requesting
its peer (or even host) address may fail with C{ENOTCONN}. If this
happens, its transport should be supplied with a dummy peer address.
"""
self.peerNameSucceed = False
self.svc.reportingFactory.inheritedPort.doRead()
channels = self.svc.reportingFactory.connectedChannels
self.assertEqual(len(channels), 1)
self.assertEqual(list(channels)[0].transport.getPeer().host, "0.0.0.0")
class ConnectionLimiterTests(TestCase):
"""
Tests for L{ConnectionLimiter}
"""
def test_loadReducedStartsReadingAgain(self):
"""
L{ConnectionLimiter.statusesChanged} determines whether the current
"load" of all subprocesses - that is, the total outstanding request
count - is high enough that the listening ports attached to it should
be suspended.
"""
builder = LimiterBuilder(self)
builder.fillUp()
self.assertEquals(builder.port.reading, False) # sanity check
self.assertEquals(builder.highestLoad(), builder.requestsPerSocket)
builder.loadDown()
self.assertEquals(builder.port.reading, True)
def test_processRestartedStartsReadingAgain(self):
"""
L{ConnectionLimiter.statusesChanged} determines whether the current
number of outstanding requests is above the limit, and either stops or
resumes reading on the listening port.
"""
builder = LimiterBuilder(self)
builder.fillUp()
self.assertEquals(builder.port.reading, False)
self.assertEquals(builder.highestLoad(), builder.requestsPerSocket)
builder.processRestart()
self.assertEquals(builder.port.reading, True)
def test_unevenLoadDistribution(self):
"""
Subprocess sockets should be selected for subsequent socket sends by
ascending status. Status should sum sent and successfully subsumed
sockets.
"""
builder = LimiterBuilder(self)
# Give one simulated worker a higher acknowledged load than the other.
builder.fillUp(True, 1)
# There should still be plenty of spare capacity.
self.assertEquals(builder.port.reading, True)
# Then slam it with a bunch of incoming requests.
builder.fillUp(False, builder.limiter.maxRequests - 1)
# Now capacity is full.
self.assertEquals(builder.port.reading, False)
# And everyone should have an even amount of work.
self.assertEquals(builder.highestLoad(), builder.requestsPerSocket)
def test_processStopsReadingEvenWhenConnectionsAreNotAcknowledged(self):
"""
L{ConnectionLimiter.statusesChanged} determines whether the current
number of outstanding requests is above the limit.
"""
builder = LimiterBuilder(self)
builder.fillUp(acknowledged=False)
self.assertEquals(builder.highestLoad(), builder.requestsPerSocket)
self.assertEquals(builder.port.reading, False)
builder.processRestart()
self.assertEquals(builder.port.reading, True)
def test_workerStatusRepr(self):
"""
L{WorkerStatus.__repr__} will show all the values associated with the
status of the worker.
"""
self.assertEquals(repr(WorkerStatus(1, 2, 3, 4, 5, 6, 7, 8)),
"<WorkerStatus acknowledged=1 unacknowledged=2 total=3 "
"started=4 abandoned=5 unclosed=6 starting=7 stopped=8>")
def test_workerStatusNonNegative(self):
"""
L{WorkerStatus.__repr__} will show all the values associated with the
status of the worker.
"""
w = WorkerStatus()
w.adjust(
acknowledged=1,
unacknowledged=-1,
total=1,
)
self.assertEquals(w.acknowledged, 1)
self.assertEquals(w.unacknowledged, 0)
self.assertEquals(w.total, 1)
class LimiterBuilder(object):
"""
A L{LimiterBuilder} can build a L{ConnectionLimiter} and associated objects
for a given unit test.
"""
def __init__(self, test, requestsPerSocket=3, socketCount=2):
# Similar to MaxRequests in the configuration.
self.requestsPerSocket = requestsPerSocket
# Similar to ProcessCount in the configuration.
self.socketCount = socketCount
self.limiter = ConnectionLimiter(
2, maxRequests=requestsPerSocket * socketCount
)
self.dispatcher = self.limiter.dispatcher
self.dispatcher.reactor = ReaderAdder()
self.service = Service()
self.limiter.addPortService("TCP", 4321, "127.0.0.1", 5,
self.serverServiceMakerMaker(self.service))
for ignored in xrange(socketCount):
subskt = self.dispatcher.addSocket()
subskt.start()
subskt.restarted()
# Has to be running in order to add stuff.
self.limiter.startService()
self.port = self.service.myPort
def highestLoad(self):
return max(
skt.status.effective()
for skt in self.limiter.dispatcher._subprocessSockets
)
def serverServiceMakerMaker(self, s):
"""
Make a serverServiceMaker for use with
L{ConnectionLimiter.addPortService}.
"""
class NotAPort(object):
def startReading(self):
self.reading = True
def stopReading(self):
self.reading = False
def serverServiceMaker(port, factory, *a, **k):
s.factory = factory
s.myPort = NotAPort()
# TODO: technically, the following should wait for startService
s.myPort.startReading()
factory.myServer = s
return s
return serverServiceMaker
def fillUp(self, acknowledged=True, count=0):
"""
Fill up all the slots on the connection limiter.
@param acknowledged: Should the virtual connections created by this
method send a message back to the dispatcher indicating that the
subprocess has acknowledged receipt of the file descriptor?
        @param count: Amount of load to add; defaults to the maximum that the
            limiter allows.
"""
for _ignore_x in range(count or self.limiter.maxRequests):
self.dispatcher.sendFileDescriptor(None, "SSL")
if acknowledged:
self.dispatcher.statusMessage(
self.dispatcher._subprocessSockets[0], "+"
)
def processRestart(self):
self.dispatcher._subprocessSockets[0].stop()
self.dispatcher._subprocessSockets[0].start()
self.dispatcher.statusMessage(
self.dispatcher._subprocessSockets[0], "0"
)
def loadDown(self):
self.dispatcher.statusMessage(
self.dispatcher._subprocessSockets[0], "-"
)
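# --- Hedged usage sketch (not part of the original module) ---
# A minimal, commented illustration of how the tests above drive
# LimiterBuilder; it simply restates the flow already exercised in
# test_processStopsReadingEvenWhenConnectionsAreNotAcknowledged.
#
#   builder = LimiterBuilder(self)
#   builder.fillUp(acknowledged=False)     # saturate every slot
#   assert builder.port.reading is False   # at capacity the port stops reading
#   builder.processRestart()               # a worker restart clears its load
#   assert builder.port.reading is True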
| 33.466667
| 83
| 0.648928
|
80b8425866ebca833c4632d39045331f62817786
| 649
|
py
|
Python
|
run.py
|
sujianqingfeng/scrapy_xiuren
|
a05d8fe05777aca26667c5b40573ef0431bcff06
|
[
"Apache-2.0"
] | 49
|
2018-04-18T03:48:48.000Z
|
2021-12-21T03:14:07.000Z
|
run.py
|
sujianqingfeng/xiuren
|
a05d8fe05777aca26667c5b40573ef0431bcff06
|
[
"Apache-2.0"
] | null | null | null |
run.py
|
sujianqingfeng/xiuren
|
a05d8fe05777aca26667c5b40573ef0431bcff06
|
[
"Apache-2.0"
] | 21
|
2018-05-10T03:35:20.000Z
|
2021-08-15T18:19:54.000Z
|
#!/usr/bin/env python
# encoding: utf-8
"""
@author: su jian
@contact: 121116111@qq.com
@file: run.py
@time: 2017/8/16 17:58
"""
from scrapy.cmdline import execute
import os
import sys
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
# execute(['scrapy', 'crawl', 'xiuren_spider'])
# execute(['scrapy', 'crawl', 'meizi_spider'])
execute('scrapy crawl jcenter_spider'.split(' '))
# def func():
# sys.path.append(os.path.dirname(os.path.abspath(__file__)))
# execute(['scrapy', 'crawl', 'xiuren_spider'])
#
#
# if __name__ == '__main__':
# func()
# import re
# print(re.findall(r'(\d+)', 'http://www.mzitu.com/57176')[0])
| 20.935484
| 65
| 0.659476
|
ac0fbe4f28ab43ea782ea12890e00700d24c9c01
| 3,761
|
py
|
Python
|
paper/experiments/01_uci_benchmark/01_uci_benchmark_lightgbm.py
|
ii-research-yu/pgbm
|
d050a5f71f1a458d8269c4f5201744c0d7c4d487
|
[
"Apache-2.0"
] | 79
|
2021-06-02T06:59:08.000Z
|
2022-03-10T20:27:02.000Z
|
paper/experiments/01_uci_benchmark/01_uci_benchmark_lightgbm.py
|
ii-research-yu/pgbm
|
d050a5f71f1a458d8269c4f5201744c0d7c4d487
|
[
"Apache-2.0"
] | 11
|
2021-06-07T17:39:34.000Z
|
2022-01-10T14:00:21.000Z
|
paper/experiments/01_uci_benchmark/01_uci_benchmark_lightgbm.py
|
ii-research-yu/pgbm
|
d050a5f71f1a458d8269c4f5201744c0d7c4d487
|
[
"Apache-2.0"
] | 10
|
2021-06-08T01:04:03.000Z
|
2022-03-31T23:41:23.000Z
|
"""
Copyright (c) 2021 Olivier Sprangers as part of Airlab Amsterdam
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
https://github.com/elephaint/pgbm/blob/main/LICENSE
"""
#%% Import packages
import numpy as np
import time
import lightgbm as lgb
from sklearn.model_selection import train_test_split
import pandas as pd
from datasets import get_dataset, get_fold
#%% Objective
def rmseloss_metric(yhat, y):
loss = np.sqrt(np.mean((yhat - y)**2))
return loss
#%% Generic Parameters
params = {'min_split_gain':0,
'min_data_in_leaf':1,
'max_depth':-1,
'max_bin':64,
'learning_rate':0.1,
'n_estimators':2000,
'verbose':2,
'feature_fraction':1,
'bagging_fraction':1,
'seed':1}
#%% LightGBM specific
method = 'lightgbm'
params['lambda'] = 1
params['device'] = 'cpu'
params['objective'] = 'regression'
params['metric'] = 'rmse'
params['num_leaves'] = 8
params['bagging_freq'] = 1
params['min_data_in_bin'] = 1
#%% Loop
datasets = ['boston', 'concrete', 'energy', 'kin8nm', 'msd', 'naval', 'power', 'protein', 'wine', 'yacht', 'higgs']
base_estimators = 2000
df = pd.DataFrame(columns=['method', 'dataset','fold','device','validation_estimators','test_estimators','rmse_test','crps_test','validation_time'])
for i, dataset in enumerate(datasets):
if dataset == 'msd' or dataset == 'higgs':
params['bagging_fraction'] = 0.1
n_folds = 1
else:
params['bagging_fraction'] = 1
n_folds = 20
data = get_dataset(dataset)
for fold in range(n_folds):
print(f'{dataset}: fold {fold + 1}/{n_folds}')
# Get data
X_train, X_test, y_train, y_test = get_fold(dataset, data, fold)
X_train_val, X_val, y_train_val, y_val = train_test_split(X_train, y_train, test_size=0.2, random_state=fold)
        # Build LightGBM datasets
train_data = lgb.Dataset(X_train, y_train)
train_val_data = lgb.Dataset(X_train_val, y_train_val)
valid_data = lgb.Dataset(X_val, y_val)
test_data = lgb.Dataset(X_test, y_test)
params['n_estimators'] = base_estimators
# Train to retrieve best iteration
print('Validating...')
start = time.perf_counter()
model = lgb.train(params, train_val_data, valid_sets=valid_data, early_stopping_rounds=2000)
end = time.perf_counter()
validation_time = end - start
print(f'Fold time: {validation_time:.2f}s')
# Set iterations to best iteration
params['n_estimators'] = model.best_iteration + 1
# Retrain on full set
print('Training...')
model = lgb.train(params, train_data)
#% Predictions
print('Prediction...')
yhat_point = model.predict(X_test)
# Scoring
rmse = rmseloss_metric(yhat_point, y_test)
crps = 0
# Save data
df = df.append({'method':method, 'dataset':dataset, 'fold':fold, 'device':params['device'], 'validation_estimators': base_estimators, 'test_estimators':params['n_estimators'], 'rmse_test': rmse, 'crps_test': crps, 'validation_time':validation_time}, ignore_index=True)
#%% Save
filename = f"{method}_{params['device']}.csv"
df.to_csv(f'experiments/01_uci_benchmark/{filename}')
| 39.589474
| 276
| 0.668439
|
5867b077be35c9ed8cbdcdb26456b1959feda021
| 2,155
|
py
|
Python
|
etc/users/users.py
|
shakhyar/social_engineering
|
d075b6bda0d9da62e8db7358f43819878a5fef21
|
[
"MIT"
] | null | null | null |
etc/users/users.py
|
shakhyar/social_engineering
|
d075b6bda0d9da62e8db7358f43819878a5fef21
|
[
"MIT"
] | null | null | null |
etc/users/users.py
|
shakhyar/social_engineering
|
d075b6bda0d9da62e8db7358f43819878a5fef21
|
[
"MIT"
] | null | null | null |
import sqlite3
uconn = sqlite3.connect("etc/users/users.db", check_same_thread=False)
uc = uconn.cursor()
class Users:
"""
#? Table name = user
#* Columns: email, password, username, website
"""
def __init__(self):
self.create_table(True)
self.column1 = []
def create_table(self, true):
self.true = true
if self.true:
uc.execute("CREATE TABLE IF NOT EXISTS user(email TEXT, password TEXT, username TEXT, website TEXT)")
uconn.commit()
else:
pass
def user_entry(self, email, password, username, website):
self.email = email
self.password = password
self.username = username
self.website = website
uc.execute("SELECT * FROM user")
uc.execute("INSERT INTO user(email, password, username, website) VALUES (?, ?, ?, ?)", (self.email, self.password, self.username, self.website))
uconn.commit()
    def get_username(self, email):
        self.email = str(email)
        # Parameterised query: interpolating the raw value into the SQL string
        # produces invalid SQL (unquoted literal) and is an injection risk.
        uc.execute("SELECT * FROM user WHERE email=?", (self.email,))
        for row in uc.fetchall():
            if self.email == row[0]:
                return row[2]
        return False
    def get_webiste(self, email):
        self.email = email
        uc.execute("SELECT * FROM user WHERE email=?", (self.email,))
        for row in uc.fetchall():
            if self.email == row[0]:
                return row[3]
        return False
    def get_password(self, email):
        self.email = email
        uc.execute("SELECT * FROM user WHERE email=?", (self.email,))
        for row in uc.fetchall():
            if self.email == row[0]:
                return row[1]
        return False
    def get_email(self, username):
        self.username = username
        uc.execute("SELECT * FROM user WHERE username=?", (self.username,))
        for row in uc.fetchall():
            if self.username == row[2]:
                return row[0]
        return False
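# --- Hedged usage sketch (not part of the original module) ---
# A minimal, commented illustration of the Users API defined above; the
# e-mail, password, username and website values are made up for the example.
#
#   users = Users()
#   users.user_entry("a@example.com", "secret", "alice", "https://example.com")
#   users.get_username("a@example.com")   # -> "alice"
#   users.get_email("alice")              # -> "a@example.com"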
| 31.231884
| 153
| 0.535499
|
3ca9b8f16271cf464fa2cf8805a5646797510cb1
| 155
|
py
|
Python
|
apps/site_admin/apps.py
|
aspic2/rendermaps
|
517905c5f970bbb8bba87e9599be874fc0fd250d
|
[
"MIT"
] | null | null | null |
apps/site_admin/apps.py
|
aspic2/rendermaps
|
517905c5f970bbb8bba87e9599be874fc0fd250d
|
[
"MIT"
] | 24
|
2018-02-21T22:51:24.000Z
|
2022-03-11T23:12:25.000Z
|
apps/site_admin/apps.py
|
aspic2/jobs-by-neighborhood
|
517905c5f970bbb8bba87e9599be874fc0fd250d
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.apps import AppConfig
class AdminConfig(AppConfig):
name = 'site_admin'
| 17.222222
| 39
| 0.735484
|
dc1379d7643328d2a0251cd831289813a91a5730
| 17,532
|
py
|
Python
|
requirements/docutils-0.18/test/test_transforms/test_footnotes.py
|
QuentinTournier40/AnimationFreeCAD
|
8eaff8356ec68b948a721b83a6888b652278db8a
|
[
"Apache-2.0"
] | null | null | null |
requirements/docutils-0.18/test/test_transforms/test_footnotes.py
|
QuentinTournier40/AnimationFreeCAD
|
8eaff8356ec68b948a721b83a6888b652278db8a
|
[
"Apache-2.0"
] | null | null | null |
requirements/docutils-0.18/test/test_transforms/test_footnotes.py
|
QuentinTournier40/AnimationFreeCAD
|
8eaff8356ec68b948a721b83a6888b652278db8a
|
[
"Apache-2.0"
] | 1
|
2022-02-03T08:03:30.000Z
|
2022-02-03T08:03:30.000Z
|
#! /usr/bin/env python
# $Id: test_footnotes.py 8771 2021-06-18 18:55:08Z milde $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
Tests for docutils.transforms.references.Footnotes.
"""
from __future__ import absolute_import
if __name__ == '__main__':
import __init__
from test_transforms import DocutilsTestSupport
from docutils.transforms.references import Footnotes
from docutils.parsers.rst import Parser
def suite():
parser = Parser()
s = DocutilsTestSupport.TransformTestSuite(parser)
s.generateTests(totest)
return s
totest = {}
totest['footnotes'] = ((Footnotes,), [
["""\
[#autolabel]_
.. [#autolabel] text
""",
"""\
<document source="test data">
<paragraph>
<footnote_reference auto="1" ids="footnote-reference-1" refid="autolabel">
1
<footnote auto="1" backrefs="footnote-reference-1" ids="autolabel" names="autolabel">
<label>
1
<paragraph>
text
"""],
["""\
autonumber: [#]_
.. [#] text
""",
"""\
<document source="test data">
<paragraph>
autonumber: \n\
<footnote_reference auto="1" ids="footnote-reference-1" refid="footnote-1">
1
<footnote auto="1" backrefs="footnote-reference-1" ids="footnote-1" names="1">
<label>
1
<paragraph>
text
"""],
["""\
[#]_ is the first auto-numbered footnote reference.
[#]_ is the second auto-numbered footnote reference.
.. [#] Auto-numbered footnote 1.
.. [#] Auto-numbered footnote 2.
.. [#] Auto-numbered footnote 3.
[#]_ is the third auto-numbered footnote reference.
""",
"""\
<document source="test data">
<paragraph>
<footnote_reference auto="1" ids="footnote-reference-1" refid="footnote-1">
1
is the first auto-numbered footnote reference.
<footnote_reference auto="1" ids="footnote-reference-2" refid="footnote-2">
2
is the second auto-numbered footnote reference.
<footnote auto="1" backrefs="footnote-reference-1" ids="footnote-1" names="1">
<label>
1
<paragraph>
Auto-numbered footnote 1.
<footnote auto="1" backrefs="footnote-reference-2" ids="footnote-2" names="2">
<label>
2
<paragraph>
Auto-numbered footnote 2.
<footnote auto="1" backrefs="footnote-reference-3" ids="footnote-3" names="3">
<label>
3
<paragraph>
Auto-numbered footnote 3.
<paragraph>
<footnote_reference auto="1" ids="footnote-reference-3" refid="footnote-3">
3
is the third auto-numbered footnote reference.
"""],
["""\
[#third]_ is a reference to the third auto-numbered footnote.
.. [#first] First auto-numbered footnote.
.. [#second] Second auto-numbered footnote.
.. [#third] Third auto-numbered footnote.
[#second]_ is a reference to the second auto-numbered footnote.
[#first]_ is a reference to the first auto-numbered footnote.
[#third]_ is another reference to the third auto-numbered footnote.
Here are some internal cross-references to the implicit targets
generated by the footnotes: first_, second_, third_.
""",
"""\
<document source="test data">
<paragraph>
<footnote_reference auto="1" ids="footnote-reference-1" refid="third">
3
is a reference to the third auto-numbered footnote.
<footnote auto="1" backrefs="footnote-reference-3" ids="first" names="first">
<label>
1
<paragraph>
First auto-numbered footnote.
<footnote auto="1" backrefs="footnote-reference-2" ids="second" names="second">
<label>
2
<paragraph>
Second auto-numbered footnote.
<footnote auto="1" backrefs="footnote-reference-1 footnote-reference-4" ids="third" names="third">
<label>
3
<paragraph>
Third auto-numbered footnote.
<paragraph>
<footnote_reference auto="1" ids="footnote-reference-2" refid="second">
2
is a reference to the second auto-numbered footnote.
<footnote_reference auto="1" ids="footnote-reference-3" refid="first">
1
is a reference to the first auto-numbered footnote.
<footnote_reference auto="1" ids="footnote-reference-4" refid="third">
3
is another reference to the third auto-numbered footnote.
<paragraph>
Here are some internal cross-references to the implicit targets
generated by the footnotes: \n\
<reference name="first" refname="first">
first
, \n\
<reference name="second" refname="second">
second
, \n\
<reference name="third" refname="third">
third
.
"""],
["""\
Mixed anonymous and labelled auto-numbered footnotes:
[#four]_ should be 4, [#]_ should be 1,
[#]_ should be 3, [#]_ is one too many,
[#two]_ should be 2, and [#six]_ doesn't exist.
.. [#] Auto-numbered footnote 1.
.. [#two] Auto-numbered footnote 2.
.. [#] Auto-numbered footnote 3.
.. [#four] Auto-numbered footnote 4.
.. [#five] Auto-numbered footnote 5.
.. [#five] Auto-numbered footnote 5 again (duplicate).
""",
"""\
<document source="test data">
<paragraph>
Mixed anonymous and labelled auto-numbered footnotes:
<paragraph>
<footnote_reference auto="1" ids="footnote-reference-1" refid="four">
4
should be 4, \n\
<footnote_reference auto="1" ids="footnote-reference-2" refid="footnote-1">
1
should be 1,
<footnote_reference auto="1" ids="footnote-reference-3" refid="footnote-2">
3
should be 3, \n\
<problematic ids="problematic-1 footnote-reference-4" refid="system-message-1">
[#]_
is one too many,
<footnote_reference auto="1" ids="footnote-reference-5" refid="two">
2
should be 2, and \n\
<footnote_reference auto="1" ids="footnote-reference-6" refname="six">
doesn't exist.
<footnote auto="1" backrefs="footnote-reference-2" ids="footnote-1" names="1">
<label>
1
<paragraph>
Auto-numbered footnote 1.
<footnote auto="1" backrefs="footnote-reference-5" ids="two" names="two">
<label>
2
<paragraph>
Auto-numbered footnote 2.
<footnote auto="1" backrefs="footnote-reference-3" ids="footnote-2" names="3">
<label>
3
<paragraph>
Auto-numbered footnote 3.
<footnote auto="1" backrefs="footnote-reference-1" ids="four" names="four">
<label>
4
<paragraph>
Auto-numbered footnote 4.
<footnote auto="1" dupnames="five" ids="five">
<label>
5
<paragraph>
Auto-numbered footnote 5.
<footnote auto="1" dupnames="five" ids="five-1">
<label>
6
<system_message backrefs="five-1" level="2" line="12" source="test data" type="WARNING">
<paragraph>
Duplicate explicit target name: "five".
<paragraph>
Auto-numbered footnote 5 again (duplicate).
<system_message backrefs="problematic-1" ids="system-message-1" level="3" line="3" source="test data" type="ERROR">
<paragraph>
Too many autonumbered footnote references: only 2 corresponding footnotes available.
"""],
["""\
Mixed auto-numbered and manual footnotes:
.. [1] manually numbered
.. [#] auto-numbered
.. [#label] autonumber-labeled
""",
"""\
<document source="test data">
<paragraph>
Mixed auto-numbered and manual footnotes:
<footnote ids="footnote-1" names="1">
<label>
1
<paragraph>
manually numbered
<footnote auto="1" ids="footnote-2" names="2">
<label>
2
<paragraph>
auto-numbered
<footnote auto="1" ids="label" names="label">
<label>
3
<paragraph>
autonumber-labeled
"""],
["""\
A labeled autonumbered footnote referece: [#footnote]_.
An unlabeled autonumbered footnote referece: [#]_.
.. [#] Unlabeled autonumbered footnote.
.. [#footnote] Labeled autonumbered footnote.
Note that the footnotes are not in the same
order as the references.
""",
"""\
<document source="test data">
<paragraph>
A labeled autonumbered footnote referece: \n\
<footnote_reference auto="1" ids="footnote-reference-1" refid="footnote">
2
.
<paragraph>
An unlabeled autonumbered footnote referece: \n\
<footnote_reference auto="1" ids="footnote-reference-2" refid="footnote-1">
1
.
<footnote auto="1" backrefs="footnote-reference-2" ids="footnote-1" names="1">
<label>
1
<paragraph>
Unlabeled autonumbered footnote.
<footnote auto="1" backrefs="footnote-reference-1" ids="footnote" names="footnote">
<label>
2
<paragraph>
Labeled autonumbered footnote.
Note that the footnotes are not in the same
order as the references.
"""],
["""\
Mixed manually-numbered, anonymous auto-numbered,
and labelled auto-numbered footnotes:
[#four]_ should be 4, [#]_ should be 2,
[1]_ is 1, [3]_ is 3,
[#]_ should be 6, [#]_ is one too many,
[#five]_ should be 5, and [#eight]_ doesn't exist.
.. [1] Manually-numbered footnote 1.
.. [#] Auto-numbered footnote 2.
.. [#four] Auto-numbered footnote 4.
.. [3] Manually-numbered footnote 3
.. [#five] Auto-numbered footnote 5.
.. [#] Auto-numbered footnote 6.
.. [#five] Auto-numbered footnote 5 again (duplicate).
""",
"""\
<document source="test data">
<paragraph>
Mixed manually-numbered, anonymous auto-numbered,
and labelled auto-numbered footnotes:
<paragraph>
<footnote_reference auto="1" ids="footnote-reference-1" refid="four">
4
should be 4, \n\
<footnote_reference auto="1" ids="footnote-reference-2" refid="footnote-2">
2
should be 2,
<footnote_reference ids="footnote-reference-3" refid="footnote-1">
1
is 1, \n\
<footnote_reference ids="footnote-reference-4" refid="footnote-3">
3
is 3,
<footnote_reference auto="1" ids="footnote-reference-5" refid="footnote-4">
6
should be 6, \n\
<problematic ids="problematic-1 footnote-reference-6" refid="system-message-1">
[#]_
is one too many,
<footnote_reference auto="1" ids="footnote-reference-7" refname="five">
should be 5, and \n\
<footnote_reference auto="1" ids="footnote-reference-8" refname="eight">
doesn't exist.
<footnote backrefs="footnote-reference-3" ids="footnote-1" names="1">
<label>
1
<paragraph>
Manually-numbered footnote 1.
<footnote auto="1" backrefs="footnote-reference-2" ids="footnote-2" names="2">
<label>
2
<paragraph>
Auto-numbered footnote 2.
<footnote auto="1" backrefs="footnote-reference-1" ids="four" names="four">
<label>
4
<paragraph>
Auto-numbered footnote 4.
<footnote backrefs="footnote-reference-4" ids="footnote-3" names="3">
<label>
3
<paragraph>
Manually-numbered footnote 3
<footnote auto="1" dupnames="five" ids="five">
<label>
5
<paragraph>
Auto-numbered footnote 5.
<footnote auto="1" backrefs="footnote-reference-5" ids="footnote-4" names="6">
<label>
6
<paragraph>
Auto-numbered footnote 6.
<footnote auto="1" dupnames="five" ids="five-1">
<label>
7
<system_message backrefs="five-1" level="2" line="15" source="test data" type="WARNING">
<paragraph>
Duplicate explicit target name: "five".
<paragraph>
Auto-numbered footnote 5 again (duplicate).
<system_message backrefs="problematic-1" ids="system-message-1" level="3" line="4" source="test data" type="ERROR">
<paragraph>
Too many autonumbered footnote references: only 2 corresponding footnotes available.
"""],
["""\
Referencing a footnote by symbol [*]_.
.. [*] This is an auto-symbol footnote.
""",
"""\
<document source="test data">
<paragraph>
Referencing a footnote by symbol \n\
<footnote_reference auto="*" ids="footnote-reference-1" refid="footnote-1">
*
.
<footnote auto="*" backrefs="footnote-reference-1" ids="footnote-1">
<label>
*
<paragraph>
This is an auto-symbol footnote.
"""],
["""\
A sequence of symbol footnote references:
[*]_ [*]_ [*]_ [*]_ [*]_ [*]_ [*]_ [*]_ [*]_ [*]_ [*]_ [*]_.
.. [*] Auto-symbol footnote 1.
.. [*] Auto-symbol footnote 2.
.. [*] Auto-symbol footnote 3.
.. [*] Auto-symbol footnote 4.
.. [*] Auto-symbol footnote 5.
.. [*] Auto-symbol footnote 6.
.. [*] Auto-symbol footnote 7.
.. [*] Auto-symbol footnote 8.
.. [*] Auto-symbol footnote 9.
.. [*] Auto-symbol footnote 10.
.. [*] Auto-symbol footnote 11.
.. [*] Auto-symbol footnote 12.
""",
u"""\
<document source="test data">
<paragraph>
A sequence of symbol footnote references:
<footnote_reference auto="*" ids="footnote-reference-1" refid="footnote-1">
*
\n\
<footnote_reference auto="*" ids="footnote-reference-2" refid="footnote-2">
\u2020
\n\
<footnote_reference auto="*" ids="footnote-reference-3" refid="footnote-3">
\u2021
\n\
<footnote_reference auto="*" ids="footnote-reference-4" refid="footnote-4">
\u00A7
\n\
<footnote_reference auto="*" ids="footnote-reference-5" refid="footnote-5">
\u00B6
\n\
<footnote_reference auto="*" ids="footnote-reference-6" refid="footnote-6">
#
\n\
<footnote_reference auto="*" ids="footnote-reference-7" refid="footnote-7">
\u2660
\n\
<footnote_reference auto="*" ids="footnote-reference-8" refid="footnote-8">
\u2665
\n\
<footnote_reference auto="*" ids="footnote-reference-9" refid="footnote-9">
\u2666
\n\
<footnote_reference auto="*" ids="footnote-reference-10" refid="footnote-10">
\u2663
\n\
<footnote_reference auto="*" ids="footnote-reference-11" refid="footnote-11">
**
\n\
<footnote_reference auto="*" ids="footnote-reference-12" refid="footnote-12">
\u2020\u2020
.
<footnote auto="*" backrefs="footnote-reference-1" ids="footnote-1">
<label>
*
<paragraph>
Auto-symbol footnote 1.
<footnote auto="*" backrefs="footnote-reference-2" ids="footnote-2">
<label>
\u2020
<paragraph>
Auto-symbol footnote 2.
<footnote auto="*" backrefs="footnote-reference-3" ids="footnote-3">
<label>
\u2021
<paragraph>
Auto-symbol footnote 3.
<footnote auto="*" backrefs="footnote-reference-4" ids="footnote-4">
<label>
\u00A7
<paragraph>
Auto-symbol footnote 4.
<footnote auto="*" backrefs="footnote-reference-5" ids="footnote-5">
<label>
\u00B6
<paragraph>
Auto-symbol footnote 5.
<footnote auto="*" backrefs="footnote-reference-6" ids="footnote-6">
<label>
#
<paragraph>
Auto-symbol footnote 6.
<footnote auto="*" backrefs="footnote-reference-7" ids="footnote-7">
<label>
\u2660
<paragraph>
Auto-symbol footnote 7.
<footnote auto="*" backrefs="footnote-reference-8" ids="footnote-8">
<label>
\u2665
<paragraph>
Auto-symbol footnote 8.
<footnote auto="*" backrefs="footnote-reference-9" ids="footnote-9">
<label>
\u2666
<paragraph>
Auto-symbol footnote 9.
<footnote auto="*" backrefs="footnote-reference-10" ids="footnote-10">
<label>
\u2663
<paragraph>
Auto-symbol footnote 10.
<footnote auto="*" backrefs="footnote-reference-11" ids="footnote-11">
<label>
**
<paragraph>
Auto-symbol footnote 11.
<footnote auto="*" backrefs="footnote-reference-12" ids="footnote-12">
<label>
\u2020\u2020
<paragraph>
Auto-symbol footnote 12.
"""],
["""\
Duplicate manual footnote labels:
.. [1] Footnote.
.. [1] Footnote.
""",
"""\
<document source="test data">
<paragraph>
Duplicate manual footnote labels:
<footnote dupnames="1" ids="footnote-1">
<label>
1
<paragraph>
Footnote.
<footnote dupnames="1" ids="footnote-2">
<label>
1
<system_message backrefs="footnote-2" level="2" line="5" source="test data" type="WARNING">
<paragraph>
Duplicate explicit target name: "1".
<paragraph>
Footnote.
"""],
])
if __name__ == '__main__':
import unittest
unittest.main(defaultTest='suite')
| 32.051188
| 119
| 0.585672
|
16f61d94a057c0e3360fdbd022c87a9939f3e6dc
| 1,011
|
py
|
Python
|
modules/platforms/python/tests/test_generic_object.py
|
FedorUporov/gridgain
|
883125f943743fa8198d88be98dfe61bde86ad96
|
[
"CC0-1.0"
] | null | null | null |
modules/platforms/python/tests/test_generic_object.py
|
FedorUporov/gridgain
|
883125f943743fa8198d88be98dfe61bde86ad96
|
[
"CC0-1.0"
] | null | null | null |
modules/platforms/python/tests/test_generic_object.py
|
FedorUporov/gridgain
|
883125f943743fa8198d88be98dfe61bde86ad96
|
[
"CC0-1.0"
] | null | null | null |
#
# Copyright 2019 GridGain Systems, Inc. and Contributors.
#
# Licensed under the GridGain Community Edition License (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.gridgain.com/products/software/community-edition/gridgain-community-edition-license
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from pyignite import GenericObjectMeta
from pyignite.datatypes import *
def test_go():
class GenericObject(
metaclass=GenericObjectMeta,
schema={
'TEST_ID': IntObject,
'TEST_NAME': String,
},
version=2,
):
pass
x = GenericObject()
print(x.__class__.__name__)
| 29.735294
| 101
| 0.714144
|
17ca294d5d018ce4c05139101443ac92c250c02d
| 2,463
|
py
|
Python
|
tests/marltoolbox/algos/amTFT/test_amTFTRolloutsTorchPolicy.py
|
tobiasbaumann1/amd
|
cb6190be92dea54db04ef9202d381b96f6f6218b
|
[
"MIT"
] | null | null | null |
tests/marltoolbox/algos/amTFT/test_amTFTRolloutsTorchPolicy.py
|
tobiasbaumann1/amd
|
cb6190be92dea54db04ef9202d381b96f6f6218b
|
[
"MIT"
] | null | null | null |
tests/marltoolbox/algos/amTFT/test_amTFTRolloutsTorchPolicy.py
|
tobiasbaumann1/amd
|
cb6190be92dea54db04ef9202d381b96f6f6218b
|
[
"MIT"
] | null | null | null |
from marltoolbox.algos import amTFT
from marltoolbox.algos.amTFT import base_policy
from test_base_policy import init_amTFT, generate_fake_discrete_actions
def test_compute_actions_overwrite():
am_tft_policy, env = init_amTFT(policy_class=amTFT.amTFTRolloutsTorchPolicy)
fake_actions = generate_fake_discrete_actions(env)
env.reset()
observations, rewards, done, info = env.step(fake_actions)
am_tft_policy.use_opponent_policies = True
fake_actions, fake_state_out, fake_extra_fetches = "fake", "fake", "fake"
fake_actions_2nd, fake_state_out_2nd, fake_extra_fetches_2nd = "fake_2nd", "fake_2nd", "fake_2nd"
am_tft_policy.overwrite_action = [(fake_actions, fake_state_out, fake_extra_fetches),
(fake_actions_2nd, fake_state_out_2nd, fake_extra_fetches_2nd)]
actions, state_out, extra_fetches = am_tft_policy.compute_actions(observations[env.players_ids[0]])
assert actions == fake_actions
assert state_out == fake_state_out
assert extra_fetches == fake_extra_fetches
actions, state_out, extra_fetches = am_tft_policy.compute_actions(observations[env.players_ids[0]])
assert actions == fake_actions_2nd
assert state_out == fake_state_out_2nd
assert extra_fetches == fake_extra_fetches_2nd
def test__select_algo_to_use_in_eval():
am_tft_policy, env = init_amTFT(policy_class=amTFT.amTFTRolloutsTorchPolicy)
def assert_(working_state_idx, active_algo_idx):
am_tft_policy.working_state = base_policy.WORKING_STATES[working_state_idx]
am_tft_policy._select_witch_algo_to_use()
assert am_tft_policy.active_algo_idx == active_algo_idx
am_tft_policy.use_opponent_policies = False
am_tft_policy.n_steps_to_punish = 0
assert_(working_state_idx=2, active_algo_idx=base_policy.OWN_COOP_POLICY_IDX)
am_tft_policy.use_opponent_policies = False
am_tft_policy.n_steps_to_punish = 1
assert_(working_state_idx=2, active_algo_idx=base_policy.OWN_SELFISH_POLICY_IDX)
am_tft_policy.use_opponent_policies = True
am_tft_policy.performing_rollouts = True
am_tft_policy.n_steps_to_punish_opponent = 0
assert_(working_state_idx=2, active_algo_idx=base_policy.OPP_COOP_POLICY_IDX)
am_tft_policy.use_opponent_policies = True
am_tft_policy.performing_rollouts = True
am_tft_policy.n_steps_to_punish_opponent = 1
assert_(working_state_idx=2, active_algo_idx=base_policy.OPP_SELFISH_POLICY_IDX)
| 48.294118
| 103
| 0.794153
|
52424e97ab94e59b33b6176705c01c645b397684
| 4,236
|
py
|
Python
|
list_scheduler.py
|
AdiNar/MGR
|
e626e333dfbc2f920b507ccae000809d9d479e36
|
[
"Apache-2.0"
] | null | null | null |
list_scheduler.py
|
AdiNar/MGR
|
e626e333dfbc2f920b507ccae000809d9d479e36
|
[
"Apache-2.0"
] | null | null | null |
list_scheduler.py
|
AdiNar/MGR
|
e626e333dfbc2f920b507ccae000809d9d479e36
|
[
"Apache-2.0"
] | null | null | null |
import heapq
import random
from decimal import Decimal
from typing import List
from simulator import Job, Scheduler
class ListScheduler(Scheduler):
def _list_scheduler_run(self, m: int, jobs: List[Job], start_at=0):
schedule = self.schedule
action_points = [(start_at, 0, 0)] # (time, machine, resources)
# 0 is not in the free_machines set.
# Think of it as if there was a virtual job scheduled on machine 0
# that ended at <start_at> and we're starting now.
free_machines = set(range(1, m))
scheduled_jobs = set()
# First we handle already scheduled jobs, so that they are properly acknowledged by the algorithm
org_jobs = schedule.jobs_running_after(start_at).jobs
running_jobs = list()
for j in sorted(org_jobs, key=lambda x: x.S):
if j.S < start_at:
running_jobs.append(Job(j.S - start_at + j.p, j.r))
else:
running_jobs.append(Job(j.p, j.r))
jobs = list(running_jobs) + jobs
rc = Decimal(0)
while jobs:
t, machine, jr = action_points[0]
heapq.heappop(action_points)
rc -= Decimal(jr)
while action_points and action_points[0][0] <= t:
_, m_1, jr_1 = heapq.heappop(action_points)
free_machines.add(m_1)
rc -= Decimal(jr_1)
for j in jobs:
if free_machines or machine is not None:
if len(free_machines) == m or (len(free_machines) == m - 1 and machine is not None):
                        # Due to floating-point ops it may be slightly above 0 even if all
                        # machines are free, which breaks the algorithm.
rc = Decimal(0)
if rc + Decimal(j.r) <= 1:
if machine is None:
machine = free_machines.pop()
rc += Decimal(j.r)
if j not in running_jobs:
schedule.schedule(j, t, schedule.get_machine_by_number(machine), check_for_machine=False)
heapq.heappush(action_points, (t + j.p, machine, j.r))
scheduled_jobs.add(j)
if free_machines:
machine = free_machines.pop()
else:
machine = None
else:
break
if machine is not None:
free_machines.add(machine)
jobs = [j for j in jobs if j not in scheduled_jobs]
return schedule
def _run(self, start_at=0):
return self._list_scheduler_run(self.instance.machines_count,
self.instance.jobs.jobs,
start_at=start_at)
class LPT(ListScheduler):
"""List scheduler with priorities of decreasing job processing times."""
def _run(self, start_at=0):
return self._list_scheduler_run(self.instance.machines_count,
self.instance.jobs.by_length_descending(),
start_at=start_at)
class HRR(ListScheduler):
"""List scheduler with priorities of decreasing job resource requirements."""
def _run(self, start_at=0):
return self._list_scheduler_run(self.instance.machines_count,
self.instance.jobs.by_resource_descending(),
start_at=start_at)
class LRR(ListScheduler):
"""List scheduler with priorities of increasing job resource requirements."""
def _run(self, start_at=0):
return self._list_scheduler_run(self.instance.machines_count,
self.instance.jobs.by_resource_ascending(),
start_at=start_at)
class RAND(ListScheduler):
"""List scheduler with random priorities."""
def _run(self, start_at=0):
jobs = list(self.instance.jobs.jobs)[:]
random.shuffle(jobs)
return self._list_scheduler_run(self.instance.machines_count, jobs, start_at=start_at)
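# --- Hedged note (not part of the original module) ---
# The subclasses above differ only in the order of the job list handed to
# _list_scheduler_run. For example, LPT- and HRR-style orderings can be
# sketched in plain Python (assuming, as in _list_scheduler_run, that a Job
# exposes its processing time as `p` and its resource requirement as `r`):
#
#   jobs_by_lpt = sorted(jobs, key=lambda j: j.p, reverse=True)   # longest job first
#   jobs_by_hrr = sorted(jobs, key=lambda j: j.r, reverse=True)   # most resource-hungry first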
| 38.509091
| 117
| 0.552644
|
aac60aac253b8fe9c2dfcc26b48a8c667b5db072
| 10,269
|
py
|
Python
|
env/lib/python2.7/site-packages/django/db/backends/postgresql_psycopg2/introspection.py
|
diego-d5000/MisValesMd
|
b641782bc2546776e9f55f452ec7fb48100dc482
|
[
"MIT"
] | null | null | null |
env/lib/python2.7/site-packages/django/db/backends/postgresql_psycopg2/introspection.py
|
diego-d5000/MisValesMd
|
b641782bc2546776e9f55f452ec7fb48100dc482
|
[
"MIT"
] | null | null | null |
env/lib/python2.7/site-packages/django/db/backends/postgresql_psycopg2/introspection.py
|
diego-d5000/MisValesMd
|
b641782bc2546776e9f55f452ec7fb48100dc482
|
[
"MIT"
] | null | null | null |
from __future__ import unicode_literals
from collections import namedtuple
from django.db.backends.base.introspection import (
BaseDatabaseIntrospection, FieldInfo, TableInfo,
)
from django.utils.encoding import force_text
FieldInfo = namedtuple('FieldInfo', FieldInfo._fields + ('default',))
class DatabaseIntrospection(BaseDatabaseIntrospection):
# Maps type codes to Django Field types.
data_types_reverse = {
16: 'BooleanField',
17: 'BinaryField',
20: 'BigIntegerField',
21: 'SmallIntegerField',
23: 'IntegerField',
25: 'TextField',
700: 'FloatField',
701: 'FloatField',
869: 'GenericIPAddressField',
1042: 'CharField', # blank-padded
1043: 'CharField',
1082: 'DateField',
1083: 'TimeField',
1114: 'DateTimeField',
1184: 'DateTimeField',
1266: 'TimeField',
1700: 'DecimalField',
}
ignored_tables = []
def get_field_type(self, data_type, description):
field_type = super(DatabaseIntrospection, self).get_field_type(data_type, description)
if field_type == 'IntegerField' and description.default and 'nextval' in description.default:
return 'AutoField'
return field_type
def get_table_list(self, cursor):
"""
Returns a list of table and view names in the current database.
"""
cursor.execute("""
SELECT c.relname, c.relkind
FROM pg_catalog.pg_class c
LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
WHERE c.relkind IN ('r', 'v')
AND n.nspname NOT IN ('pg_catalog', 'pg_toast')
AND pg_catalog.pg_table_is_visible(c.oid)""")
return [TableInfo(row[0], {'r': 't', 'v': 'v'}.get(row[1]))
for row in cursor.fetchall()
if row[0] not in self.ignored_tables]
def get_table_description(self, cursor, table_name):
"Returns a description of the table, with the DB-API cursor.description interface."
# As cursor.description does not return reliably the nullable property,
# we have to query the information_schema (#7783)
cursor.execute("""
SELECT column_name, is_nullable, column_default
FROM information_schema.columns
WHERE table_name = %s""", [table_name])
field_map = {line[0]: line[1:] for line in cursor.fetchall()}
cursor.execute("SELECT * FROM %s LIMIT 1" % self.connection.ops.quote_name(table_name))
return [FieldInfo(*((force_text(line[0]),) + line[1:6]
+ (field_map[force_text(line[0])][0] == 'YES', field_map[force_text(line[0])][1])))
for line in cursor.description]
def get_relations(self, cursor, table_name):
"""
Returns a dictionary of {field_name: (field_name_other_table, other_table)}
representing all relationships to the given table.
"""
cursor.execute("""
SELECT c2.relname, a1.attname, a2.attname
FROM pg_constraint con
LEFT JOIN pg_class c1 ON con.conrelid = c1.oid
LEFT JOIN pg_class c2 ON con.confrelid = c2.oid
LEFT JOIN pg_attribute a1 ON c1.oid = a1.attrelid AND a1.attnum = con.conkey[1]
LEFT JOIN pg_attribute a2 ON c2.oid = a2.attrelid AND a2.attnum = con.confkey[1]
WHERE c1.relname = %s
AND con.contype = 'f'""", [table_name])
relations = {}
for row in cursor.fetchall():
relations[row[1]] = (row[2], row[0])
return relations
def get_key_columns(self, cursor, table_name):
key_columns = []
cursor.execute("""
SELECT kcu.column_name, ccu.table_name AS referenced_table, ccu.column_name AS referenced_column
FROM information_schema.constraint_column_usage ccu
LEFT JOIN information_schema.key_column_usage kcu
ON ccu.constraint_catalog = kcu.constraint_catalog
AND ccu.constraint_schema = kcu.constraint_schema
AND ccu.constraint_name = kcu.constraint_name
LEFT JOIN information_schema.table_constraints tc
ON ccu.constraint_catalog = tc.constraint_catalog
AND ccu.constraint_schema = tc.constraint_schema
AND ccu.constraint_name = tc.constraint_name
WHERE kcu.table_name = %s AND tc.constraint_type = 'FOREIGN KEY'""", [table_name])
key_columns.extend(cursor.fetchall())
return key_columns
def get_indexes(self, cursor, table_name):
# This query retrieves each index on the given table, including the
# first associated field name
cursor.execute("""
SELECT attr.attname, idx.indkey, idx.indisunique, idx.indisprimary
FROM pg_catalog.pg_class c, pg_catalog.pg_class c2,
pg_catalog.pg_index idx, pg_catalog.pg_attribute attr
WHERE c.oid = idx.indrelid
AND idx.indexrelid = c2.oid
AND attr.attrelid = c.oid
AND attr.attnum = idx.indkey[0]
AND c.relname = %s""", [table_name])
indexes = {}
for row in cursor.fetchall():
# row[1] (idx.indkey) is stored in the DB as an array. It comes out as
# a string of space-separated integers. This designates the field
# indexes (1-based) of the fields that have indexes on the table.
# Here, we skip any indexes across multiple fields.
if ' ' in row[1]:
continue
if row[0] not in indexes:
indexes[row[0]] = {'primary_key': False, 'unique': False}
# It's possible to have the unique and PK constraints in separate indexes.
if row[3]:
indexes[row[0]]['primary_key'] = True
if row[2]:
indexes[row[0]]['unique'] = True
return indexes
def get_constraints(self, cursor, table_name):
"""
Retrieves any constraints or keys (unique, pk, fk, check, index) across one or more columns.
"""
constraints = {}
# Loop over the key table, collecting things as constraints
# This will get PKs, FKs, and uniques, but not CHECK
cursor.execute("""
SELECT
kc.constraint_name,
kc.column_name,
c.constraint_type,
array(SELECT table_name::text || '.' || column_name::text
FROM information_schema.constraint_column_usage
WHERE constraint_name = kc.constraint_name)
FROM information_schema.key_column_usage AS kc
JOIN information_schema.table_constraints AS c ON
kc.table_schema = c.table_schema AND
kc.table_name = c.table_name AND
kc.constraint_name = c.constraint_name
WHERE
kc.table_schema = %s AND
kc.table_name = %s
ORDER BY kc.ordinal_position ASC
""", ["public", table_name])
for constraint, column, kind, used_cols in cursor.fetchall():
# If we're the first column, make the record
if constraint not in constraints:
constraints[constraint] = {
"columns": [],
"primary_key": kind.lower() == "primary key",
"unique": kind.lower() in ["primary key", "unique"],
"foreign_key": tuple(used_cols[0].split(".", 1)) if kind.lower() == "foreign key" else None,
"check": False,
"index": False,
}
# Record the details
constraints[constraint]['columns'].append(column)
# Now get CHECK constraint columns
cursor.execute("""
SELECT kc.constraint_name, kc.column_name
FROM information_schema.constraint_column_usage AS kc
JOIN information_schema.table_constraints AS c ON
kc.table_schema = c.table_schema AND
kc.table_name = c.table_name AND
kc.constraint_name = c.constraint_name
WHERE
c.constraint_type = 'CHECK' AND
kc.table_schema = %s AND
kc.table_name = %s
""", ["public", table_name])
for constraint, column in cursor.fetchall():
# If we're the first column, make the record
if constraint not in constraints:
constraints[constraint] = {
"columns": [],
"primary_key": False,
"unique": False,
"foreign_key": None,
"check": True,
"index": False,
}
# Record the details
constraints[constraint]['columns'].append(column)
# Now get indexes
cursor.execute("""
SELECT
c2.relname,
ARRAY(
SELECT (SELECT attname FROM pg_catalog.pg_attribute WHERE attnum = i AND attrelid = c.oid)
FROM unnest(idx.indkey) i
),
idx.indisunique,
idx.indisprimary
FROM pg_catalog.pg_class c, pg_catalog.pg_class c2,
pg_catalog.pg_index idx
WHERE c.oid = idx.indrelid
AND idx.indexrelid = c2.oid
AND c.relname = %s
""", [table_name])
for index, columns, unique, primary in cursor.fetchall():
if index not in constraints:
constraints[index] = {
"columns": list(columns),
"primary_key": primary,
"unique": unique,
"foreign_key": None,
"check": False,
"index": True,
}
return constraints
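# --- Hedged usage sketch (not part of the original module) ---
# How this introspection class is usually reached in practice, assuming a
# configured PostgreSQL connection; "my_table" is a made-up table name.
#
#   from django.db import connection
#   with connection.cursor() as cursor:
#       tables = connection.introspection.get_table_list(cursor)
#       description = connection.introspection.get_table_description(cursor, "my_table")
#       constraints = connection.introspection.get_constraints(cursor, "my_table")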
| 45.039474
| 113
| 0.557114
|
b08d29a5f920704f166374f6b3cf387611a728e2
| 841
|
py
|
Python
|
examples/simple_example_1.py
|
tnarg999/CS238FinalProject
|
0ecbb66678f60d6cc525936e737d310361f171ad
|
[
"MIT"
] | 1
|
2019-11-27T20:06:27.000Z
|
2019-11-27T20:06:27.000Z
|
examples/simple_example_1.py
|
tnarg999/CS238FinalProject
|
0ecbb66678f60d6cc525936e737d310361f171ad
|
[
"MIT"
] | null | null | null |
examples/simple_example_1.py
|
tnarg999/CS238FinalProject
|
0ecbb66678f60d6cc525936e737d310361f171ad
|
[
"MIT"
] | 1
|
2019-12-05T03:16:46.000Z
|
2019-12-05T03:16:46.000Z
|
from flatland.envs.rail_env import RailEnv
from flatland.envs.rail_generators import rail_from_manual_specifications_generator
from flatland.utils.rendertools import RenderTool
# Example: generate a rail given a manual specification,
# a map of tuples (cell_type, rotation)
specs = [[(0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0)],
[(0, 0), (0, 0), (0, 0), (0, 0), (7, 0), (0, 0)],
[(7, 270), (1, 90), (1, 90), (1, 90), (2, 90), (7, 90)],
[(0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0)]]
env = RailEnv(width=6, height=4, rail_generator=rail_from_manual_specifications_generator(specs), number_of_agents=1)
env.reset()
env_renderer = RenderTool(env)
env_renderer.render_env(show=True, show_predictions=False, show_observations=False)
# uncomment to keep the renderer open
input("Press Enter to continue...")
| 38.227273
| 117
| 0.65874
|
99e266a9a7e099b2b49741f3e20f3ad5d087c4a4
| 832
|
py
|
Python
|
Tests/interop/com/compat/hw_client.py
|
aisk/ironpython3
|
d492fd811a0cee4d0a07cd46f02a29a3c90d964b
|
[
"Apache-2.0"
] | 1,872
|
2015-01-02T18:56:47.000Z
|
2022-03-31T07:34:39.000Z
|
Tests/interop/com/compat/hw_client.py
|
aisk/ironpython3
|
d492fd811a0cee4d0a07cd46f02a29a3c90d964b
|
[
"Apache-2.0"
] | 675
|
2015-02-27T09:01:01.000Z
|
2022-03-31T14:03:25.000Z
|
Tests/interop/com/compat/hw_client.py
|
aisk/ironpython3
|
d492fd811a0cee4d0a07cd46f02a29a3c90d964b
|
[
"Apache-2.0"
] | 278
|
2015-01-02T03:48:20.000Z
|
2022-03-29T20:40:44.000Z
|
# Licensed to the .NET Foundation under one or more agreements.
# The .NET Foundation licenses this file to you under the Apache 2.0 License.
# See the LICENSE file in the project root for more information.
'''
For the time being this is a minimal sanity check designed to ensure IP can access
COM servers implemented in pywin32.
'''
import sys
from interop.com.compat.hw import hw_progid, hw_retval
from iptest.cominterop_util import *
if sys.implementation.name == "ironpython":
from System import Type, Activator
type = Type.GetTypeFromProgID(hw_progid)
com_obj = Activator.CreateInstance(type)
else:
import win32com.client
com_obj = win32com.client.Dispatch(hw_progid)
print("dir(obj):", dir(com_obj))
print()
print("comMethod():", com_obj.comMethod(None))
AreEqual(com_obj.comMethod(None), hw_retval)
| 29.714286
| 82
| 0.760817
|
7a622807d9cb870fbf9ac357aeae47e24a0c797a
| 2,201
|
py
|
Python
|
euca2ools/commands/iam/listroles.py
|
sjones4/euca2ools
|
03b0e421eeebd8f402422a0ad6994bd6ee4e4127
|
[
"BSD-2-Clause"
] | null | null | null |
euca2ools/commands/iam/listroles.py
|
sjones4/euca2ools
|
03b0e421eeebd8f402422a0ad6994bd6ee4e4127
|
[
"BSD-2-Clause"
] | null | null | null |
euca2ools/commands/iam/listroles.py
|
sjones4/euca2ools
|
03b0e421eeebd8f402422a0ad6994bd6ee4e4127
|
[
"BSD-2-Clause"
] | 2
|
2016-06-24T20:19:40.000Z
|
2020-02-05T10:50:19.000Z
|
# Copyright 2014 Eucalyptus Systems, Inc.
#
# Redistribution and use of this software in source and binary forms,
# with or without modification, are permitted provided that the following
# conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from requestbuilder import Arg
from requestbuilder.response import PaginatedResponse
from euca2ools.commands.iam import IAMRequest, AS_ACCOUNT
class ListRoles(IAMRequest):
DESCRIPTION = "List your account's roles"
ARGS = [Arg('-p', '--path-prefix', dest='PathPrefix', metavar='PREFIX',
                help='limit results to roles whose path begins with a given prefix'),
AS_ACCOUNT]
LIST_TAGS = ['Roles']
def main(self):
return PaginatedResponse(self, (None,), ('Roles',))
def prepare_for_page(self, page):
# Pages are defined by markers
self.params['Marker'] = page
def get_next_page(self, response):
if response.get('IsTruncated') == 'true':
return response['Marker']
def print_result(self, result):
for role in result.get('Roles', []):
print role['Arn']
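# --- Hedged note (not part of the original module) ---
# A rough sketch of the pagination contract used above: requestbuilder's
# PaginatedResponse keeps requesting pages until get_next_page() returns None,
# i.e. approximately:
#
#   page = None
#   while True:
#       self.prepare_for_page(page)          # sets params['Marker'] (no marker on page 1)
#       response = send_request()            # hypothetical stand-in for the actual call
#       page = self.get_next_page(response)
#       if page is None:
#           break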
| 41.528302
| 75
| 0.729668
|
90ca1523c40f73a27695c958d954f701d7fd9e9a
| 8,806
|
py
|
Python
|
pages/preprocess.py
|
raniabrn/Streamlit-Application
|
1abf667a9ef7c4462864b7e4991799cd63b9f0cb
|
[
"MIT"
] | 1
|
2022-03-26T17:57:38.000Z
|
2022-03-26T17:57:38.000Z
|
pages/preprocess.py
|
raniabrn/Streamlit-Application
|
1abf667a9ef7c4462864b7e4991799cd63b9f0cb
|
[
"MIT"
] | 1
|
2022-03-28T17:12:03.000Z
|
2022-03-28T17:12:03.000Z
|
pages/preprocess.py
|
RacimRgh/generic-dataset-analysis
|
abbb3ff10ab8b16af471a363defd567c8f3ebd2f
|
[
"MIT"
] | null | null | null |
import streamlit as st
import os
import pandas as pd
from pandas.api.types import is_numeric_dtype
import numpy as np
import time
from sklearn.preprocessing import LabelEncoder as le
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import MinMaxScaler
def standardise_column(df, df_last):
df_last = df.copy()
df = StandardScaler().fit_transform(df)
return df, df_last
def normalise_column(df, df_last):
    # Keep a copy of the previous state so "Revert last change" also works
    # after a normalisation, mirroring standardise_column above.
    df_last = df.copy()
    min_max_scaler = MinMaxScaler()
    x_scaled = min_max_scaler.fit_transform(df)
    df_normalized = pd.DataFrame(x_scaled)
    return df_normalized, df_last
def chg_type(df, df_last, newtype, column_name):
df_last = df.copy()
if newtype == 'numerical':
df[column_name] = pd.to_numeric(df[column_name], errors='ignore')
elif newtype == 'object':
df = df.astype({column_name: str}, errors='ignore')
elif newtype == 'categorical':
df = df.astype({column_name: 'category'}, errors='ignore')
df.to_csv('data.csv', index=False)
st.success("Your changes have been made!")
return df, df_last
def dumm(df, df_last, column_name):
df_last = df.copy()
df = pd.get_dummies(df, columns=[column_name])
df.to_csv('data.csv', index=False)
return df, df_last
def onSubmit(df, df_last, del_na, replace_vals, new_num_val, new_cat_val):
df_last = df.copy()
categorical = df.select_dtypes(
include=['object']).columns.values
numerical = df.select_dtypes(include=[np.number]).columns.values
if del_na == "Drop columns":
df.dropna(axis="columns", how="any", inplace=True)
elif del_na == "Drop rows":
df.dropna(axis="rows", how="any", inplace=True)
elif del_na == "Replace values":
for num in numerical:
if replace_vals == "Average/Most frequent":
new_na_val = df[num].mean()
df[num] = df[num].fillna(new_na_val)
elif replace_vals == "Interpolation":
df[num] = df[num].interpolate(
method='linear', limit_direction='forward')
elif replace_vals == "Value":
df[num].fillna(new_num_val)
for cat in categorical:
if replace_vals == "Average/Most frequent":
new_na_val = df[cat].mode()
df[cat] = df[cat].fillna(new_na_val)
elif replace_vals == "Value":
df[cat].fillna(new_cat_val)
df.to_csv('data.csv', index=False)
return df, df_last
def app():
if 'data.csv' not in os.listdir(os.getcwd()):
st.markdown("Please upload data through `Upload Data` page!")
else:
with st.spinner("Loading the cached dataset, please wait..."):
df = pd.read_csv('data.csv')
df.to_csv('data_og.csv', index=False)
df_last = df.copy()
st.title('Preprocessing')
"""
Change the columns types one by one
"""
st.markdown("## Change dataframe columns")
# Use two column technique
col1, col2, col3 = st.columns(3)
column_name = col1.selectbox("Select Column", df.columns)
if is_numeric_dtype(df[column_name]):
current_type = 'numerical'
else:
try:
numvals = len(np.unique(df[column_name])
) < 0.2 * len(df[column_name])
except Exception as ex:
df[column_name] = le().fit_transform(df[column_name])
numvals = len(np.unique(df[column_name])
) < 0.2 * len(df[column_name])
if numvals:
current_type = 'categorical'
else:
current_type = 'object'
column_options = ['numerical', 'categorical', 'object']
current_index = column_options.index(current_type)
newtype = col2.selectbox("Select Column Type",
options=column_options, index=current_index)
newname = col3.text_input('New column name', value=column_name)
e1, e2, e3 = st.columns(3)
e1.write("""Select your column column_name and the new type from the data.
To submit all the changes, click on *Submit changes* """)
if e2.button("Change Column Type"):
with st.spinner("Modifying type..."):
df, df_last = chg_type(df, df_last, newtype, column_name)
if e3.button("Change column name"):
df.rename(columns={column_name: newname}, inplace=True)
"""
Check for NA values and show the choices if they exist
- Drop rows containing at least one NA value
- Drop the entire column containing NA values
- Replace the values with: Average/Most frequent, Interpolation (num), Typing
"""
if pd.isna(df).any().any():
st.markdown("## List of columns with NA values: ")
col1, col2, col3, col4, col5, col6 = st.columns(6)
j = 0
for d in df.columns:
if df[d].isna().sum():
if j == 0:
col1.info(d)
elif j == 1:
col2.info(d)
                    else:
                        col3.info(d)
                        j = -1  # wrap back to the first column on the next pass
                    j += 1
if len([d for d in df.columns if df[d].isna().sum()]) > 0:
replace_vals, new_cat_val, new_num_val = None, None, None
del_na = col4.radio(
"Action:", ("Drop rows", "Drop columns", "Replace values"))
if del_na == 'Replace values':
replace_vals = col5.radio(
"With:", ("Average/Most frequent", "Interpolation", "Value"))
if replace_vals == "Value":
new_num_val = st.text_input(
"Replacement for numerical NA")
new_cat_val = st.text_input(
"Replacement for categorical NA")
b1, b2, b3, b4, b5 = st.columns(5)
if b5.button("Submit"):
with st.spinner("Please wait ..."):
df, df_last = onSubmit(
df, df_last, del_na, replace_vals, new_num_val, new_cat_val)
time.sleep(1)
df = pd.read_csv('data.csv')
st.success("Your changes have been made!")
st.markdown('## Ratio of different values and dummification')
col1, col2, col3, col4, col5, col6, col7, col8 = st.columns(8)
col1.metric(label="Total number of values", value=len(df[column_name]),
delta_color="off")
col2.metric(label="Number of different values", value=len(np.unique(df[column_name])),
delta_color="off")
if col6.button("Dummify current column", key=current_index):
with st.spinner("Please wait ..."):
df, df_last = dumm(df, df_last, column_name)
time.sleep(1)
st.success("Your changes have been made!")
if col7.button("Delete current column"):
with st.spinner("Processing..."):
df_last = df.copy()
df.drop(column_name, inplace=True, axis=1)
df.to_csv('data.csv', index=False)
st.success("Your changes have been made!")
b1, b2, b3, b4, b5, b6, b7, b8 = st.columns(8)
if b5.button("Standardise dataset"):
with st.spinner("Please wait ..."):
df, df_last = standardise_column(df, df_last)
time.sleep(1)
st.success("Your changes have been made!")
if b6.button("Normalise dataset"):
with st.spinner("Please wait ..."):
df, df_last = normalise_column(df, df_last)
time.sleep(1)
st.success("Your changes have been made!")
if b7.button("Revert last change"):
df = df_last.copy()
df.to_csv('data.csv', index=False)
st.success("Your changes have been Reverted!")
if b8.button("Revert all changes"):
            df = pd.read_csv('data_og.csv')
            df.to_csv('data.csv', index=False)
st.success("Your changes have been reverted!")
st.markdown('## Dataframe columns and types')
c = st.columns(6)
c[0].write('Column')
c[1].write('Type')
c[2].write('Column')
c[3].write('Type')
c[4].write('Column')
c[5].write('Type')
i = 0
for idx, val in enumerate(df.columns):
if i == 6:
i = 0
c[i].write(val)
c[i+1].write(df[val].dtype)
i += 2
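# --- Hedged usage sketch (not part of the original page) ---
# Toy illustration of the two scaling helpers defined at the top of this file,
# assuming an all-numeric frame (both delegate to scikit-learn scalers); the
# column names and values are made up.
#
#   toy = pd.DataFrame({"a": [1.0, 2.0, 3.0], "b": [10.0, 20.0, 30.0]})
#   scaled, previous = standardise_column(toy, None)   # zero mean / unit variance
#   normed, previous = normalise_column(toy, None)     # rescaled to [0, 1]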
| 37.47234
| 94
| 0.545764
|
dc0424b068661ba954f18e1693c7bd4868a2aae2
| 10,391
|
py
|
Python
|
fuel_agent/fuel_agent/tests/test_partition.py
|
Zipfer/fuel-web
|
c6c4032eb6e29474e2be0318349265bdb566454c
|
[
"Apache-2.0"
] | null | null | null |
fuel_agent/fuel_agent/tests/test_partition.py
|
Zipfer/fuel-web
|
c6c4032eb6e29474e2be0318349265bdb566454c
|
[
"Apache-2.0"
] | null | null | null |
fuel_agent/fuel_agent/tests/test_partition.py
|
Zipfer/fuel-web
|
c6c4032eb6e29474e2be0318349265bdb566454c
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from oslotest import base as test_base
from fuel_agent import errors
from fuel_agent.objects import partition
class TestMD(test_base.BaseTestCase):
def setUp(self):
super(TestMD, self).setUp()
self.md = partition.Md('name', 'level')
def test_add_device_ok(self):
self.assertEqual(0, len(self.md.devices))
self.md.add_device('device')
self.assertEqual(1, len(self.md.devices))
self.assertEqual('device', self.md.devices[0])
def test_add_device_in_spares_fail(self):
self.assertEqual(0, len(self.md.devices))
self.assertEqual(0, len(self.md.spares))
self.md.add_spare('device')
self.assertRaises(errors.MDDeviceDuplicationError, self.md.add_device,
'device')
def test_add_device_in_devices_fail(self):
self.assertEqual(0, len(self.md.devices))
self.assertEqual(0, len(self.md.spares))
self.md.add_device('device')
self.assertRaises(errors.MDDeviceDuplicationError, self.md.add_device,
'device')
def test_add_spare_in_spares_fail(self):
self.assertEqual(0, len(self.md.devices))
self.assertEqual(0, len(self.md.spares))
self.md.add_spare('device')
self.assertRaises(errors.MDDeviceDuplicationError, self.md.add_spare,
'device')
def test_add_spare_in_devices_fail(self):
self.assertEqual(0, len(self.md.devices))
self.assertEqual(0, len(self.md.spares))
self.md.add_device('device')
self.assertRaises(errors.MDDeviceDuplicationError, self.md.add_spare,
'device')
class TestPartition(test_base.BaseTestCase):
def setUp(self):
super(TestPartition, self).setUp()
self.pt = partition.Partition('name', 'count', 'device', 'begin',
'end', 'partition_type')
def test_set_flag(self):
self.assertEqual(0, len(self.pt.flags))
self.pt.set_flag('fake_flag')
self.assertEqual(1, len(self.pt.flags))
self.assertIn('fake_flag', self.pt.flags)
class TestPartitionScheme(test_base.BaseTestCase):
def setUp(self):
super(TestPartitionScheme, self).setUp()
self.p_scheme = partition.PartitionScheme()
def test_root_device_not_found(self):
self.assertRaises(errors.WrongPartitionSchemeError,
self.p_scheme.root_device)
def test_fs_by_device(self):
expected_fs = partition.Fs('device')
self.p_scheme.fss.append(expected_fs)
self.p_scheme.fss.append(partition.Fs('wrong_device'))
actual_fs = self.p_scheme.fs_by_device('device')
self.assertEqual(expected_fs, actual_fs)
def test_fs_by_mount(self):
expected_fs = partition.Fs('d', mount='mount')
self.p_scheme.fss.append(expected_fs)
self.p_scheme.fss.append(partition.Fs('w_d', mount='wrong_mount'))
actual_fs = self.p_scheme.fs_by_mount('mount')
self.assertEqual(expected_fs, actual_fs)
def test_pv_by_name(self):
expected_pv = partition.Pv('pv')
self.p_scheme.pvs.append(expected_pv)
self.p_scheme.pvs.append(partition.Pv('wrong_pv'))
actual_pv = self.p_scheme.pv_by_name('pv')
self.assertEqual(expected_pv, actual_pv)
def test_vg_by_name(self):
expected_vg = partition.Vg('vg')
self.p_scheme.vgs.append(expected_vg)
self.p_scheme.vgs.append(partition.Vg('wrong_vg'))
actual_vg = self.p_scheme.vg_by_name('vg')
self.assertEqual(expected_vg, actual_vg)
def test_vg_attach_by_name(self):
self.p_scheme.vg_attach_by_name('pvname', 'vgname')
self.assertEqual(1, len(self.p_scheme.pvs))
self.assertEqual(1, len(self.p_scheme.vgs))
self.assertIn('pvname', self.p_scheme.vgs[0].pvnames)
self.assertIn('vgname', self.p_scheme.vgs[0].name)
def test_md_next_name_ok(self):
expected_name = '/dev/md0'
self.assertEqual(expected_name, self.p_scheme.md_next_name())
def test_md_next_name_fail(self):
self.p_scheme.mds = [
partition.Md('/dev/md%s' % x, 'level') for x in range(0, 128)]
self.assertRaises(errors.MDAlreadyExistsError,
self.p_scheme.md_next_name)
def test_md_by_name(self):
self.assertEqual(0, len(self.p_scheme.mds))
expected_md = partition.Md('name', 'level')
self.p_scheme.mds.append(expected_md)
self.p_scheme.mds.append(partition.Md('wrong_name', 'level'))
self.assertEqual(expected_md, self.p_scheme.md_by_name('name'))
def test_md_by_mount(self):
self.assertEqual(0, len(self.p_scheme.mds))
self.assertEqual(0, len(self.p_scheme.fss))
expected_md = partition.Md('name', 'level')
expected_fs = partition.Fs('name', mount='mount')
self.p_scheme.mds.append(expected_md)
self.p_scheme.fss.append(expected_fs)
self.p_scheme.fss.append(partition.Fs('wrong_name',
mount='wrong_mount'))
self.assertEqual(expected_md, self.p_scheme.md_by_mount('mount'))
def test_md_attach_by_mount_md_exists(self):
self.assertEqual(0, len(self.p_scheme.mds))
self.assertEqual(0, len(self.p_scheme.fss))
expected_md = partition.Md('name', 'level')
expected_fs = partition.Fs('name', mount='mount')
self.p_scheme.mds.append(expected_md)
self.p_scheme.fss.append(expected_fs)
actual_md = self.p_scheme.md_attach_by_mount('device', 'mount')
self.assertIn('device', actual_md.devices)
self.assertEqual(expected_md, actual_md)
def test_md_attach_by_mount_no_md(self):
self.assertEqual(0, len(self.p_scheme.mds))
self.assertEqual(0, len(self.p_scheme.fss))
actual_md = self.p_scheme.md_attach_by_mount(
'device', 'mount', fs_type='fs_type', fs_options='-F',
fs_label='fs_label', name='name', level='level')
self.assertIn('device', actual_md.devices)
self.assertEqual(1, len(self.p_scheme.fss))
self.assertEqual('name', self.p_scheme.fss[0].device)
self.assertEqual('mount', self.p_scheme.fss[0].mount)
self.assertEqual('fs_type', self.p_scheme.fss[0].type)
self.assertEqual('fs_label', self.p_scheme.fss[0].label)
self.assertEqual('-F', self.p_scheme.fss[0].options)
class TestParted(test_base.BaseTestCase):
def setUp(self):
super(TestParted, self).setUp()
self.prtd = partition.Parted('name', 'label')
@mock.patch.object(partition.Parted, 'next_count')
@mock.patch.object(partition.Parted, 'next_type')
def test_next_name_none(self, nt_mock, nc_mock):
nc_mock.return_value = 1
nt_mock.return_value = 'extended'
self.assertEqual(None, self.prtd.next_name())
@mock.patch.object(partition.Parted, 'next_count')
@mock.patch.object(partition.Parted, 'next_type')
def test_next_name_no_separator(self, nt_mock, nc_mock):
nc_mock.return_value = 1
nt_mock.return_value = 'not_extended'
expected_name = '%s%s' % (self.prtd.name, 1)
self.assertEqual(expected_name, self.prtd.next_name())
@mock.patch.object(partition.Parted, 'next_count')
@mock.patch.object(partition.Parted, 'next_type')
def test_next_name_with_separator(self, nt_mock, nc_mock):
nc_mock.return_value = 1
nt_mock.return_value = 'not_extended'
self.prtd.name = 'cciss or loop'
expected_name = '%sp%s' % (self.prtd.name, 1)
self.assertEqual(expected_name, self.prtd.next_name())
def test_next_begin_empty_partitions(self):
self.assertEqual(1, self.prtd.next_begin())
def test_next_begin_last_extended_partition(self):
self.prtd.partitions.append(
partition.Partition('name', 'count', 'device', 'begin', 'end',
'extended'))
self.assertEqual('begin', self.prtd.next_begin())
def test_next_begin_no_last_extended_partition(self):
self.prtd.partitions.append(
partition.Partition('name', 'count', 'device', 'begin', 'end',
'primary'))
self.assertEqual('end', self.prtd.next_begin())
def test_next_count_no_logical(self):
self.assertEqual(1, self.prtd.next_count('primary'))
def test_next_count_has_logical(self):
self.prtd.partitions.append(
partition.Partition('name', 'count', 'device', 'begin', 'end',
'logical'))
self.assertEqual(6, self.prtd.next_count('logical'))
def test_next_type_gpt(self):
self.prtd.label = 'gpt'
self.assertEqual('primary', self.prtd.next_type())
def test_next_type_no_extended(self):
self.prtd.label = 'msdos'
self.assertEqual('primary', self.prtd.next_type())
self.prtd.partitions.extend(
3 * [partition.Partition('name', 'count', 'device', 'begin',
'end', 'primary')])
self.assertEqual('extended', self.prtd.next_type())
def test_next_type_has_extended(self):
self.prtd.label = 'msdos'
self.prtd.partitions.append(
partition.Partition('name', 'count', 'device', 'begin', 'end',
'extended'))
self.assertEqual('logical', self.prtd.next_type())
def test_primary(self):
expected_partitions = [partition.Partition('name', 'count', 'device',
'begin', 'end', 'primary')]
self.prtd.partitions.extend(expected_partitions)
self.assertEqual(expected_partitions, self.prtd.primary)
| 41.398406
| 78
| 0.649889
|
5a16b4af295628020448294ff4b390766d576997
| 2,423
|
py
|
Python
|
Data_loader/Data_loader_facenet_notmask.py
|
ZouJiu1/Mask_face_recognitionZ
|
bac006718627d869b8ffaaa2c0605a300efd35e8
|
[
"MIT"
] | 17
|
2020-12-29T06:00:35.000Z
|
2022-02-24T13:38:41.000Z
|
Data_loader/Data_loader_facenet_notmask.py
|
LY-Road/Mask-Face-Recognition
|
f256a38084073718628b99a09622f5c830a232e7
|
[
"MIT"
] | 2
|
2021-01-02T13:12:51.000Z
|
2021-07-03T04:37:18.000Z
|
Data_loader/Data_loader_facenet_notmask.py
|
LY-Road/Mask-Face-Recognition
|
f256a38084073718628b99a09622f5c830a232e7
|
[
"MIT"
] | 12
|
2021-01-06T08:38:04.000Z
|
2022-01-27T15:44:21.000Z
|
# Put the project root on sys.path
import sys
import os
sys.path.append(os.getcwd())
# Third-party imports
import torchvision.transforms as transforms
import torch
# Project imports
from Data_loader.Data_loader_test_notmask import TestDataset
from Data_loader.Data_loader_train_notmask import TrainDataset
from config_notmask import config
from Data_loader.Data_loadertest_mask import NOTLFWestNOTMaskDataset
# Transforms applied to training data
train_data_transforms = transforms.Compose([
# transforms.Resize([config['image_size'], config['image_size']]), # resize
    #transforms.RandomHorizontalFlip(),  # random horizontal flip
    transforms.ToTensor(),  # convert to tensor
transforms.Normalize(
mean=[0.5, 0.5, 0.5],
std=[0.5, 0.5, 0.5]
)
])
# Transforms applied to test data
test_data_transforms = transforms.Compose([
# transforms.Resize([config['image_size'], config['image_size']]), # resize
transforms.ToTensor(),
transforms.Normalize(
mean=[0.5, 0.5, 0.5],
std=[0.5, 0.5, 0.5]
)
])
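# Note (added): with mean=std=0.5 per channel, Normalize maps pixel values from
# [0, 1] to [-1, 1], i.e. x_norm = (x - 0.5) / 0.5 = 2 * x - 1.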
# Training data loader
train_dataloader = torch.utils.data.DataLoader(
dataset=TrainDataset(
face_dir=config['train_data_path'],
        mask_dir=config['mask_data_path'],
csv_name=config['train_data_index'],
num_triplets=config['num_train_triplets'],
training_triplets_path=config['train_triplets_path'],
transform=train_data_transforms,
predicter_path=config['predicter_path'],
img_size=config['image_size']
),
batch_size=config['train_batch_size'],
num_workers=config['num_workers'],
shuffle=False
)
# Test data loader
test_dataloader = torch.utils.data.DataLoader(
dataset=TestDataset(
dir=config['LFW_data_path'],
pairs_path=config['LFW_pairs'],
predicter_path=config['predicter_path'],
img_size=config['image_size'],
transform=test_data_transforms,
test_pairs_paths=config['test_pairs_paths']
),
batch_size=config['test_batch_size'],
num_workers=config['num_workers'],
shuffle=False
)
# Non-LFW, no-mask test data loader
NOTLFWestNOTMask_dataloader = torch.utils.data.DataLoader(
    dataset=NOTLFWestNOTMaskDataset(
dir=config['LFW_data_path'],
pairs_path=config['LFW_pairs'],
predicter_path=config['predicter_path'],
img_size=config['image_size'],
transform=test_data_transforms,
test_pairs_paths=config['test_pairs_paths']
),
batch_size=config['test_batch_size'],
num_workers=config['num_workers'],
shuffle=False
)
| 30.2875
| 79
| 0.70161
|
733f18477779ec9d7c71f76f8ec10dcb3070304c
| 1,234
|
py
|
Python
|
airflow/kubernetes/pod.py
|
takuti/airflow
|
0ac3b8c3dd749c59e60cf0169580b9e7c5049d9e
|
[
"Apache-2.0"
] | 27
|
2019-02-25T14:20:36.000Z
|
2022-03-22T09:35:13.000Z
|
airflow/kubernetes/pod.py
|
takuti/airflow
|
0ac3b8c3dd749c59e60cf0169580b9e7c5049d9e
|
[
"Apache-2.0"
] | 200
|
2019-01-09T15:33:06.000Z
|
2022-01-12T09:13:42.000Z
|
airflow/kubernetes/pod.py
|
takuti/airflow
|
0ac3b8c3dd749c59e60cf0169580b9e7c5049d9e
|
[
"Apache-2.0"
] | 14
|
2019-06-25T17:08:29.000Z
|
2022-03-29T13:25:53.000Z
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
This module is deprecated.
Please use :mod:`kubernetes.client.models` for V1ResourceRequirements and Port.
"""
# flake8: noqa
import warnings
with warnings.catch_warnings():
from airflow.providers.cncf.kubernetes.backcompat.pod import Port, Resources # noqa: autoflake
warnings.warn(
"This module is deprecated. Please use `kubernetes.client.models for V1ResourceRequirements and Port.",
DeprecationWarning,
stacklevel=2,
)
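# Illustrative only (not part of the original module): old imports keep working
# through this shim but surface the deprecation, e.g.
#   from airflow.kubernetes.pod import Port, Resources  # emits DeprecationWarning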
| 36.294118
| 107
| 0.769854
|
0df4965d7e95ee8803932ddebcedcc8bd36fdac9
| 2,004
|
py
|
Python
|
download_images_from_web.py
|
TonkWorks/download_images_from_a_website
|
41e893b3c81258eb572d5fcac517b847faf97446
|
[
"MIT"
] | null | null | null |
download_images_from_web.py
|
TonkWorks/download_images_from_a_website
|
41e893b3c81258eb572d5fcac517b847faf97446
|
[
"MIT"
] | null | null | null |
download_images_from_web.py
|
TonkWorks/download_images_from_a_website
|
41e893b3c81258eb572d5fcac517b847faf97446
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import traceback
import argparse
import os
import shutil
import re
#Function Info goes here.
__info__ = {
'title': "Download all images from a web page",
'description': "Downloadall images from a web page",
'url': "http://github.com/TonkWorks/download_images_from_a_website/archive/master.zip",
'author': "Kevin Dagostino",
'input': [
{
'label': 'Web Page URL to download pictures from',
'type': 'text',
'map': 'site_url',
}
]
}
#And the actual script.
def script():
import requests
from bs4 import BeautifulSoup
    parser = argparse.ArgumentParser()
    parser.add_argument('--site_url')
    args = parser.parse_args()
site_url = args.site_url
r = requests.get(site_url)
    soup = BeautifulSoup(r.text, "html.parser")  # explicit parser avoids bs4's "no parser specified" warning
#images = [a['src'] for a in soup.find_all("img", {"src": re.compile("gstatic.com")})]
images = soup.findAll("img")
print("Found " + str(len(images)) + " images.")
for image in images:
try:
#Make a filename for the image.
filename = image["src"].split("/")[-1]
filename ="".join([c for c in filename if c.isalpha() or c.isdigit() or c==' ' or c=='.']).rstrip() #Only valid OS chars
filename = os.path.join(os.getcwd(), filename)
#Fix relative images
if (not image["src"].lower().startswith("http") and not image["src"].lower().startswith("data") ):
#Relative image add full path
print(image["src"])
image["src"] = site_url + image["src"]
print(image["src"])
response = requests.get(image["src"])
if response.status_code == 200:
f = open(filename, 'wb')
f.write(response.content)
f.close()
#print (filename)
except Exception as e:
traceback.print_exc()
print(str(e))
if __name__ == '__main__':
script()
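# Example invocation (URL is illustrative):
#   python download_images_from_web.py --site_url http://example.com/gallery/
# Downloaded images are written to the current working directory.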
| 28.225352
| 132
| 0.566367
|
bc989ba45cca9b4e47c41015ce66d13cf298aa03
| 1,702
|
py
|
Python
|
Matrix-Layer-Rotation.py
|
akshatminocha/Demo
|
25513be11925aceaa69da96a1fa6ed0b546cdc70
|
[
"MIT"
] | 1
|
2019-02-23T15:58:11.000Z
|
2019-02-23T15:58:11.000Z
|
Matrix-Layer-Rotation.py
|
akshatminocha/Demo
|
25513be11925aceaa69da96a1fa6ed0b546cdc70
|
[
"MIT"
] | 1
|
2019-01-25T06:16:29.000Z
|
2019-01-25T06:17:07.000Z
|
Matrix-Layer-Rotation.py
|
akshatminocha/Demo
|
25513be11925aceaa69da96a1fa6ed0b546cdc70
|
[
"MIT"
] | null | null | null |
import math
import os
import random
import re
import sys
import copy
# Complete the matrixRotation function below.
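# Note: m (row count) and n (column count) are read from the module-level
# globals assigned in the __main__ block below.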
def matrixRotation(matrix, r):
for k in range(0,r):
a=copy.deepcopy(matrix)
b=0
while(b<int(min(m,n)/2)):
for i in range(0,m):
for j in range(0,n):
if(i==(0+b)):
if(j==(0+b)):
matrix[i][j]=a[i][j+1]
if(j>(0+b) and j<(n-1-b)):
matrix[i][j]=a[i][j+1]
if(j==(0+b)):
if(b==0):
if(i>(0+b)):
matrix[i][j]=a[i-1][j]
else:
if(i>(0+b) and i<m-1):
matrix[i][j]=a[i-1][j]
if(i==(m-1-b)):
if(b==0):
if(j>(0+b)):
matrix[i][j]=a[i][j-1]
else:
if(j>(0+b) and j<n-1):
matrix[i][j]=a[i][j-1]
if(j==(n-1-b)):
if(i<(m-1-b) and i>=b):
matrix[i][j]=a[i+1][j]
b=b+1
for i in range(0,m):
for j in range(0,n):
print(matrix[i][j],end='\t')
print()
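# Worked example (added): for the input
#   2 2 1
#   1 2
#   3 4
# the single layer is rotated counter-clockwise once and the function prints
#   2   4
#   1   3
# (columns are tab-separated by the printing loop above).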
if __name__ == '__main__':
mnr = input().rstrip().split()
m = int(mnr[0])
n = int(mnr[1])
r = int(mnr[2])
matrix = []
for _ in range(m):
matrix.append(list(map(int, input().rstrip().split())))
matrixRotation(matrix, r)
| 28.366667
| 74
| 0.331962
|
c994d04084133177a83b641e8213248d46f896d5
| 5,084
|
py
|
Python
|
src/openprocurement/tender/pricequotation/validation.py
|
ProzorroUKR/openprocurement.api
|
2855a99aa8738fb832ee0dbad4e9590bd3643511
|
[
"Apache-2.0"
] | 10
|
2020-02-18T01:56:21.000Z
|
2022-03-28T00:32:57.000Z
|
src/openprocurement/tender/pricequotation/validation.py
|
quintagroup/openprocurement.api
|
2855a99aa8738fb832ee0dbad4e9590bd3643511
|
[
"Apache-2.0"
] | 26
|
2018-07-16T09:30:44.000Z
|
2021-02-02T17:51:30.000Z
|
src/openprocurement/tender/pricequotation/validation.py
|
ProzorroUKR/openprocurement.api
|
2855a99aa8738fb832ee0dbad4e9590bd3643511
|
[
"Apache-2.0"
] | 15
|
2019-08-08T10:50:47.000Z
|
2022-02-05T14:13:36.000Z
|
from schematics.exceptions import ValidationError
from openprocurement.api.utils import raise_operation_error, get_first_revision_date
from openprocurement.api.constants import PQ_CRITERIA_RESPONSES_ALL_FROM
from openprocurement.api.validation import validate_data, OPERATIONS, validate_json_data
from openprocurement.tender.core.validation import TYPEMAP
from openprocurement.tender.pricequotation.constants import PROFILE_PATTERN
# tender documents
def validate_document_operation_in_not_allowed_period(request, **kwargs):
if request.validated["tender_status"] not in ["active.tendering", "draft"]:
raise_operation_error(
request,
"Can't {} document in current ({}) tender status".format(
OPERATIONS.get(request.method), request.validated["tender_status"]
),
)
# award
def validate_create_award_not_in_allowed_period(request, **kwargs):
tender = request.validated["tender"]
if tender.status != "active.qualification":
raise_operation_error(
request,
"Can't create award in current ({}) tender status".format(
tender.status
)
)
def validate_award_update_in_terminal_status(request, **kwargs):
award_status = request.validated['award'].status
if award_status in ('cancelled', 'unsuccessful'):
raise_operation_error(
request,
"Can't update award in current ({}) status".format(
award_status
)
)
# contract document
def validate_contract_document_operation(request, **kwargs):
operation = OPERATIONS.get(request.method)
if request.validated["tender_status"] not in\
["active.qualification", "active.awarded"]:
raise_operation_error(
request,
"Can't {} document in current ({}) tender status".format(
operation, request.validated["tender_status"]
),
)
if request.validated["contract"].status not in ["pending", "active"]:
raise_operation_error(
request,
"Can't {} document in current contract status".format(operation)
)
return True
def validate_patch_tender_data(request, **kwargs):
model = type(request.tender)
data = validate_data(request, model, True, validate_json_data(request))
_validate_kind_update(request, model)
return data
def _validate_kind_update(request, model):
data = request.validated["data"]
kind = data.get("procuringEntity", {}).get("kind", "")
if kind and kind not in model.procuring_entity_kinds:
request.errors.add(
"body", "kind",
"{kind!r} procuringEntity cannot publish this type of procedure. Only {kinds} are allowed.".format(
kind=kind, kinds=", ".join(model.procuring_entity_kinds)
)
)
request.errors.status = 403
def _validate_bid_value(tender, value):
if not value:
raise ValidationError("This field is required.")
if tender.value.amount < value.amount:
raise ValidationError("value of bid should be less than value of tender")
if tender.get("value").currency != value.currency:
raise ValidationError("currency of bid should be identical to currency of value of tender")
if tender.get("value").valueAddedTaxIncluded != value.valueAddedTaxIncluded:
raise ValidationError(
"valueAddedTaxIncluded of bid should be identical " "to valueAddedTaxIncluded of value of tender"
)
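# Illustrative only: with tender.value = {amount: 1000, currency: "UAH",
# valueAddedTaxIncluded: True}, a bid value of {amount: 1200, currency: "UAH",
# valueAddedTaxIncluded: True} fails the amount check above with
# "value of bid should be less than value of tender".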
def validate_post_bid(request, **kwargs):
bid = request.validated["bid"]
tender = request.validated["tender"]
tenderer_id = bid["tenderers"][0]["identifier"]["id"]
if tenderer_id not in [i.identifier.id for i in tender.shortlistedFirms]:
raise_operation_error(request, f"Can't add bid if tenderer not in shortlistedFirms")
def validate_tender_publish(request, **kwargs):
current_status = request.validated['tender'].status
tender_status = request.validated['data'].get('status', current_status)
error_message = "{} can't switch tender from status ({}) to ({})"
if tender_status == current_status:
return
if current_status == "draft.publishing" and tender_status == "cancelled":
raise_operation_error(request,
error_message.format("You",
current_status,
tender_status))
if request.authenticated_role not in ("bots", "Administrator", "chronograph") \
and tender_status != "draft.publishing":
raise_operation_error(request,
error_message.format(request.authenticated_role,
current_status,
tender_status))
def validate_profile_pattern(profile):
result = PROFILE_PATTERN.findall(profile)
if len(result) != 1:
raise ValidationError("The profile value doesn't match id pattern")
| 40.349206
| 111
| 0.651652
|
4553939929ce9a33295f1c2f00578133e869b8dc
| 271
|
py
|
Python
|
diskfree2.py
|
slavong/python-parallel
|
d59855d40c1d6399fef5c2ac6173cebfda47f503
|
[
"MIT"
] | null | null | null |
diskfree2.py
|
slavong/python-parallel
|
d59855d40c1d6399fef5c2ac6173cebfda47f503
|
[
"MIT"
] | null | null | null |
diskfree2.py
|
slavong/python-parallel
|
d59855d40c1d6399fef5c2ac6173cebfda47f503
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3.6
import subprocess
# Call the command
output = subprocess.run(["df", "-h", "/home"], stdout=subprocess.PIPE)
# Read the return code and the output data
print ("Return code: %i" % output.returncode)
print ("Output data: %s" % output.stdout)
| 24.636364
| 70
| 0.686347
|
4bba4a3f21ac576ce569c53d0ebb5ce7d3d617c6
| 2,941
|
py
|
Python
|
blog/files/21/dns_server.py
|
HowcanoeWang/SigmaMeow
|
89c7bcd6b505e565875cab66354b67ed0bbb1ee9
|
[
"MIT"
] | null | null | null |
blog/files/21/dns_server.py
|
HowcanoeWang/SigmaMeow
|
89c7bcd6b505e565875cab66354b67ed0bbb1ee9
|
[
"MIT"
] | 24
|
2019-08-20T14:04:34.000Z
|
2022-01-27T01:24:46.000Z
|
blog/files/21/dns_server.py
|
HowcanoeWang/HowcanoeWang.github.io
|
14a958253ccb6acd47f9d97a66f9b1eed1d7dad6
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
import re
import sys
import socket
import traceback
from os.path import isfile
HOSTS_FILE = "hosts.txt"
SERVER_HOST = "0.0.0.0"
SERVER_PORT = 53
#ipv4_exp = re.compile(r"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}")
class DNSQuery:
def __init__(self, data):
self.data = data
self.domain = bytearray()
tipo = (data[2] >> 3) & 15
if tipo == 0:
ini = 12
lon = data[ini]
while lon != 0:
self.domain += data[ini + 1:ini + lon + 1] + bytes(".", "ascii")
ini += lon + 1
lon = data[ini]
self.domain = str(self.domain, "utf8").rstrip(".")
def response(self, ip):
packet = bytearray()
if self.domain:
packet += self.data[:2] + bytearray([0x81, 0x80])
packet += self.data[4:6] + self.data[4:6] + bytearray([0x00, 0x00, 0x00, 0x00]) # Questions and Answers Counts
packet += self.data[12:] # Original Domain Name Question
packet += bytearray([0xC0, 0x0C]) # Pointer to domain name
packet += bytearray([0x00, 0x01, 0x00, 0x01, 0x00, 0x00, 0x00, 0x3c, 0x00, 0x04]) # Response type, ttl and resource data length -> 4 bytes
packet += bytearray([int(x) for x in ip.split(".")]) # 4 bytes of IP
return packet
def parse_host_file_as_regex(data):
host_list = []
for line in data.splitlines():
if line != "" and line[0] != "#":
split_line = line.split(" ", 1)
if len(split_line) == 2:
host_regex = split_line[0]
ip_addr = split_line[1]
host_list.append([re.compile(host_regex), ip_addr])
return host_list
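# Illustrative hosts.txt entries (regex pattern, one space, then the IPv4 to answer with):
#   .*\.example\.com 10.0.0.5
#   ^blocked\.site$ 0.0.0.0
# Empty lines and lines starting with '#' are skipped by parse_host_file_as_regex.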
if __name__ == '__main__':
if isfile(HOSTS_FILE):
        with open(HOSTS_FILE, "r") as hosts_file:
            host_data = parse_host_file_as_regex(hosts_file.read())
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind((SERVER_HOST, SERVER_PORT))
print("DNS Proxy server started on UDP port {}!".format(SERVER_PORT))
while True:
try:
(data, addr) = sock.recvfrom(1024)
p = DNSQuery(data)
result = [ip_addr for (regex, ip_addr) in host_data if regex.search(p.domain)]
if result:
ip = result[0]
print("Local: {} -> {}".format(p.domain, ip))
sock.sendto(p.response(ip), addr)
else:
ip = socket.gethostbyname(p.domain)
print("Remote: {} -> {}".format(p.domain, ip))
sock.sendto(p.response(ip), addr)
except KeyboardInterrupt:
print("Done!")
sock.close()
sys.exit(0)
            except Exception:
traceback.print_exc()
else:
print("Host file not found!")
| 37.227848
| 151
| 0.530772
|
c98052dff5245256100b9f74b9399cb28e704135
| 1,873
|
py
|
Python
|
scipy/constants/tests/test_codata.py
|
maxi-marufo/my-scipy
|
be6c2597fcee86419592ac512319301c7ddfc118
|
[
"BSD-3-Clause"
] | 1
|
2020-05-14T13:09:58.000Z
|
2020-05-14T13:09:58.000Z
|
scipy/constants/tests/test_codata.py
|
maxi-marufo/my-scipy
|
be6c2597fcee86419592ac512319301c7ddfc118
|
[
"BSD-3-Clause"
] | null | null | null |
scipy/constants/tests/test_codata.py
|
maxi-marufo/my-scipy
|
be6c2597fcee86419592ac512319301c7ddfc118
|
[
"BSD-3-Clause"
] | null | null | null |
from scipy.constants import constants, codata, find, value
from numpy.testing import (assert_equal, assert_,
assert_almost_equal)
def test_find():
keys = find('weak mixing', disp=False)
assert_equal(keys, ['weak mixing angle'])
keys = find('qwertyuiop', disp=False)
assert_equal(keys, [])
keys = find('natural unit', disp=False)
assert_equal(keys, sorted(['natural unit of velocity',
'natural unit of action',
'natural unit of action in eV s',
'natural unit of mass',
'natural unit of energy',
'natural unit of energy in MeV',
'natural unit of momentum',
'natural unit of momentum in MeV/c',
'natural unit of length',
'natural unit of time']))
def test_basic_table_parse():
c = 'speed of light in vacuum'
assert_equal(codata.value(c), constants.c)
assert_equal(codata.value(c), constants.speed_of_light)
def test_basic_lookup():
assert_equal('%d %s' % (codata.c, codata.unit('speed of light in vacuum')),
'299792458 m s^-1')
def test_find_all():
assert_(len(codata.find(disp=False)) > 300)
def test_find_single():
assert_equal(codata.find('Wien freq', disp=False)[0],
'Wien frequency displacement law constant')
def test_2002_vs_2006():
assert_almost_equal(codata.value('magn. flux quantum'),
codata.value('mag. flux quantum'))
def test_exact_values():
# Check that updating stored values with exact ones worked.
for key in codata.exact_values:
assert_((codata.exact_values[key][0] - value(key)) / value(key) == 0)
| 33.446429
| 79
| 0.567005
|
93cb81eb77bb680ad07e61612f8d2b284278db0a
| 1,826
|
py
|
Python
|
src/multisig.py
|
valerio-vaccaro/krux
|
a3718a4e12ef6f92ada98e02d0d286a971a56434
|
[
"MIT"
] | null | null | null |
src/multisig.py
|
valerio-vaccaro/krux
|
a3718a4e12ef6f92ada98e02d0d286a971a56434
|
[
"MIT"
] | null | null | null |
src/multisig.py
|
valerio-vaccaro/krux
|
a3718a4e12ef6f92ada98e02d0d286a971a56434
|
[
"MIT"
] | null | null | null |
# The MIT License (MIT)
# Copyright (c) 2021 Tom J. Sun
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from embit.descriptor.descriptor import Descriptor
class MultisigPolicy:
def __init__(self, policy):
self.policy = policy
self.label = policy['label']
self.descriptor = Descriptor.from_string(policy['descriptor'])
if not self.descriptor or not self.descriptor.is_basic_multisig:
raise ValueError('Not multisig')
self.m = int(str(self.descriptor.miniscript.args[0]))
self.n = len(self.descriptor.keys)
self.cosigners = [key.key.to_base58() for key in self.descriptor.keys]
if self.descriptor.is_sorted:
self.cosigners = sorted(self.cosigners)
def matches_tx_policy(self, tx_policy):
return tx_policy['cosigners'] == self.cosigners and tx_policy['m'] == self.m and tx_policy['n'] == self.n
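# Hypothetical usage sketch (the descriptor string is illustrative, not a real wallet):
#   policy = MultisigPolicy({
#       'label': 'my 2-of-3 wallet',
#       'descriptor': 'wsh(sortedmulti(2,xpubA/0/*,xpubB/0/*,xpubC/0/*))',
#   })
#   policy.m, policy.n   # -> 2, 3 for a 2-of-3 descriptor
#   policy.matches_tx_policy({'cosigners': policy.cosigners, 'm': policy.m, 'n': policy.n})  # -> True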
| 48.052632
| 107
| 0.766156
|
89460777595a74f0070ddcf2e42ac2f7e2a2b299
| 1,691
|
py
|
Python
|
tests/test_index.py
|
dkreuer/emrichen
|
ef2b2ad5cd828a9dfed6d7924cee207e13598b0f
|
[
"MIT"
] | 62
|
2019-01-17T18:47:45.000Z
|
2022-03-23T10:00:09.000Z
|
tests/test_index.py
|
dkreuer/emrichen
|
ef2b2ad5cd828a9dfed6d7924cee207e13598b0f
|
[
"MIT"
] | 24
|
2019-01-18T10:05:36.000Z
|
2022-02-20T17:46:36.000Z
|
tests/test_index.py
|
dkreuer/emrichen
|
ef2b2ad5cd828a9dfed6d7924cee207e13598b0f
|
[
"MIT"
] | 10
|
2019-02-12T23:58:29.000Z
|
2022-02-18T09:42:35.000Z
|
import pytest
from emrichen import Template
def test_index():
assert (
Template.parse(
'''
!Index
over:
- name: manifold
score: 7.8
- name: John
score: 9.9
- name: John
score: 9.8
as: flavour
by: !Lookup flavour.name
duplicates: ignore
template: !Lookup flavour.score
'''
).enrich({})
== [{'manifold': 7.8, 'John': 9.8}]
)
def test_index_without_template():
assert (
Template.parse(
'''
!Index
over:
- name: manifold
score: 7.8
- name: John
score: 9.9
- name: John
score: 9.8
as: flavour
by: !Lookup flavour.name
duplicates: ignore
'''
).enrich({})
== [{'manifold': {'name': 'manifold', 'score': 7.8}, 'John': {'name': 'John', 'score': 9.8}}]
)
def test_index_result_as():
assert (
Template.parse(
'''
!Index
over:
- name: manifold
score: 7.8
- name: John
score: 9.9
- name: John
score: 9.8
as: flavour
template:
NAME: !Lookup flavour.name
SCORE: !Lookup flavour.score
result_as: result
by: !Lookup result.NAME
duplicates: ignore
'''
).enrich({})
== [{'manifold': {'NAME': 'manifold', 'SCORE': 7.8}, 'John': {'NAME': 'John', 'SCORE': 9.8}}]
)
def test_index_duplicates_error():
with pytest.raises(ValueError):
assert Template.parse(
'''
!Index
over:
- name: manifold
score: 7.8
- name: John
score: 9.9
- name: John
score: 9.8
as: flavour
by: !Lookup flavour.name
duplicates: error
template: !Lookup flavour.score
'''
).enrich({})
| 18.380435
| 101
| 0.52809
|
184eddeb97201b81a89002298685bc2db249cef3
| 15,527
|
py
|
Python
|
tensorflow_probability/python/mcmc/metropolis_hastings_test.py
|
NeelGhoshal/probability
|
45ed841e3cff6cdc7cd1b2d96dd874d9070318f7
|
[
"Apache-2.0"
] | 2
|
2019-10-30T04:45:07.000Z
|
2019-10-30T04:45:08.000Z
|
tensorflow_probability/python/mcmc/metropolis_hastings_test.py
|
gregorystrubel/probability
|
df96f3d56eff92c6b06fbac68dc58e095e28fed6
|
[
"Apache-2.0"
] | null | null | null |
tensorflow_probability/python/mcmc/metropolis_hastings_test.py
|
gregorystrubel/probability
|
df96f3d56eff92c6b06fbac68dc58e095e28fed6
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for `MetropolisHastings` `TransitionKernel`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import warnings
# Dependency imports
import numpy as np
import tensorflow.compat.v2 as tf
import tensorflow_probability as tfp
from tensorflow_probability.python.internal import test_util
from tensorflow_probability.python.mcmc.internal.util import is_list_like
InnerKernelResultsWithoutCorrection = collections.namedtuple(
'InnerKernelResultsWithoutCorrection',
[
'target_log_prob', # For "next_state".
'grads_target_log_prob', # For "next_state".
# We add a "bogus" field just to ensure that the automatic introspection
# works as intended.
'extraneous',
])
InnerKernelResultsWithCorrection = collections.namedtuple(
'InnerKernelResultsWithCorrection',
[
'log_acceptance_correction',
'target_log_prob', # For "next_state".
'grads_target_log_prob', # For "next_state".
# We add a "bogus" field just to ensure that the automatic introspection
# works as intended.
'extraneous',
])
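# Note (added for clarity): the fake kernel below halves every result field it
# returns, so MetropolisHastings' acceptance computation
#   log_accept_ratio = proposed.target_log_prob - accepted.target_log_prob
#                      (+ proposed.log_acceptance_correction, when present)
# reduces to the literal arithmetic spelled out in the log_accept_ratio
# assertions, e.g. (0.5 * 100.) - (100.) for the first chain.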
class FakeTransitionKernel(tfp.mcmc.TransitionKernel):
"""Fake TransitionKernel for testing MetropolisHastings."""
def __init__(self, is_calibrated, one_step_fn, bootstrap_fn,
accepts_seed_arg=True):
self._is_calibrated = is_calibrated
self._one_step_fn = one_step_fn
self._bootstrap_fn = bootstrap_fn
self._call_count = collections.Counter()
self._accepts_seed_arg = accepts_seed_arg
@property
def call_count(self):
return self._call_count
@property
def is_calibrated(self):
self.call_count['is_calibrated'] += 1
return self._is_calibrated
def one_step(self, current_state, previous_kernel_results, **kwargs):
self.call_count['one_step'] += 1
if ('seed' in kwargs) and not self._accepts_seed_arg:
raise TypeError('`seed` arg not welcome')
return self._one_step_fn(current_state, previous_kernel_results)
def bootstrap_results(self, init_state, **kwargs):
self.call_count['bootstrap_results'] += 1
return self._bootstrap_fn(init_state)
def make_one_step_fn(dtype):
def one_step(current_state, previous_kernel_results):
# Make next_state.
if is_list_like(current_state):
next_state = []
for i, s in enumerate(current_state):
next_state.append(tf.identity(s * dtype(i + 2),
name='next_state'))
else:
next_state = tf.identity(2. * current_state,
name='next_state')
# Make kernel_results.
kernel_results = {}
for fn in sorted(previous_kernel_results._fields):
if fn == 'grads_target_log_prob':
kernel_results['grads_target_log_prob'] = [
tf.identity(0.5 * g, name='grad_target_log_prob')
for g in previous_kernel_results.grads_target_log_prob]
elif fn == 'extraneous':
kernel_results[fn] = getattr(previous_kernel_results, fn, None)
else:
kernel_results[fn] = tf.identity(
0.5 * getattr(previous_kernel_results, fn, None),
name=fn)
kernel_results = type(previous_kernel_results)(**kernel_results)
# Done.
return next_state, kernel_results
return one_step
def make_bootstrap_results_fn(true_kernel_results):
kernel_results_cls = type(true_kernel_results)
def bootstrap_results(_):
fake_kernel_results = {}
for fn in sorted(kernel_results_cls._fields):
if fn == 'grads_target_log_prob':
fake_kernel_results['grads_target_log_prob'] = [
tf.identity(g, name='grad_target_log_prob')
for g in true_kernel_results.grads_target_log_prob]
else:
fake_kernel_results[fn] = tf.identity(
getattr(true_kernel_results, fn, None),
name=fn)
fake_kernel_results = kernel_results_cls(**fake_kernel_results)
return fake_kernel_results
return bootstrap_results
@test_util.test_all_tf_execution_regimes
class MetropolisHastingsTest(test_util.TestCase):
def setUp(self):
self.dtype = np.float32
def testCorrectlyWorksWithoutCorrection(self):
current_state_ = [self.dtype([1, 2]),
self.dtype([3, 4])]
current_state = [tf.convert_to_tensor(s) for s in current_state_]
expected_inner_init_kernel_results = InnerKernelResultsWithoutCorrection(
target_log_prob=self.dtype([
+100.,
-100.,
]),
grads_target_log_prob=[self.dtype([1.25, 1.5]),
self.dtype([2.25, 2.5])],
extraneous=self.dtype([1.75, 2.]))
one_step_fn = make_one_step_fn(dtype=self.dtype)
bootstrap_fn = make_bootstrap_results_fn(
expected_inner_init_kernel_results)
# Collect expected results.
expected_init_inner_kernel_results = bootstrap_fn(current_state)
_, expected_inner_kernel_results = one_step_fn(
current_state, expected_init_inner_kernel_results)
# Collect actual results.
mh = tfp.mcmc.MetropolisHastings(
FakeTransitionKernel(
is_calibrated=False,
one_step_fn=one_step_fn,
bootstrap_fn=bootstrap_fn))
stream = test_util.test_seed_stream()
init_kernel_results = mh.bootstrap_results(current_state)
next_state, kernel_results = mh.one_step(
current_state, init_kernel_results, seed=stream())
# Unmodified state is passed through unmodified.
self.assertIs(kernel_results.accepted_results.extraneous,
init_kernel_results.accepted_results.extraneous)
self.assertIs(kernel_results.proposed_results.extraneous,
init_kernel_results.accepted_results.extraneous)
# Check correct types and call pattern.
self.assertEqual(
dict(is_calibrated=1,
one_step=1,
bootstrap_results=1),
mh.inner_kernel.call_count)
for kr in [init_kernel_results.accepted_results,
init_kernel_results.proposed_results]:
self.assertEqual(type(expected_init_inner_kernel_results), type(kr))
for kr in [kernel_results.accepted_results,
kernel_results.proposed_results]:
self.assertEqual(type(expected_inner_kernel_results), type(kr))
# Now check actual values.
[
expected_init_inner_kernel_results_,
expected_inner_kernel_results_,
init_kernel_results_,
kernel_results_,
next_state_,
] = self.evaluate([
expected_init_inner_kernel_results,
expected_inner_kernel_results,
init_kernel_results,
kernel_results,
next_state,
])
# Check that the bootstrapped kernel results are correctly initialized.
for fn in expected_inner_init_kernel_results._fields:
self.assertAllClose(
getattr(expected_init_inner_kernel_results_, fn, np.nan),
getattr(init_kernel_results_.accepted_results, fn, np.nan),
atol=0.,
rtol=1e-5)
# Check that the proposal is correctly computed.
self.assertAllClose([2 * current_state_[0],
3 * current_state_[1]],
kernel_results_.proposed_state,
atol=0., rtol=1e-5)
for fn in expected_inner_kernel_results._fields:
self.assertAllClose(
getattr(expected_inner_kernel_results_, fn, np.nan),
getattr(kernel_results_.proposed_results, fn, np.nan),
atol=0.,
rtol=1e-5)
# Extremely high start prob means first will be rejected.
# Extremely low start prob means second will be accepted.
self.assertAllEqual([False, True],
kernel_results_.is_accepted)
self.assertAllEqual([(0.5 * 100.) - (100.),
(0.5 * -100.) - (-100.)],
kernel_results_.log_accept_ratio)
self.assertAllClose([self.dtype([1, 0 + 2]) * current_state_[0],
self.dtype([1, 1 + 2]) * current_state_[1]],
next_state_)
def testCorrectlyWorksWithCorrection(self):
current_state_ = [self.dtype([1, 2]),
self.dtype([3, 4])]
current_state = [tf.convert_to_tensor(s) for s in current_state_]
expected_inner_init_kernel_results = InnerKernelResultsWithCorrection(
log_acceptance_correction=self.dtype([+300., -300.]),
target_log_prob=self.dtype([100., -100.]),
grads_target_log_prob=[self.dtype([1.25, 1.5]),
self.dtype([2.25, 2.5])],
extraneous=self.dtype([1.75, 2.]))
one_step_fn = make_one_step_fn(dtype=self.dtype)
bootstrap_fn = make_bootstrap_results_fn(
expected_inner_init_kernel_results)
# Collect expected results.
expected_init_inner_kernel_results = bootstrap_fn(current_state)
_, expected_inner_kernel_results = one_step_fn(
current_state, expected_init_inner_kernel_results)
# Collect actual results.
mh = tfp.mcmc.MetropolisHastings(
FakeTransitionKernel(
is_calibrated=False,
one_step_fn=one_step_fn,
bootstrap_fn=bootstrap_fn))
init_kernel_results = mh.bootstrap_results(current_state)
next_state, kernel_results = mh.one_step(
current_state, init_kernel_results, seed=test_util.test_seed())
# Unmodified state is passed through unmodified.
self.assertIs(kernel_results.accepted_results.extraneous,
init_kernel_results.accepted_results.extraneous)
self.assertIs(kernel_results.proposed_results.extraneous,
init_kernel_results.accepted_results.extraneous)
# Check correct types and call pattern.
self.assertEqual(
dict(is_calibrated=1,
one_step=1,
bootstrap_results=1),
mh.inner_kernel.call_count)
for kr in [init_kernel_results.accepted_results,
init_kernel_results.proposed_results]:
self.assertEqual(type(expected_init_inner_kernel_results), type(kr))
for kr in [kernel_results.accepted_results,
kernel_results.proposed_results]:
self.assertEqual(type(expected_inner_kernel_results), type(kr))
# Now check actual values.
[
expected_init_inner_kernel_results_,
expected_inner_kernel_results_,
init_kernel_results_,
kernel_results_,
next_state_,
] = self.evaluate([
expected_init_inner_kernel_results,
expected_inner_kernel_results,
init_kernel_results,
kernel_results,
next_state,
])
# Check that the bootstrapped kernel results are correctly initialized.
for fn in expected_inner_init_kernel_results._fields:
self.assertAllClose(
getattr(expected_init_inner_kernel_results_, fn, np.nan),
getattr(init_kernel_results_.accepted_results, fn, np.nan),
atol=0.,
rtol=1e-5)
# Check that the proposal is correctly computed.
self.assertAllClose([2 * current_state_[0],
3 * current_state_[1]],
kernel_results_.proposed_state,
atol=0., rtol=1e-5)
for fn in expected_inner_kernel_results._fields:
self.assertAllClose(
getattr(expected_inner_kernel_results_, fn, np.nan),
getattr(kernel_results_.proposed_results, fn, np.nan),
atol=0.,
rtol=1e-5)
# First: Extremely high correction means proposed will be accepted, despite
# high prob initial state.
# Second: Extremely low correction means proposed will be rejected, despite
# low prob initial state.
self.assertAllEqual([True, False],
kernel_results_.is_accepted)
self.assertAllEqual([(0.5 * 100.) - (100.) + (0.5 * 300.),
(0.5 * -100.) - (-100.) + (0.5 * -300.)],
kernel_results_.log_accept_ratio)
self.assertAllClose([self.dtype([0 + 2, 1]) * current_state_[0],
self.dtype([1 + 2, 1]) * current_state_[1]],
next_state_)
@test_util.jax_disable_test_missing_functionality('stateful sampler/legacy')
def testInnerOneStepMissingSeedArg(self):
current_state_ = [self.dtype([1, 2]), self.dtype([3, 4])]
current_state = [tf.convert_to_tensor(s) for s in current_state_]
init_inner_kernel_results = InnerKernelResultsWithCorrection(
log_acceptance_correction=self.dtype([+300., -300.]),
target_log_prob=self.dtype([100., -100.]),
grads_target_log_prob=[self.dtype([1.25, 1.5]),
self.dtype([2.25, 2.5])],
extraneous=self.dtype([1.75, 2.]))
one_step_fn = make_one_step_fn(dtype=self.dtype)
bootstrap_fn = make_bootstrap_results_fn(init_inner_kernel_results)
mh = tfp.mcmc.MetropolisHastings(
FakeTransitionKernel(
one_step_fn=one_step_fn,
bootstrap_fn=bootstrap_fn,
is_calibrated=True,
accepts_seed_arg=False))
init_kernel_results = mh.bootstrap_results(current_state)
# Verify we can execute a step without an exception, as long as no seed arg
# is provided to MH.
self.evaluate(mh.one_step(current_state, init_kernel_results))
def testWarnings(self):
current_state_ = [self.dtype([1, 2]),
self.dtype([3, 4])]
current_state = [tf.convert_to_tensor(s) for s in current_state_]
# Verify the warning about lacking a log-acceptance correction field.
expected_inner_init_kernel_results = InnerKernelResultsWithoutCorrection(
target_log_prob=self.dtype([100., -100.]),
grads_target_log_prob=[
self.dtype([1.25, 1.5]),
self.dtype([2.25, 2.5]),
],
extraneous=self.dtype([1.75, 2.]))
one_step_fn = make_one_step_fn(dtype=self.dtype)
bootstrap_fn = make_bootstrap_results_fn(
expected_inner_init_kernel_results)
with warnings.catch_warnings(record=True) as w:
mh = tfp.mcmc.MetropolisHastings(
FakeTransitionKernel(
is_calibrated=True, # Verify the already-calibrated warning.
one_step_fn=one_step_fn,
bootstrap_fn=bootstrap_fn))
init_kernel_results = mh.bootstrap_results(current_state)
_, _ = mh.one_step(current_state, init_kernel_results,
seed=test_util.test_seed())
w = sorted(w, key=lambda w: str(w.message))
self.assertRegexpMatches(
str(w[0].message),
r'`TransitionKernel` is already calibrated')
self.assertRegexpMatches(
str(w[1].message),
r'`TransitionKernel` does not have a `log_acceptance_correction`')
if __name__ == '__main__':
test_util.main()
| 38.624378
| 80
| 0.668642
|
717846d77a73cf304d2b94867f893dcc1d9f33d0
| 864
|
py
|
Python
|
Python/multiplication/multiply.py
|
BackEndTea/Learning
|
ccbf8fdbb8fb23643898d73e64fe7442ab46b791
|
[
"MIT"
] | 1
|
2018-10-31T10:36:34.000Z
|
2018-10-31T10:36:34.000Z
|
Python/multiplication/multiply.py
|
BackEndTea/Learning
|
ccbf8fdbb8fb23643898d73e64fe7442ab46b791
|
[
"MIT"
] | null | null | null |
Python/multiplication/multiply.py
|
BackEndTea/Learning
|
ccbf8fdbb8fb23643898d73e64fe7442ab46b791
|
[
"MIT"
] | 2
|
2021-05-06T11:18:25.000Z
|
2021-12-04T07:56:29.000Z
|
def main():
x = 3141592653589793238462643383279502884197169399375105820974944592
y = 2718281828459045235360287471352662497757247093699959574966967627
out = 8539734222673567065463550869546574495034888535765114961879601127067743044893204848617875072216249073013374895871952806582723184
print(multiply(x,y))
print(x * y)
# Assume x and y are of the same lenght
def multiply(x,y):
x = str(x)
n = len(x)
if (n < 2 ):
return int(x)*int(y)
n_half = n//2
a = int(x[:n_half])
b = int(x[n_half:])
y = str(y)
n_half = len(y)//2
if (n_half < 2):
return int(x) * int(y)
c = int(y[:n_half])
d = int(y[n_half:])
out = (10**n * multiply(a,c) + 10**n_half * (multiply(a,d) + multiply(b,c)) + multiply(b,d))
return out
def _multiply(x,y):
pass
if __name__ == "__main__":
main()
| 24
| 137
| 0.640046
|
4edefd44048497ff7ec6efc87509d05fd544ee60
| 19,238
|
py
|
Python
|
dpmModule/character/characterTemplate.py
|
OniOniOn-/maplestory_dpm_calc
|
fbe824f01ab8e8210b174dd9db8295da80c267cd
|
[
"MIT"
] | null | null | null |
dpmModule/character/characterTemplate.py
|
OniOniOn-/maplestory_dpm_calc
|
fbe824f01ab8e8210b174dd9db8295da80c267cd
|
[
"MIT"
] | null | null | null |
dpmModule/character/characterTemplate.py
|
OniOniOn-/maplestory_dpm_calc
|
fbe824f01ab8e8210b174dd9db8295da80c267cd
|
[
"MIT"
] | null | null | null |
import yaml
import os
from copy import copy, deepcopy
from typing import List, Optional, Tuple, Union
from ..character.characterKernel import GearedCharacter, JobGenerator
from ..gear import Gear, GearBuilder, GearType, GearPropType, Scroll, eval_set_item_effect
from ..jobs import job_branch_list
from ..kernel.core.modifier import ExtendedCharacterModifier as ExMDF
def open_yaml(*paths) -> dict:
with open(os.path.join(os.path.dirname(__file__), *paths), encoding='utf8') as _file:
return yaml.safe_load(_file)
'''How template override works:
1. "default" <- "job" (type: override):
Override by keys in "job"
ex) default.armor = job.armor, default.head = job.head
2. "armor" <- "head":
Override by keys in "head"
'''
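# Illustrative template.yaml fragment for the override rule above (spec, job and
# gear names are placeholders, not copied from the real config):
#   some_spec:
#     default:
#       level: 250
#       head: {id: "some_hat"}
#       weapon: {id: "some_weapon"}
#     some_job:
#       type: override
#       weapon: {id: "job_weapon"}   # only "weapon" is replaced; the rest is inherited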
'''Order of template gear attributes:
1. id*
2. bonus
3. upgrade
4. star
5. potential
6. add_potential
7. cdr
'''
class TemplateGenerator:
def __init__(self):
        # TODO: apply secondary-weapon growth bonuses
        # "데몬슬레이어": 1099004,  # growth adds STR 9, DEX 9, HP 200, DEF 20
        # "데몬어벤져": 1099009,  # growth adds STR 9, HP 200, DEF 20
        # "미하일": 1098003,  # growth adds STR 9, DEX 9, HP 200, DEF 20
self.parts = ("head", "top", "bottom", "shoes", "glove", "cape", "shoulder", "face", "eye", "ear", "belt",
"ring1", "ring2", "ring3", "ring4", "pendant1", "pendant2",
"pocket", "badge", "medal", "weapon", "subweapon", "emblem", "heart")
# load template.yaml to 'data'
self.data = {}
_template_yaml = open_yaml('configs', 'template.yaml')
for key in _template_yaml:
self.data[key] = open_yaml('configs', _template_yaml[key])
def get_spec_names(self) -> Tuple[str]:
names: List[str] = []
for name in self.data.keys():
if not name.startswith("_"):
names.append(name)
return tuple(names)
def get_template_and_weapon_stat(self, gen: JobGenerator, spec_name: str, cdr: int = 0):
return self.get_template(gen, spec_name, cdr), self.get_weapon_stat(gen, spec_name)
def get_weapon_stat(self, gen: JobGenerator, spec_name: str) -> Tuple[int, int]:
return self._get_weapon_stat(self._get_template_dict(spec_name, gen.jobname))
def get_template(self, gen: JobGenerator, spec_name: str, cdr: int = 0) -> GearedCharacter:
"""
:param gen: JobGenerator
        :param spec_name: spec name; its union level is used.
        :param cdr: cooldown reduction (seconds)
:return: GearedCharacter
"""
node = self._get_template_dict(spec_name, gen.jobname)
# Create GearedCharacter with level
template = GearedCharacter(gen=gen, level=node['level'])
# Apply arcane, authentic, pet, cash modifiers
template.apply_modifiers([
self._get_arcane_modifier(node, gen.jobtype),
self._get_authentic_modifier(node, gen.jobtype),
self._get_pet_modifier(node),
self._get_cash_modifier(node, gen.jobtype),
self._get_job_specific_item_modifier(node)
])
# Equip title
template.title = self._get_title(node)
template.add_gear_modifier(template.title)
# Equip gears
gear_list = {}
for part in self.parts:
# Set zero subweapon
if part == "subweapon" and gen.jobname == "제로":
gear_list["subweapon"] = self._get_zero_subweapon(gear_list["weapon"])
else:
gear_list[part] = self._get_enchanted_gear(part, node, gen, cdr)
# Get set item effects
gear_list["set_effect"] = self._get_set_effect(gear_list, node)
# Apply gear, set item effect to character mdf
template.set_gears(gear_list)
return template
def _get_template_dict(self, spec_name: str, jobname: str) -> dict:
if spec_name not in self.data:
raise ValueError('Invalid spec_name: ' + spec_name)
# Get job specific copy of node
node: dict = deepcopy(self.data[spec_name]['default'])
if jobname in self.data[spec_name]:
if self.data[spec_name][jobname]['type'] == "full":
node = deepcopy(self.data[spec_name][jobname])
elif self.data[spec_name][jobname]['type'] == "override":
for node_key in self.data[spec_name][jobname]:
node[node_key] = deepcopy(self.data[spec_name][jobname][node_key])
# Assert node contains all necessary parts
assert("level" in node and set(self.parts) <= node.keys())
return node
def _get_weapon_stat(self, node) -> Tuple[int, int]:
if 'weapon_stat' not in node or len(node['weapon_stat']) != 2:
raise TypeError('template does not contain valid weapon_stat field')
return tuple(node['weapon_stat'])
def _get_arcane_modifier(self, node, jobtype: str) -> ExMDF:
if "arcane_symbol_force" not in node:
return ExMDF()
value = node["arcane_symbol_force"] // 10
if jobtype in ("STR", "DEX", "INT", "LUK", "LUK2"):
return ExMDF(stat_main_fixed=value * 100)
elif jobtype == "HP":
return ExMDF(stat_main_fixed=value * 1750)
elif jobtype == "xenon":
            # TODO: apply Xenon arcane symbol STR/DEX/LUK
# return ExMDF(stat_main_fixed=value * 39, stat_sub_fixed=value * 39)
return ExMDF(stat_main_fixed=value * 39 * 3)
def _get_authentic_modifier(self, node, jobtype: str) -> ExMDF:
if "authentic_symbol_level" not in node:
return ExMDF()
value = sum([(2 * n + 3) for n in node["authentic_symbol_level"]])
if jobtype in ("STR", "DEX", "INT", "LUK", "LUK2"):
return ExMDF(stat_main_fixed=value * 100)
elif jobtype == "HP":
return ExMDF(stat_main_fixed=value * 1750)
elif jobtype == "xenon":
            # TODO: apply Xenon authentic symbol STR/DEX/LUK
# return ExMDF(stat_main_fixed=value * 39, stat_sub_fixed=value * 39 * 2)
return ExMDF(stat_main_fixed=value * 39 * 3)
def _get_pet_modifier(self, node) -> ExMDF:
mdf = ExMDF()
if "pet_equip" in node:
mdf.att += node["pet_equip"]
if "pet_set" in node:
mdf.att += node["pet_set"]
return mdf
def _get_cash_modifier(self, node, jobtype: str) -> ExMDF:
if "cash" not in node:
return ExMDF()
mdf = ExMDF()
for stat_key in ("att", "stat_main"):
if stat_key in node["cash"]:
setattr(mdf, stat_key, node["cash"][stat_key])
if "stat_sub" in node["cash"]: # TODO: 캐시장비도 Gear로 처리하도록 변경할것
if jobtype == "xenon":
setattr(mdf, "stat_main", getattr(mdf, "stat_main", 0) + node["cash"]["stat_sub"])
else:
setattr(mdf, "stat_sub", node["cash"]["stat_sub"])
if "stat_sub2" in node["cash"]:
if jobtype == "xenon":
setattr(mdf, "stat_main", getattr(mdf, "stat_main", 0) + node["cash"]["stat_sub2"])
if jobtype == "LUK2":
setattr(mdf, "stat_sub", getattr(mdf, "stat_sub", 0) + node["cash"]["stat_sub2"])
return mdf
def _get_job_specific_item_modifier(self, node) -> ExMDF:
if "job_specific_item" not in node:
return ExMDF()
mdf = ExMDF()
for stat_key in node["job_specific_item"]:
setattr(mdf, stat_key, node["job_specific_item"][stat_key])
return mdf
def _get_title(self, node) -> Gear:
if "title" not in node:
raise TypeError('template does not contain title.')
return Gear.create_title_from_name(node["title"]['id'])
def _get_enchanted_gear(self, part: str, node, gen: JobGenerator, cdr: int) -> Gear:
return self._apply_gear_options(
self._get_gear_base(node[part]['id'], part, gen.jobname), node[part], part, gen.jobtype, cdr
)
def _get_gear_base(self, name: Union[int, str], part: str, jobname: str) -> Gear:
def _get_job_branch(jobname: str) -> int:
return job_branch_list[jobname]
def _get_gear_id(name: str, part: str, jobname: str) -> int:
part = part.rstrip('0123456789')
preset_node = self.data['_preset']
if part in ("weapon", "subweapon", "emblem"):
if name in preset_node[part]:
if jobname in preset_node[part][name]:
return preset_node[part][name][jobname]
else:
if name in preset_node['default']:
if part in preset_node['default'][name]:
id_list = preset_node['default'][name][part]
if len(id_list) == 6:
return id_list[_get_job_branch(jobname)]
else:
return id_list[0]
return Gear.get_id_from_name(name)
debug_name = str(name)
if type(name) == str:
name = _get_gear_id(name, part, jobname)
if name <= 0:
raise ValueError('Invalid gear name: ' + debug_name + ' (part: ' + part + ', jobname: ' + jobname + ')')
return Gear.create_from_id(name)
def _apply_gear_options(self, gear: Gear, gear_node, part: str, jobtype: str, cdr) -> Gear:
def _apply_bonus(bonus_node):
def _is_bonus_gear(part: str):
part = part.rstrip("0123456789")
return part in ("head", "top", "bottom", "shoes", "glove", "cape", "shoulder",
"face", "eye", "ear", "belt", "pendant", "pocket", "weapon")
if len(bonus_node) > 0 and not _is_bonus_gear(part):
raise TypeError('Cannot apply bonus to gear type: ' + gear.type.name)
for bonus_type in bonus_node:
if bonus_type == "att_grade":
gb.apply_additional_stat(att, bonus_node[bonus_type])
elif bonus_type == "allstat_rate":
gb.apply_additional_stat(GearPropType.allstat, bonus_node[bonus_type])
elif stat_type[bonus_type] is not None:
gear.additional_stat[stat_type[bonus_type]] = bonus_node[bonus_type]
def _apply_upgrade(upgrade_node):
# Ignore special case: subweapon / shield
if len(upgrade_node) > 0 and gear.tuc < 1 and part != "subweapon":
raise TypeError('Cannot apply scroll upgrade to gear: ' + str(gear))
gb.apply_hammer()
for scroll in upgrade_node:
type = scroll['type']
count = scroll['count']
if count < 0:
count = gb.scroll_available
if type == "주문의 흔적" or type == "주흔":
prob = scroll['prob']
stat = stat_type[scroll['stat']]
gb.apply_spell_trace_scroll(prob, stat, count)
elif type == "방공" or type == "악공":
gb.apply_scroll(Scroll.create_from_dict({att: scroll['value']}), count)
elif type == "매지컬":
value = scroll['value']
gb.apply_scroll(Scroll.create_from_dict({
GearPropType.STR: 3, GearPropType.DEX: 3, GearPropType.INT: 3, GearPropType.LUK: 3, att: value
}), count)
elif type == "파편":
gb.apply_scroll(Scroll.create_from_dict({
GearPropType.STR: 3, GearPropType.DEX: 3, GearPropType.INT: 3, GearPropType.LUK: 3,
GearPropType.MHP: 40, GearPropType.att: 3, GearPropType.matt: 3
}), count)
elif type == "혼돈의 주문서" or "혼줌":
stat = {}
for stat_key in scroll['option']:
stat[stat_type[stat_key]] = scroll['option'][stat_key]
gb.apply_scroll(Scroll.create_from_dict(stat), count)
else:
raise TypeError('Invalid upgrade type: ' + type)
def _apply_star(gear_node):
star = gear_node['star']
# Ignore special case: subweapon / shield
if abs(star) > gear.max_star and part != "subweapon":
raise TypeError('Tried to apply star ' + str(star) + ' but max_star was ' + str(gear.max_star))
if star > 0:
gb.apply_stars(star)
elif star < 0:
bonus = 0
if 'surprise_bonus' in gear_node:
bonus = gear_node['surprise_bonus']
bonus_count = abs(star) * bonus // 100
gb.apply_stars(abs(star) - bonus_count, True, False)
gb.apply_stars(bonus_count, True, True)
def _apply_potential(potential_node, gear_potential_dict):
def _is_potential_gear(part: str):
part = part.rstrip("0123456789")
return part in ("head", "top", "bottom", "shoes", "glove", "cape", "shoulder", "face", "eye",
"ear", "belt", "ring", "pendant", "weapon", "subweapon", "emblem", "heart")
if len(potential_node) > 0 and not _is_potential_gear(part):
raise TypeError('Cannot apply potential to gear type: ' + gear.type.name)
gear_potential_dict.clear()
for stat_key in potential_node:
if stat_key == "allstat_rate":
gear_potential_dict[pstat_main] += potential_node[stat_key]
gear_potential_dict[pstat_sub] += potential_node[stat_key]
gear_potential_dict[pstat_sub2] += potential_node[stat_key]
elif stat_type[stat_key] is not None:
gear_potential_dict[stat_type[stat_key]] += potential_node[stat_key]
stat_main, pstat_main, stat_sub, pstat_sub, stat_sub2, pstat_sub2, att, patt = _get_stat_type(jobtype)
stat_type = {
"stat_main": stat_main, "pstat_main": pstat_main,
"stat_sub": stat_sub, "pstat_sub": pstat_sub,
"stat_sub2": stat_sub2, "pstat_sub2": pstat_sub2,
"att": att, "patt": patt,
"pdamage": GearPropType.pdamage,
"boss_pdamage": GearPropType.boss_pdamage,
"armor_ignore": GearPropType.armor_ignore,
"crit": GearPropType.crit,
"crit_damage": GearPropType.crit_damage,
"cooltime_reduce": GearPropType.cooltime_reduce,
"pdamage_indep": GearPropType.pdamage_indep,
}
gb = GearBuilder(gear)
        # Bonus stats
if 'bonus' in gear_node:
_apply_bonus(gear_node['bonus'])
        # Scroll upgrades
if 'upgrade' in gear_node:
_apply_upgrade(gear_node['upgrade'])
        # Star force enhancement
if 'star' in gear_node:
_apply_star(gear_node)
        # Potential
if 'potential' in gear_node:
_apply_potential(gear_node['potential'], gear.potential)
        # Additional potential
if 'add_potential' in gear_node:
_apply_potential(gear_node['add_potential'], gear.additional_potential)
        # Hat cooldown-reduction option
if gear.type == GearType.cap and cdr > 0:
if "cdr" not in gear_node or str(cdr) not in gear_node["cdr"]:
raise ValueError('template does not contain cdr information for cdr input: ' + str(cdr))
cdr_node = gear_node["cdr"][str(cdr)]
if 'potential' in cdr_node:
_apply_potential(cdr_node['potential'], gear.potential)
if 'add_potential' in cdr_node:
_apply_potential(cdr_node['add_potential'], gear.additional_potential)
return gear
def _get_zero_subweapon(self, zero_weapon: Gear) -> Gear:
assert (zero_weapon.type == GearType.sword_zl)
subweapon_id = zero_weapon.item_id - 10000
subweapon = Gear.create_from_id(subweapon_id)
shared_types = (GearPropType.boss_pdamage, GearPropType.armor_ignore, GearPropType.pdamage,
GearPropType.STR_rate, GearPropType.DEX_rate, GearPropType.INT_rate, GearPropType.LUK_rate)
for stat_type in shared_types:
subweapon.additional_stat[stat_type] = zero_weapon.additional_stat[stat_type]
subweapon.potential = copy(zero_weapon.potential)
subweapon.additional_potential = copy(zero_weapon.additional_potential)
return subweapon
def _get_set_effect(self, gears, node) -> Gear:
def _get_zero_weapon_set_id(name: str):
try:
return self.data['_preset']["zero_weapon_set_id"][name]
except KeyError:
return 0
set_effect = Gear()
set_effect.name = "세트효과 합계"
set_effect.type = GearType._dummy
# Zero set item id effect
weapon_id = gears["weapon"].item_id
weapon_set_item_id = gears["weapon"].set_item_id
if gears["weapon"].type == GearType.sword_zl and "zero_weapon_set_name" in node:
gears["weapon"].item_id = 1570000
gears["weapon"].set_item_id = _get_zero_weapon_set_id(node["zero_weapon_set_name"])
set_effect.base_stat = eval_set_item_effect(gears.values())
# Revert zero set item id
gears["weapon"].item_id = weapon_id
gears["weapon"].set_item_id = weapon_set_item_id
return set_effect
def _get_stat_type(jobtype: str) -> Tuple[
GearPropType, GearPropType,
GearPropType, GearPropType,
Optional[GearPropType], Optional[GearPropType],
GearPropType, GearPropType]:
# stat_main, pstat_main, stat_sub, pstat_sub, stat_sub2, pstat_sub2, att, patt
return {
"STR": (
GearPropType.STR, GearPropType.STR_rate,
GearPropType.DEX, GearPropType.DEX_rate,
None, None,
GearPropType.att, GearPropType.att_rate
),
"DEX": (
GearPropType.DEX, GearPropType.DEX_rate,
GearPropType.STR, GearPropType.STR_rate,
None, None,
GearPropType.att, GearPropType.att_rate
),
"INT": (
GearPropType.INT, GearPropType.INT_rate,
GearPropType.LUK, GearPropType.LUK_rate,
None, None,
GearPropType.matt, GearPropType.matt_rate
),
"LUK": (
GearPropType.LUK, GearPropType.LUK_rate,
GearPropType.DEX, GearPropType.DEX_rate,
None, None,
GearPropType.att, GearPropType.att_rate
),
"LUK2": (
GearPropType.LUK, GearPropType.LUK_rate,
GearPropType.DEX, GearPropType.DEX_rate,
GearPropType.STR, GearPropType.STR_rate,
GearPropType.att, GearPropType.att_rate
),
"HP": (
GearPropType.MHP, GearPropType.MHP_rate,
GearPropType.STR, GearPropType.STR_rate,
None, None,
GearPropType.att, GearPropType.att_rate
),
"xenon": (
GearPropType.LUK, GearPropType.LUK_rate,
GearPropType.DEX, GearPropType.DEX_rate,
GearPropType.STR, GearPropType.STR_rate,
GearPropType.att, GearPropType.att_rate
),
}[jobtype]
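# --- Editor's sketch (not part of the original template module) ---
# Illustration only: the tuple returned by _get_stat_type is assumed to be
# unpacked positionally in the order documented in the comment above; the
# call below is hypothetical.
#
#   (stat_main, pstat_main, stat_sub, pstat_sub,
#    stat_sub2, pstat_sub2, att, patt) = _get_stat_type("xenon")
#   # stat_sub2 / pstat_sub2 are None for single-substat jobs such as "STR".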
| 44.948598
| 118
| 0.58478
|
c4826366922e9e388c78ffab1f2c206d845740f9
| 6,229
|
py
|
Python
|
contrib/btorcheckmodel.py
|
zyedidia/boolector
|
f1abe139025ca2bda442d094237af66aa6eb7cee
|
[
"MIT"
] | 209
|
2018-05-22T06:52:46.000Z
|
2022-03-19T11:13:22.000Z
|
contrib/btorcheckmodel.py
|
zyedidia/boolector
|
f1abe139025ca2bda442d094237af66aa6eb7cee
|
[
"MIT"
] | 166
|
2018-05-24T06:39:07.000Z
|
2022-03-14T22:56:03.000Z
|
contrib/btorcheckmodel.py
|
zyedidia/boolector
|
f1abe139025ca2bda442d094237af66aa6eb7cee
|
[
"MIT"
] | 48
|
2018-05-23T10:01:31.000Z
|
2022-01-25T09:56:37.000Z
|
#!/usr/bin/env python
# Boolector: Satisfiablity Modulo Theories (SMT) solver.
#
# Copyright (C) 2007-2021 by the authors listed in the AUTHORS file.
#
# This file is part of Boolector.
# See COPYING for more information on using this software.
#
import sys, os, random, signal
def dec2bin(dec, length):
    binary = bin(dec).split('b')[1]
    evalstring = "%0" + str(length) + "d"
    return evalstring % int(binary)
if len(sys.argv) != 4:
print("Usage: ./btorcheckmodel <btor-file> <btor-output-model-file> <boolector-binary>")
sys.exit(2)
pid = os.getpid()
foutname = "/tmp/btorcheckmodel" + str(pid) + ".btor"
# get absolute path to boolector binary
boolector = sys.argv[3]
def cleanup():
try:
os.remove(foutname)
except:
pass # ignore exception
def signalhandler(signum, frame):
cleanup()
sys.exit(0)
signal.signal(signal.SIGINT, signalhandler)
signal.signal(signal.SIGTERM, signalhandler)
signal.signal(signal.SIGHUP, signalhandler)
fout = open (foutname, "w")
arrays = {}
arrayass = {}
constants = {}
foundroot=False
with open (sys.argv[1], "r") as fin:
for origline in fin:
line = origline.strip()
words = line.split()
if words[1] == "root":
if foundroot:
print("Multiple roots are not supported")
sys.exit(2)
foundroot=True
id = int(words[0])
rootarg = words[3]
else:
if words[1] == "array":
if int(words[3]) > 8:
print("Arrays with index len > 8 are not supported")
sys.exit(2)
arrays[words[0]]=words[2] + " " + words[3]
arrayass[words[0]]={}
if words[2] not in constants:
randnr = random.randint(0, (2 ** int(words[2])) - 1)
constants[words[2]]=dec2bin (randnr, int(words[2]))
fout.write (origline)
if not foundroot:
print("Root is missing")
sys.exit(2)
fout.write (str(id) + " const 1 1\n")
lastand = id
id = id + 1
with open (sys.argv[2], "r") as fin:
origline = fin.readline()
if origline.strip() != "sat":
print("Formula is not SAT")
sys.exit(2)
for origline in fin:
line = origline.strip()
words = line.split()
opos = words[0].find("[")
if opos == -1:
#bv model
modelid= words[0]
#check if modelid is really an integer or an identifier
try:
temp = int(modelid)
except:
print("invalid identifier")
sys.exit(2)
## modelid is an identifier, we have to get the id
#ret = os.popen ("grep \"\<" + modelid + "\>\" " + sys.argv[1] + \
# "| awk '{print $1}'")
#lines = ret.readlines()
#if len(lines) > 1:
# print "BV identifier is not unique"
# sys.exit(1)
#if len(lines) == 0:
# print "Cannot find BV identifier"
# sys.exit(1)
#modelid = lines[0].strip()
ass = words[1]
assl = len(ass)
randnr = random.randint (0, 1)
ass = ass.replace("x", str(randnr))
fout.write (str(id) + " const " + str(assl) + " " + ass + "\n")
lastid = str(id)
id = id + 1
fout.write (str(id) + " eq 1 " + modelid + " " + lastid + "\n")
lastid = str(id)
id = id + 1
fout.write (str(id) + " and 1 " + str(lastand) + " " + lastid + "\n")
lastand = id
id = id + 1
else:
cpos = words[0].find("]")
if cpos == -1:
print("Invalid format of model file")
sys.exit(2)
aid=line[0:opos]
#check if array id is really an integer or an identifier
try:
temp = int(aid)
except:
print("invalid identifier")
sys.exit(2)
## aid is an identifier, we have to get the id
#ret = os.popen ("grep \"\<" + aid + "\>\" " + sys.argv[1] + \
# "| awk '{print $1}'")
#lines = ret.readlines()
#if len(lines) > 1:
# print "Array identifier is not unique"
# sys.exit(1)
#if len(lines) == 0:
# print "Cannot find array identifier"
# sys.exit(1)
#aid = lines[0].strip()
iass=line[opos + 1:cpos]
iassl=len(iass)
if iass.find("x") != -1:
print("Unexpected index assignment")
sys.exit(2)
vass=words[1]
vassl=len(vass)
if vass.find("x") != -1:
print("Unexpected value assignment")
sys.exit(2)
fout.write(str(id) + " const " + str(iassl) + " " + iass + "\n")
iid = lastid = str(id)
id = id + 1
fout.write(str(id) + " const " + str(vassl) + " " + vass + "\n")
vid = lastid = str(id)
id = id + 1
fout.write(str(id) + " read " + str(vassl) + " " + aid + " " + iid + "\n")
lastid = str(id)
id = id + 1
fout.write(str(id) + " eq 1 " + vid + " " + lastid + "\n")
lastid = str(id)
id = id + 1
fout.write (str(id) + " and 1 " + str(lastand) + " " + lastid + "\n")
lastand = id
id = id + 1
# remember assignment
arrayass[aid][iass] = vass
for key in arrays:
words = arrays[key].split()
vlen = words[0]
ilen = words[1]
looprange = range (0, 2 ** int(ilen))
ass = arrayass[key]
constant = constants[vlen]
for i in looprange:
binary = dec2bin (i, ilen)
if binary not in ass:
fout.write(str(id) + " const " + str(ilen) + " " + binary + "\n")
iid = lastid = str(id)
id = id + 1
fout.write(str(id) + " const " + str(vlen) + " " + constant + "\n")
vid = lastid = str(id)
id = id + 1
fout.write(str(id) + " read " + str(vlen) + " " + key + " " + iid + "\n")
lastid = str(id)
id = id + 1
fout.write(str(id) + " eq 1 " + vid + " " + lastid + "\n")
lastid = str(id)
id = id + 1
fout.write (str(id) + " and 1 " + str(lastand) + " " + lastid + "\n")
lastand = id
id = id + 1
fout.write(str(id) + " implies 1 " + str(lastand) + " " + rootarg + "\n")
lastid = id
id = id + 1
fout.write(str(id) + " root 1 -" + str(lastid) + "\n")
fout.close()
ret = os.popen (boolector + " -rwl 0 " + foutname)
result = ret.readline().strip()
if result == "sat":
print("Invalid")
sys.exit(1)
elif result != "unsat":
print("Unexpected result")
sys.exit(2)
cleanup()
print("Valid")
sys.exit(0)
| 28.18552
| 90
| 0.533794
|
3017db53ac8910ac687743788813f6d14d181a87
| 6,695
|
py
|
Python
|
ui/command_line.py
|
hkkwok/MachOTool
|
469c0fd06199356fcc6d68809c7ba15a12eac1fd
|
[
"Apache-2.0"
] | 12
|
2016-01-08T22:35:14.000Z
|
2019-07-29T11:50:41.000Z
|
ui/command_line.py
|
uvbs/MachOTool
|
469c0fd06199356fcc6d68809c7ba15a12eac1fd
|
[
"Apache-2.0"
] | 2
|
2015-12-10T21:28:04.000Z
|
2019-10-15T10:05:19.000Z
|
ui/command_line.py
|
uvbs/MachOTool
|
469c0fd06199356fcc6d68809c7ba15a12eac1fd
|
[
"Apache-2.0"
] | 6
|
2016-10-10T05:29:41.000Z
|
2019-10-15T09:59:17.000Z
|
from utils.header import Header
from mach_o.headers.mach_header import MachHeader, MachHeader64
from mach_o.headers.load_command import LoadCommandHeader
from mach_o.non_headers.cstring import Cstring
from mach_o.headers.dylib_command import DylibCommand
class Command(object):
def __init__(self, command, action, desc, flag=None):
self.command = command
self.action = action
self.desc = desc
self.flag = flag
if self.flag is None:
self.flag = '-' + self.command[0]
def match(self, line):
return self.command.startswith(line.strip())
def _get_tokens(self, line):
tokens = line.split()
if len(tokens) > 0:
assert tokens[0].startswith(self.command)
return tokens
def getattr(self):
return self.command.replace('-', '_')
def run(self, line, cli):
tokens = self._get_tokens(line)
if len(tokens) == 0:
return False
method = getattr(cli, self.action)
assert callable(method)
method(*tokens[1:])
class CommandLine(object):
COMMANDS = (
Command('cstring', 'print_cstring', 'print all C strings', '-c'),
Command('fat-header', 'print_fat_header', 'print the fat header', '-f'),
Command('load-command', 'print_load_commands', 'print all load commands', '-l'),
Command('mach-header', 'print_mach_header', 'print all mach headers', '-m'),
Command('raw', 'print_full', 'print the complete structure of the file', '-R'),
Command('shared-library', 'print_shared_libraries', 'print all shared libraries used', '-L'),
Command('shared-library-table', 'print_shared_libraries_table', 'print all shared libraries used', ''),
)
def __init__(self, byte_range):
self.byte_range = byte_range
def run(self, line):
# find all commands that match
matches = list()
for cmd in self.COMMANDS:
if cmd.match(line):
matches.append(cmd)
num_matches = len(matches)
if num_matches != 1:
return matches
else:
# run the only matched command
matches[0].run(line, self)
@classmethod
def configure_parser(cls, parser):
for cmd in cls.COMMANDS:
if cmd.flag is None:
continue
if len(cmd.flag) > 0:
parser.add_argument(cmd.flag, '--' + cmd.command, action='store_true', help=cmd.desc)
else:
parser.add_argument('--' + cmd.command, action='store_true', help=cmd.desc)
def parse_options(self, options):
for cmd in self.COMMANDS:
attr = getattr(options, cmd.getattr())
if attr is True:
cmd.run(cmd.command, self)
def print_full(self):
def format_element(br, start, stop, level):
if level == 0:
return ''
return '%s%d-%d: %s' % (' ' * (level - 1), start, stop, str(br.data))
print '\n'.join(self.byte_range.iterate(format_element))
@staticmethod
def format_header(hdr, trailing_lf=True):
assert isinstance(hdr, Header)
output = hdr.name + '\n '
output += '\n '.join(hdr.get_fields_repr(': '))
if trailing_lf:
output += '\n'
return output
@staticmethod
def _list_remove_all(str_list, pattern):
return filter(lambda x: x != pattern, str_list)
@staticmethod
def _list_remove_empty(str_list):
return CommandLine._list_remove_all(str_list, '')
@staticmethod
def _list_remove_none(list_):
return CommandLine._list_remove_all(list_, None)
def print_mach_header(self):
def format_mach_header(br, start, stop, level):
assert start is not None and stop is not None and level is not None # get rid of pycharm warning
if not isinstance(br.data, (MachHeader, MachHeader64)):
return ''
return self.format_header(br.data)
lines = self.byte_range.iterate(format_mach_header)
lines = self._list_remove_empty(lines)
print '\n'.join(lines)
def print_load_commands(self):
def format_load_command(br, start, stop, level):
assert start is not None and stop is not None and level is not None # get rid of pycharm warning
if not isinstance(br.data, LoadCommandHeader):
return ''
return self.format_header(br.data)
lines = self.byte_range.iterate(format_load_command)
lines = self._list_remove_empty(lines)
print '\n'.join(lines)
print '\n%d load commands' % len(lines)
def print_cstring(self):
def format_cstring(br, start, stop, level):
assert start is not None and stop is not None and level is not None # get rid of pycharm warning
if not isinstance(br.data, Cstring):
return ''
return br.data.string
lines = self.byte_range.iterate(format_cstring)
lines = self._list_remove_empty(lines)
n = 1
for line in lines:
print '%d: %s' % (n, line)
n += 1
def _get_shared_libraries(self):
def filter_dylib_command(br, start, stop, level):
assert start is not None and stop is not None and level is not None # get rid of pycharm warning
if not isinstance(br.data, DylibCommand):
return None
parent_br = br.parent
assert parent_br is not None
assert len(parent_br.subranges) in (2, 3) # 3rd subrange is for optional alignment padding
return br.data, parent_br.subranges[1].data
subranges = self.byte_range.iterate(filter_dylib_command)
dylib_commands = self._list_remove_none(subranges)
return dylib_commands
def print_shared_libraries(self):
for (dylib_command, lc_str) in self._get_shared_libraries():
print self.format_header(dylib_command, trailing_lf=False)
print ' %s: %s\n' % (lc_str.desc, lc_str.value)
def print_shared_libraries_table(self):
print 'timestamp current compatib. name'
print '------------------- ---------- ---------- -------------------------'
for (dylib_command, lc_str) in self._get_shared_libraries():
print '%18s %10s %10s %s' % (
DylibCommand.FIELDS[3].display(dylib_command),
DylibCommand.FIELDS[4].display(dylib_command),
DylibCommand.FIELDS[5].display(dylib_command),
lc_str.value
)
def print_symbol_table(self):
pass
| 38.477011
| 111
| 0.603286
|
dc24a4813fc2346431758022af1f79d4bfd0d09c
| 651
|
py
|
Python
|
scieio/biotechnology/serializers.py
|
arnelimperial/scieio
|
279a25766f20d074a3df824c0fbc8b2d8e35f272
|
[
"MIT"
] | null | null | null |
scieio/biotechnology/serializers.py
|
arnelimperial/scieio
|
279a25766f20d074a3df824c0fbc8b2d8e35f272
|
[
"MIT"
] | 8
|
2021-03-19T01:56:44.000Z
|
2022-03-12T00:24:21.000Z
|
scieio/biotechnology/serializers.py
|
arnelimperial/scieio
|
279a25766f20d074a3df824c0fbc8b2d8e35f272
|
[
"MIT"
] | null | null | null |
from rest_framework import serializers
from scieio.biotechnology import models
class BioTechSerializer(serializers.ModelSerializer):
class Meta:
model = models.BioTech
fields = (
'id',
'life_science',
'name',
'slug',
'description',
'product_code',
'model',
'condition',
'warranty',
'seller',
'manufacturer',
'image',
'availability',
'price',
'created',
            'updated'
        )
| 21.7
| 53
| 0.44086
|
789fcec2cefb69de6f17e9747c2df7d7414a6871
| 48,487
|
py
|
Python
|
seaborn/tests/test_matrix.py
|
bdice/seaborn
|
29f5807efdf0544b16c088beac7c1c5eda174084
|
[
"BSD-3-Clause"
] | null | null | null |
seaborn/tests/test_matrix.py
|
bdice/seaborn
|
29f5807efdf0544b16c088beac7c1c5eda174084
|
[
"BSD-3-Clause"
] | null | null | null |
seaborn/tests/test_matrix.py
|
bdice/seaborn
|
29f5807efdf0544b16c088beac7c1c5eda174084
|
[
"BSD-3-Clause"
] | null | null | null |
import itertools
import tempfile
import copy
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import pandas as pd
from scipy.spatial import distance
from scipy.cluster import hierarchy
import nose.tools as nt
import numpy.testing as npt
try:
import pandas.testing as pdt
except ImportError:
import pandas.util.testing as pdt
import pytest
from .. import matrix as mat
from .. import color_palette
try:
import fastcluster
assert fastcluster
_no_fastcluster = False
except ImportError:
_no_fastcluster = True
class TestHeatmap:
rs = np.random.RandomState(sum(map(ord, "heatmap")))
x_norm = rs.randn(4, 8)
letters = pd.Series(["A", "B", "C", "D"], name="letters")
df_norm = pd.DataFrame(x_norm, index=letters)
x_unif = rs.rand(20, 13)
df_unif = pd.DataFrame(x_unif)
default_kws = dict(vmin=None, vmax=None, cmap=None, center=None,
robust=False, annot=False, fmt=".2f", annot_kws=None,
cbar=True, cbar_kws=None, mask=None)
def test_ndarray_input(self):
p = mat._HeatMapper(self.x_norm, **self.default_kws)
npt.assert_array_equal(p.plot_data, self.x_norm)
pdt.assert_frame_equal(p.data, pd.DataFrame(self.x_norm))
npt.assert_array_equal(p.xticklabels, np.arange(8))
npt.assert_array_equal(p.yticklabels, np.arange(4))
nt.assert_equal(p.xlabel, "")
nt.assert_equal(p.ylabel, "")
def test_df_input(self):
p = mat._HeatMapper(self.df_norm, **self.default_kws)
npt.assert_array_equal(p.plot_data, self.x_norm)
pdt.assert_frame_equal(p.data, self.df_norm)
npt.assert_array_equal(p.xticklabels, np.arange(8))
npt.assert_array_equal(p.yticklabels, self.letters.values)
nt.assert_equal(p.xlabel, "")
nt.assert_equal(p.ylabel, "letters")
def test_df_multindex_input(self):
df = self.df_norm.copy()
index = pd.MultiIndex.from_tuples([("A", 1), ("B", 2),
("C", 3), ("D", 4)],
names=["letter", "number"])
index.name = "letter-number"
df.index = index
p = mat._HeatMapper(df, **self.default_kws)
combined_tick_labels = ["A-1", "B-2", "C-3", "D-4"]
npt.assert_array_equal(p.yticklabels, combined_tick_labels)
nt.assert_equal(p.ylabel, "letter-number")
p = mat._HeatMapper(df.T, **self.default_kws)
npt.assert_array_equal(p.xticklabels, combined_tick_labels)
nt.assert_equal(p.xlabel, "letter-number")
@pytest.mark.parametrize("dtype", [float, np.int64, object])
def test_mask_input(self, dtype):
kws = self.default_kws.copy()
mask = self.x_norm > 0
kws['mask'] = mask
data = self.x_norm.astype(dtype)
p = mat._HeatMapper(data, **kws)
plot_data = np.ma.masked_where(mask, data)
npt.assert_array_equal(p.plot_data, plot_data)
def test_mask_limits(self):
"""Make sure masked cells are not used to calculate extremes"""
kws = self.default_kws.copy()
mask = self.x_norm > 0
kws['mask'] = mask
p = mat._HeatMapper(self.x_norm, **kws)
assert p.vmax == np.ma.array(self.x_norm, mask=mask).max()
assert p.vmin == np.ma.array(self.x_norm, mask=mask).min()
mask = self.x_norm < 0
kws['mask'] = mask
p = mat._HeatMapper(self.x_norm, **kws)
assert p.vmin == np.ma.array(self.x_norm, mask=mask).min()
assert p.vmax == np.ma.array(self.x_norm, mask=mask).max()
def test_default_vlims(self):
p = mat._HeatMapper(self.df_unif, **self.default_kws)
nt.assert_equal(p.vmin, self.x_unif.min())
nt.assert_equal(p.vmax, self.x_unif.max())
def test_robust_vlims(self):
kws = self.default_kws.copy()
kws["robust"] = True
p = mat._HeatMapper(self.df_unif, **kws)
nt.assert_equal(p.vmin, np.percentile(self.x_unif, 2))
nt.assert_equal(p.vmax, np.percentile(self.x_unif, 98))
def test_custom_sequential_vlims(self):
kws = self.default_kws.copy()
kws["vmin"] = 0
kws["vmax"] = 1
p = mat._HeatMapper(self.df_unif, **kws)
nt.assert_equal(p.vmin, 0)
nt.assert_equal(p.vmax, 1)
def test_custom_diverging_vlims(self):
kws = self.default_kws.copy()
kws["vmin"] = -4
kws["vmax"] = 5
kws["center"] = 0
p = mat._HeatMapper(self.df_norm, **kws)
nt.assert_equal(p.vmin, -4)
nt.assert_equal(p.vmax, 5)
def test_array_with_nans(self):
x1 = self.rs.rand(10, 10)
nulls = np.zeros(10) * np.nan
x2 = np.c_[x1, nulls]
m1 = mat._HeatMapper(x1, **self.default_kws)
m2 = mat._HeatMapper(x2, **self.default_kws)
nt.assert_equal(m1.vmin, m2.vmin)
nt.assert_equal(m1.vmax, m2.vmax)
def test_mask(self):
df = pd.DataFrame(data={'a': [1, 1, 1],
'b': [2, np.nan, 2],
'c': [3, 3, np.nan]})
kws = self.default_kws.copy()
kws["mask"] = np.isnan(df.values)
m = mat._HeatMapper(df, **kws)
npt.assert_array_equal(np.isnan(m.plot_data.data),
m.plot_data.mask)
def test_custom_cmap(self):
kws = self.default_kws.copy()
kws["cmap"] = "BuGn"
p = mat._HeatMapper(self.df_unif, **kws)
nt.assert_equal(p.cmap, mpl.cm.BuGn)
def test_centered_vlims(self):
kws = self.default_kws.copy()
kws["center"] = .5
p = mat._HeatMapper(self.df_unif, **kws)
nt.assert_equal(p.vmin, self.df_unif.values.min())
nt.assert_equal(p.vmax, self.df_unif.values.max())
def test_default_colors(self):
vals = np.linspace(.2, 1, 9)
cmap = mpl.cm.binary
ax = mat.heatmap([vals], cmap=cmap)
fc = ax.collections[0].get_facecolors()
cvals = np.linspace(0, 1, 9)
npt.assert_array_almost_equal(fc, cmap(cvals), 2)
def test_custom_vlim_colors(self):
vals = np.linspace(.2, 1, 9)
cmap = mpl.cm.binary
ax = mat.heatmap([vals], vmin=0, cmap=cmap)
fc = ax.collections[0].get_facecolors()
npt.assert_array_almost_equal(fc, cmap(vals), 2)
def test_custom_center_colors(self):
vals = np.linspace(.2, 1, 9)
cmap = mpl.cm.binary
ax = mat.heatmap([vals], center=.5, cmap=cmap)
fc = ax.collections[0].get_facecolors()
npt.assert_array_almost_equal(fc, cmap(vals), 2)
def test_cmap_with_properties(self):
kws = self.default_kws.copy()
cmap = copy.copy(mpl.cm.get_cmap("BrBG"))
cmap.set_bad("red")
kws["cmap"] = cmap
hm = mat._HeatMapper(self.df_unif, **kws)
npt.assert_array_equal(
cmap(np.ma.masked_invalid([np.nan])),
hm.cmap(np.ma.masked_invalid([np.nan])))
kws["center"] = 0.5
hm = mat._HeatMapper(self.df_unif, **kws)
npt.assert_array_equal(
cmap(np.ma.masked_invalid([np.nan])),
hm.cmap(np.ma.masked_invalid([np.nan])))
kws = self.default_kws.copy()
cmap = copy.copy(mpl.cm.get_cmap("BrBG"))
cmap.set_under("red")
kws["cmap"] = cmap
hm = mat._HeatMapper(self.df_unif, **kws)
npt.assert_array_equal(cmap(-np.inf), hm.cmap(-np.inf))
kws["center"] = .5
hm = mat._HeatMapper(self.df_unif, **kws)
npt.assert_array_equal(cmap(-np.inf), hm.cmap(-np.inf))
kws = self.default_kws.copy()
cmap = copy.copy(mpl.cm.get_cmap("BrBG"))
cmap.set_over("red")
kws["cmap"] = cmap
hm = mat._HeatMapper(self.df_unif, **kws)
npt.assert_array_equal(cmap(-np.inf), hm.cmap(-np.inf))
kws["center"] = .5
hm = mat._HeatMapper(self.df_unif, **kws)
npt.assert_array_equal(cmap(np.inf), hm.cmap(np.inf))
def test_tickabels_off(self):
kws = self.default_kws.copy()
kws['xticklabels'] = False
kws['yticklabels'] = False
p = mat._HeatMapper(self.df_norm, **kws)
nt.assert_equal(p.xticklabels, [])
nt.assert_equal(p.yticklabels, [])
def test_custom_ticklabels(self):
kws = self.default_kws.copy()
xticklabels = list('iheartheatmaps'[:self.df_norm.shape[1]])
yticklabels = list('heatmapsarecool'[:self.df_norm.shape[0]])
kws['xticklabels'] = xticklabels
kws['yticklabels'] = yticklabels
p = mat._HeatMapper(self.df_norm, **kws)
nt.assert_equal(p.xticklabels, xticklabels)
nt.assert_equal(p.yticklabels, yticklabels)
def test_custom_ticklabel_interval(self):
kws = self.default_kws.copy()
xstep, ystep = 2, 3
kws['xticklabels'] = xstep
kws['yticklabels'] = ystep
p = mat._HeatMapper(self.df_norm, **kws)
nx, ny = self.df_norm.T.shape
npt.assert_array_equal(p.xticks, np.arange(0, nx, xstep) + .5)
npt.assert_array_equal(p.yticks, np.arange(0, ny, ystep) + .5)
npt.assert_array_equal(p.xticklabels,
self.df_norm.columns[0:nx:xstep])
npt.assert_array_equal(p.yticklabels,
self.df_norm.index[0:ny:ystep])
def test_heatmap_annotation(self):
ax = mat.heatmap(self.df_norm, annot=True, fmt=".1f",
annot_kws={"fontsize": 14})
for val, text in zip(self.x_norm.flat, ax.texts):
nt.assert_equal(text.get_text(), "{:.1f}".format(val))
nt.assert_equal(text.get_fontsize(), 14)
def test_heatmap_annotation_overwrite_kws(self):
annot_kws = dict(color="0.3", va="bottom", ha="left")
ax = mat.heatmap(self.df_norm, annot=True, fmt=".1f",
annot_kws=annot_kws)
for text in ax.texts:
nt.assert_equal(text.get_color(), "0.3")
nt.assert_equal(text.get_ha(), "left")
nt.assert_equal(text.get_va(), "bottom")
def test_heatmap_annotation_with_mask(self):
df = pd.DataFrame(data={'a': [1, 1, 1],
'b': [2, np.nan, 2],
'c': [3, 3, np.nan]})
mask = np.isnan(df.values)
df_masked = np.ma.masked_where(mask, df)
ax = mat.heatmap(df, annot=True, fmt='.1f', mask=mask)
nt.assert_equal(len(df_masked.compressed()), len(ax.texts))
for val, text in zip(df_masked.compressed(), ax.texts):
nt.assert_equal("{:.1f}".format(val), text.get_text())
def test_heatmap_annotation_mesh_colors(self):
ax = mat.heatmap(self.df_norm, annot=True)
mesh = ax.collections[0]
nt.assert_equal(len(mesh.get_facecolors()), self.df_norm.values.size)
plt.close("all")
def test_heatmap_annotation_other_data(self):
annot_data = self.df_norm + 10
ax = mat.heatmap(self.df_norm, annot=annot_data, fmt=".1f",
annot_kws={"fontsize": 14})
for val, text in zip(annot_data.values.flat, ax.texts):
nt.assert_equal(text.get_text(), "{:.1f}".format(val))
nt.assert_equal(text.get_fontsize(), 14)
def test_heatmap_annotation_with_limited_ticklabels(self):
ax = mat.heatmap(self.df_norm, fmt=".2f", annot=True,
xticklabels=False, yticklabels=False)
for val, text in zip(self.x_norm.flat, ax.texts):
nt.assert_equal(text.get_text(), "{:.2f}".format(val))
def test_heatmap_cbar(self):
f = plt.figure()
mat.heatmap(self.df_norm)
nt.assert_equal(len(f.axes), 2)
plt.close(f)
f = plt.figure()
mat.heatmap(self.df_norm, cbar=False)
nt.assert_equal(len(f.axes), 1)
plt.close(f)
f, (ax1, ax2) = plt.subplots(2)
mat.heatmap(self.df_norm, ax=ax1, cbar_ax=ax2)
nt.assert_equal(len(f.axes), 2)
plt.close(f)
@pytest.mark.xfail(mpl.__version__ == "3.1.1",
reason="matplotlib 3.1.1 bug")
def test_heatmap_axes(self):
ax = mat.heatmap(self.df_norm)
xtl = [int(l.get_text()) for l in ax.get_xticklabels()]
nt.assert_equal(xtl, list(self.df_norm.columns))
ytl = [l.get_text() for l in ax.get_yticklabels()]
nt.assert_equal(ytl, list(self.df_norm.index))
nt.assert_equal(ax.get_xlabel(), "")
nt.assert_equal(ax.get_ylabel(), "letters")
nt.assert_equal(ax.get_xlim(), (0, 8))
nt.assert_equal(ax.get_ylim(), (4, 0))
def test_heatmap_ticklabel_rotation(self):
f, ax = plt.subplots(figsize=(2, 2))
mat.heatmap(self.df_norm, xticklabels=1, yticklabels=1, ax=ax)
for t in ax.get_xticklabels():
nt.assert_equal(t.get_rotation(), 0)
for t in ax.get_yticklabels():
nt.assert_equal(t.get_rotation(), 90)
plt.close(f)
df = self.df_norm.copy()
df.columns = [str(c) * 10 for c in df.columns]
df.index = [i * 10 for i in df.index]
f, ax = plt.subplots(figsize=(2, 2))
mat.heatmap(df, xticklabels=1, yticklabels=1, ax=ax)
for t in ax.get_xticklabels():
nt.assert_equal(t.get_rotation(), 90)
for t in ax.get_yticklabels():
nt.assert_equal(t.get_rotation(), 0)
plt.close(f)
def test_heatmap_inner_lines(self):
c = (0, 0, 1, 1)
ax = mat.heatmap(self.df_norm, linewidths=2, linecolor=c)
mesh = ax.collections[0]
nt.assert_equal(mesh.get_linewidths()[0], 2)
nt.assert_equal(tuple(mesh.get_edgecolor()[0]), c)
def test_square_aspect(self):
ax = mat.heatmap(self.df_norm, square=True)
obs_aspect = ax.get_aspect()
# mpl>3.3 returns 1 for setting "equal" aspect
# so test for the two possible equal outcomes
assert obs_aspect == "equal" or obs_aspect == 1
def test_mask_validation(self):
mask = mat._matrix_mask(self.df_norm, None)
nt.assert_equal(mask.shape, self.df_norm.shape)
nt.assert_equal(mask.values.sum(), 0)
with nt.assert_raises(ValueError):
bad_array_mask = self.rs.randn(3, 6) > 0
mat._matrix_mask(self.df_norm, bad_array_mask)
with nt.assert_raises(ValueError):
bad_df_mask = pd.DataFrame(self.rs.randn(4, 8) > 0)
mat._matrix_mask(self.df_norm, bad_df_mask)
def test_missing_data_mask(self):
data = pd.DataFrame(np.arange(4, dtype=float).reshape(2, 2))
data.loc[0, 0] = np.nan
mask = mat._matrix_mask(data, None)
npt.assert_array_equal(mask, [[True, False], [False, False]])
mask_in = np.array([[False, True], [False, False]])
mask_out = mat._matrix_mask(data, mask_in)
npt.assert_array_equal(mask_out, [[True, True], [False, False]])
def test_cbar_ticks(self):
f, (ax1, ax2) = plt.subplots(2)
mat.heatmap(self.df_norm, ax=ax1, cbar_ax=ax2,
cbar_kws=dict(drawedges=True))
assert len(ax2.collections) == 2
class TestDendrogram:
rs = np.random.RandomState(sum(map(ord, "dendrogram")))
x_norm = rs.randn(4, 8) + np.arange(8)
x_norm = (x_norm.T + np.arange(4)).T
letters = pd.Series(["A", "B", "C", "D", "E", "F", "G", "H"],
name="letters")
df_norm = pd.DataFrame(x_norm, columns=letters)
try:
import fastcluster
x_norm_linkage = fastcluster.linkage_vector(x_norm.T,
metric='euclidean',
method='single')
except ImportError:
x_norm_distances = distance.pdist(x_norm.T, metric='euclidean')
x_norm_linkage = hierarchy.linkage(x_norm_distances, method='single')
x_norm_dendrogram = hierarchy.dendrogram(x_norm_linkage, no_plot=True,
color_threshold=-np.inf)
x_norm_leaves = x_norm_dendrogram['leaves']
df_norm_leaves = np.asarray(df_norm.columns[x_norm_leaves])
default_kws = dict(linkage=None, metric='euclidean', method='single',
axis=1, label=True, rotate=False)
def test_ndarray_input(self):
p = mat._DendrogramPlotter(self.x_norm, **self.default_kws)
npt.assert_array_equal(p.array.T, self.x_norm)
pdt.assert_frame_equal(p.data.T, pd.DataFrame(self.x_norm))
npt.assert_array_equal(p.linkage, self.x_norm_linkage)
nt.assert_dict_equal(p.dendrogram, self.x_norm_dendrogram)
npt.assert_array_equal(p.reordered_ind, self.x_norm_leaves)
npt.assert_array_equal(p.xticklabels, self.x_norm_leaves)
npt.assert_array_equal(p.yticklabels, [])
nt.assert_equal(p.xlabel, None)
nt.assert_equal(p.ylabel, '')
def test_df_input(self):
p = mat._DendrogramPlotter(self.df_norm, **self.default_kws)
npt.assert_array_equal(p.array.T, np.asarray(self.df_norm))
pdt.assert_frame_equal(p.data.T, self.df_norm)
npt.assert_array_equal(p.linkage, self.x_norm_linkage)
nt.assert_dict_equal(p.dendrogram, self.x_norm_dendrogram)
npt.assert_array_equal(p.xticklabels,
np.asarray(self.df_norm.columns)[
self.x_norm_leaves])
npt.assert_array_equal(p.yticklabels, [])
nt.assert_equal(p.xlabel, 'letters')
nt.assert_equal(p.ylabel, '')
def test_df_multindex_input(self):
df = self.df_norm.copy()
index = pd.MultiIndex.from_tuples([("A", 1), ("B", 2),
("C", 3), ("D", 4)],
names=["letter", "number"])
index.name = "letter-number"
df.index = index
kws = self.default_kws.copy()
kws['label'] = True
p = mat._DendrogramPlotter(df.T, **kws)
xticklabels = ["A-1", "B-2", "C-3", "D-4"]
xticklabels = [xticklabels[i] for i in p.reordered_ind]
npt.assert_array_equal(p.xticklabels, xticklabels)
npt.assert_array_equal(p.yticklabels, [])
nt.assert_equal(p.xlabel, "letter-number")
def test_axis0_input(self):
kws = self.default_kws.copy()
kws['axis'] = 0
p = mat._DendrogramPlotter(self.df_norm.T, **kws)
npt.assert_array_equal(p.array, np.asarray(self.df_norm.T))
pdt.assert_frame_equal(p.data, self.df_norm.T)
npt.assert_array_equal(p.linkage, self.x_norm_linkage)
nt.assert_dict_equal(p.dendrogram, self.x_norm_dendrogram)
npt.assert_array_equal(p.xticklabels, self.df_norm_leaves)
npt.assert_array_equal(p.yticklabels, [])
nt.assert_equal(p.xlabel, 'letters')
nt.assert_equal(p.ylabel, '')
def test_rotate_input(self):
kws = self.default_kws.copy()
kws['rotate'] = True
p = mat._DendrogramPlotter(self.df_norm, **kws)
npt.assert_array_equal(p.array.T, np.asarray(self.df_norm))
pdt.assert_frame_equal(p.data.T, self.df_norm)
npt.assert_array_equal(p.xticklabels, [])
npt.assert_array_equal(p.yticklabels, self.df_norm_leaves)
nt.assert_equal(p.xlabel, '')
nt.assert_equal(p.ylabel, 'letters')
def test_rotate_axis0_input(self):
kws = self.default_kws.copy()
kws['rotate'] = True
kws['axis'] = 0
p = mat._DendrogramPlotter(self.df_norm.T, **kws)
npt.assert_array_equal(p.reordered_ind, self.x_norm_leaves)
def test_custom_linkage(self):
kws = self.default_kws.copy()
try:
import fastcluster
linkage = fastcluster.linkage_vector(self.x_norm, method='single',
metric='euclidean')
except ImportError:
d = distance.pdist(self.x_norm, metric='euclidean')
linkage = hierarchy.linkage(d, method='single')
dendrogram = hierarchy.dendrogram(linkage, no_plot=True,
color_threshold=-np.inf)
kws['linkage'] = linkage
p = mat._DendrogramPlotter(self.df_norm, **kws)
npt.assert_array_equal(p.linkage, linkage)
nt.assert_dict_equal(p.dendrogram, dendrogram)
def test_label_false(self):
kws = self.default_kws.copy()
kws['label'] = False
p = mat._DendrogramPlotter(self.df_norm, **kws)
nt.assert_equal(p.xticks, [])
nt.assert_equal(p.yticks, [])
nt.assert_equal(p.xticklabels, [])
nt.assert_equal(p.yticklabels, [])
nt.assert_equal(p.xlabel, "")
nt.assert_equal(p.ylabel, "")
def test_linkage_scipy(self):
p = mat._DendrogramPlotter(self.x_norm, **self.default_kws)
scipy_linkage = p._calculate_linkage_scipy()
from scipy.spatial import distance
from scipy.cluster import hierarchy
dists = distance.pdist(self.x_norm.T,
metric=self.default_kws['metric'])
linkage = hierarchy.linkage(dists, method=self.default_kws['method'])
npt.assert_array_equal(scipy_linkage, linkage)
@pytest.mark.skipif(_no_fastcluster, reason="fastcluster not installed")
def test_fastcluster_other_method(self):
import fastcluster
kws = self.default_kws.copy()
kws['method'] = 'average'
linkage = fastcluster.linkage(self.x_norm.T, method='average',
metric='euclidean')
p = mat._DendrogramPlotter(self.x_norm, **kws)
npt.assert_array_equal(p.linkage, linkage)
@pytest.mark.skipif(_no_fastcluster, reason="fastcluster not installed")
def test_fastcluster_non_euclidean(self):
import fastcluster
kws = self.default_kws.copy()
kws['metric'] = 'cosine'
kws['method'] = 'average'
linkage = fastcluster.linkage(self.x_norm.T, method=kws['method'],
metric=kws['metric'])
p = mat._DendrogramPlotter(self.x_norm, **kws)
npt.assert_array_equal(p.linkage, linkage)
def test_dendrogram_plot(self):
d = mat.dendrogram(self.x_norm, **self.default_kws)
ax = plt.gca()
xlim = ax.get_xlim()
# 10 comes from _plot_dendrogram in scipy.cluster.hierarchy
xmax = len(d.reordered_ind) * 10
nt.assert_equal(xlim[0], 0)
nt.assert_equal(xlim[1], xmax)
nt.assert_equal(len(ax.collections[0].get_paths()),
len(d.dependent_coord))
@pytest.mark.xfail(mpl.__version__ == "3.1.1",
reason="matplotlib 3.1.1 bug")
def test_dendrogram_rotate(self):
kws = self.default_kws.copy()
kws['rotate'] = True
d = mat.dendrogram(self.x_norm, **kws)
ax = plt.gca()
ylim = ax.get_ylim()
# 10 comes from _plot_dendrogram in scipy.cluster.hierarchy
ymax = len(d.reordered_ind) * 10
# Since y axis is inverted, ylim is (80, 0)
# and therefore not (0, 80) as usual:
nt.assert_equal(ylim[1], 0)
nt.assert_equal(ylim[0], ymax)
def test_dendrogram_ticklabel_rotation(self):
f, ax = plt.subplots(figsize=(2, 2))
mat.dendrogram(self.df_norm, ax=ax)
for t in ax.get_xticklabels():
nt.assert_equal(t.get_rotation(), 0)
plt.close(f)
df = self.df_norm.copy()
df.columns = [str(c) * 10 for c in df.columns]
df.index = [i * 10 for i in df.index]
f, ax = plt.subplots(figsize=(2, 2))
mat.dendrogram(df, ax=ax)
for t in ax.get_xticklabels():
nt.assert_equal(t.get_rotation(), 90)
plt.close(f)
f, ax = plt.subplots(figsize=(2, 2))
mat.dendrogram(df.T, axis=0, rotate=True)
for t in ax.get_yticklabels():
nt.assert_equal(t.get_rotation(), 0)
plt.close(f)
class TestClustermap:
rs = np.random.RandomState(sum(map(ord, "clustermap")))
x_norm = rs.randn(4, 8) + np.arange(8)
x_norm = (x_norm.T + np.arange(4)).T
letters = pd.Series(["A", "B", "C", "D", "E", "F", "G", "H"],
name="letters")
df_norm = pd.DataFrame(x_norm, columns=letters)
try:
import fastcluster
x_norm_linkage = fastcluster.linkage_vector(x_norm.T,
metric='euclidean',
method='single')
except ImportError:
x_norm_distances = distance.pdist(x_norm.T, metric='euclidean')
x_norm_linkage = hierarchy.linkage(x_norm_distances, method='single')
x_norm_dendrogram = hierarchy.dendrogram(x_norm_linkage, no_plot=True,
color_threshold=-np.inf)
x_norm_leaves = x_norm_dendrogram['leaves']
df_norm_leaves = np.asarray(df_norm.columns[x_norm_leaves])
default_kws = dict(pivot_kws=None, z_score=None, standard_scale=None,
figsize=(10, 10), row_colors=None, col_colors=None,
dendrogram_ratio=.2, colors_ratio=.03,
cbar_pos=(0, .8, .05, .2))
default_plot_kws = dict(metric='euclidean', method='average',
colorbar_kws=None,
row_cluster=True, col_cluster=True,
row_linkage=None, col_linkage=None,
tree_kws=None)
row_colors = color_palette('Set2', df_norm.shape[0])
col_colors = color_palette('Dark2', df_norm.shape[1])
def test_ndarray_input(self):
cm = mat.ClusterGrid(self.x_norm, **self.default_kws)
pdt.assert_frame_equal(cm.data, pd.DataFrame(self.x_norm))
nt.assert_equal(len(cm.fig.axes), 4)
nt.assert_equal(cm.ax_row_colors, None)
nt.assert_equal(cm.ax_col_colors, None)
def test_df_input(self):
cm = mat.ClusterGrid(self.df_norm, **self.default_kws)
pdt.assert_frame_equal(cm.data, self.df_norm)
def test_corr_df_input(self):
df = self.df_norm.corr()
cg = mat.ClusterGrid(df, **self.default_kws)
cg.plot(**self.default_plot_kws)
diag = cg.data2d.values[np.diag_indices_from(cg.data2d)]
npt.assert_array_equal(diag, np.ones(cg.data2d.shape[0]))
def test_pivot_input(self):
df_norm = self.df_norm.copy()
df_norm.index.name = 'numbers'
df_long = pd.melt(df_norm.reset_index(), var_name='letters',
id_vars='numbers')
kws = self.default_kws.copy()
kws['pivot_kws'] = dict(index='numbers', columns='letters',
values='value')
cm = mat.ClusterGrid(df_long, **kws)
pdt.assert_frame_equal(cm.data2d, df_norm)
def test_colors_input(self):
kws = self.default_kws.copy()
kws['row_colors'] = self.row_colors
kws['col_colors'] = self.col_colors
cm = mat.ClusterGrid(self.df_norm, **kws)
npt.assert_array_equal(cm.row_colors, self.row_colors)
npt.assert_array_equal(cm.col_colors, self.col_colors)
nt.assert_equal(len(cm.fig.axes), 6)
def test_nested_colors_input(self):
kws = self.default_kws.copy()
row_colors = [self.row_colors, self.row_colors]
col_colors = [self.col_colors, self.col_colors]
kws['row_colors'] = row_colors
kws['col_colors'] = col_colors
cm = mat.ClusterGrid(self.df_norm, **kws)
npt.assert_array_equal(cm.row_colors, row_colors)
npt.assert_array_equal(cm.col_colors, col_colors)
nt.assert_equal(len(cm.fig.axes), 6)
def test_colors_input_custom_cmap(self):
kws = self.default_kws.copy()
kws['cmap'] = mpl.cm.PRGn
kws['row_colors'] = self.row_colors
kws['col_colors'] = self.col_colors
cm = mat.clustermap(self.df_norm, **kws)
npt.assert_array_equal(cm.row_colors, self.row_colors)
npt.assert_array_equal(cm.col_colors, self.col_colors)
nt.assert_equal(len(cm.fig.axes), 6)
def test_z_score(self):
df = self.df_norm.copy()
df = (df - df.mean()) / df.std()
kws = self.default_kws.copy()
kws['z_score'] = 1
cm = mat.ClusterGrid(self.df_norm, **kws)
pdt.assert_frame_equal(cm.data2d, df)
def test_z_score_axis0(self):
df = self.df_norm.copy()
df = df.T
df = (df - df.mean()) / df.std()
df = df.T
kws = self.default_kws.copy()
kws['z_score'] = 0
cm = mat.ClusterGrid(self.df_norm, **kws)
pdt.assert_frame_equal(cm.data2d, df)
def test_standard_scale(self):
df = self.df_norm.copy()
df = (df - df.min()) / (df.max() - df.min())
kws = self.default_kws.copy()
kws['standard_scale'] = 1
cm = mat.ClusterGrid(self.df_norm, **kws)
pdt.assert_frame_equal(cm.data2d, df)
def test_standard_scale_axis0(self):
df = self.df_norm.copy()
df = df.T
df = (df - df.min()) / (df.max() - df.min())
df = df.T
kws = self.default_kws.copy()
kws['standard_scale'] = 0
cm = mat.ClusterGrid(self.df_norm, **kws)
pdt.assert_frame_equal(cm.data2d, df)
def test_z_score_standard_scale(self):
kws = self.default_kws.copy()
kws['z_score'] = True
kws['standard_scale'] = True
with nt.assert_raises(ValueError):
mat.ClusterGrid(self.df_norm, **kws)
def test_color_list_to_matrix_and_cmap(self):
matrix, cmap = mat.ClusterGrid.color_list_to_matrix_and_cmap(
self.col_colors, self.x_norm_leaves)
colors_set = set(self.col_colors)
col_to_value = dict((col, i) for i, col in enumerate(colors_set))
matrix_test = np.array([col_to_value[col] for col in
self.col_colors])[self.x_norm_leaves]
shape = len(self.col_colors), 1
matrix_test = matrix_test.reshape(shape)
cmap_test = mpl.colors.ListedColormap(colors_set)
npt.assert_array_equal(matrix, matrix_test)
npt.assert_array_equal(cmap.colors, cmap_test.colors)
def test_nested_color_list_to_matrix_and_cmap(self):
colors = [self.col_colors, self.col_colors]
matrix, cmap = mat.ClusterGrid.color_list_to_matrix_and_cmap(
colors, self.x_norm_leaves)
all_colors = set(itertools.chain(*colors))
color_to_value = dict((col, i) for i, col in enumerate(all_colors))
matrix_test = np.array(
[color_to_value[c] for color in colors for c in color])
shape = len(colors), len(colors[0])
matrix_test = matrix_test.reshape(shape)
matrix_test = matrix_test[:, self.x_norm_leaves]
matrix_test = matrix_test.T
cmap_test = mpl.colors.ListedColormap(all_colors)
npt.assert_array_equal(matrix, matrix_test)
npt.assert_array_equal(cmap.colors, cmap_test.colors)
def test_color_list_to_matrix_and_cmap_axis1(self):
matrix, cmap = mat.ClusterGrid.color_list_to_matrix_and_cmap(
self.col_colors, self.x_norm_leaves, axis=1)
colors_set = set(self.col_colors)
col_to_value = dict((col, i) for i, col in enumerate(colors_set))
matrix_test = np.array([col_to_value[col] for col in
self.col_colors])[self.x_norm_leaves]
shape = 1, len(self.col_colors)
matrix_test = matrix_test.reshape(shape)
cmap_test = mpl.colors.ListedColormap(colors_set)
npt.assert_array_equal(matrix, matrix_test)
npt.assert_array_equal(cmap.colors, cmap_test.colors)
def test_savefig(self):
# Not sure if this is the right way to test....
cm = mat.ClusterGrid(self.df_norm, **self.default_kws)
cm.plot(**self.default_plot_kws)
cm.savefig(tempfile.NamedTemporaryFile(), format='png')
def test_plot_dendrograms(self):
cm = mat.clustermap(self.df_norm, **self.default_kws)
nt.assert_equal(len(cm.ax_row_dendrogram.collections[0].get_paths()),
len(cm.dendrogram_row.independent_coord))
nt.assert_equal(len(cm.ax_col_dendrogram.collections[0].get_paths()),
len(cm.dendrogram_col.independent_coord))
data2d = self.df_norm.iloc[cm.dendrogram_row.reordered_ind,
cm.dendrogram_col.reordered_ind]
pdt.assert_frame_equal(cm.data2d, data2d)
def test_cluster_false(self):
kws = self.default_kws.copy()
kws['row_cluster'] = False
kws['col_cluster'] = False
cm = mat.clustermap(self.df_norm, **kws)
nt.assert_equal(len(cm.ax_row_dendrogram.lines), 0)
nt.assert_equal(len(cm.ax_col_dendrogram.lines), 0)
nt.assert_equal(len(cm.ax_row_dendrogram.get_xticks()), 0)
nt.assert_equal(len(cm.ax_row_dendrogram.get_yticks()), 0)
nt.assert_equal(len(cm.ax_col_dendrogram.get_xticks()), 0)
nt.assert_equal(len(cm.ax_col_dendrogram.get_yticks()), 0)
pdt.assert_frame_equal(cm.data2d, self.df_norm)
def test_row_col_colors(self):
kws = self.default_kws.copy()
kws['row_colors'] = self.row_colors
kws['col_colors'] = self.col_colors
cm = mat.clustermap(self.df_norm, **kws)
nt.assert_equal(len(cm.ax_row_colors.collections), 1)
nt.assert_equal(len(cm.ax_col_colors.collections), 1)
def test_cluster_false_row_col_colors(self):
kws = self.default_kws.copy()
kws['row_cluster'] = False
kws['col_cluster'] = False
kws['row_colors'] = self.row_colors
kws['col_colors'] = self.col_colors
cm = mat.clustermap(self.df_norm, **kws)
nt.assert_equal(len(cm.ax_row_dendrogram.lines), 0)
nt.assert_equal(len(cm.ax_col_dendrogram.lines), 0)
nt.assert_equal(len(cm.ax_row_dendrogram.get_xticks()), 0)
nt.assert_equal(len(cm.ax_row_dendrogram.get_yticks()), 0)
nt.assert_equal(len(cm.ax_col_dendrogram.get_xticks()), 0)
nt.assert_equal(len(cm.ax_col_dendrogram.get_yticks()), 0)
nt.assert_equal(len(cm.ax_row_colors.collections), 1)
nt.assert_equal(len(cm.ax_col_colors.collections), 1)
pdt.assert_frame_equal(cm.data2d, self.df_norm)
def test_row_col_colors_df(self):
kws = self.default_kws.copy()
kws['row_colors'] = pd.DataFrame({'row_1': list(self.row_colors),
'row_2': list(self.row_colors)},
index=self.df_norm.index,
columns=['row_1', 'row_2'])
kws['col_colors'] = pd.DataFrame({'col_1': list(self.col_colors),
'col_2': list(self.col_colors)},
index=self.df_norm.columns,
columns=['col_1', 'col_2'])
cm = mat.clustermap(self.df_norm, **kws)
row_labels = [l.get_text() for l in
cm.ax_row_colors.get_xticklabels()]
nt.assert_equal(cm.row_color_labels, ['row_1', 'row_2'])
nt.assert_equal(row_labels, cm.row_color_labels)
col_labels = [l.get_text() for l in
cm.ax_col_colors.get_yticklabels()]
nt.assert_equal(cm.col_color_labels, ['col_1', 'col_2'])
nt.assert_equal(col_labels, cm.col_color_labels)
def test_row_col_colors_df_shuffled(self):
# Tests if colors are properly matched, even if given in wrong order
m, n = self.df_norm.shape
shuffled_inds = [self.df_norm.index[i] for i in
list(range(0, m, 2)) + list(range(1, m, 2))]
shuffled_cols = [self.df_norm.columns[i] for i in
list(range(0, n, 2)) + list(range(1, n, 2))]
kws = self.default_kws.copy()
row_colors = pd.DataFrame({'row_annot': list(self.row_colors)},
index=self.df_norm.index)
kws['row_colors'] = row_colors.loc[shuffled_inds]
col_colors = pd.DataFrame({'col_annot': list(self.col_colors)},
index=self.df_norm.columns)
kws['col_colors'] = col_colors.loc[shuffled_cols]
cm = mat.clustermap(self.df_norm, **kws)
nt.assert_equal(list(cm.col_colors)[0], list(self.col_colors))
nt.assert_equal(list(cm.row_colors)[0], list(self.row_colors))
def test_row_col_colors_df_missing(self):
kws = self.default_kws.copy()
row_colors = pd.DataFrame({'row_annot': list(self.row_colors)},
index=self.df_norm.index)
kws['row_colors'] = row_colors.drop(self.df_norm.index[0])
col_colors = pd.DataFrame({'col_annot': list(self.col_colors)},
index=self.df_norm.columns)
kws['col_colors'] = col_colors.drop(self.df_norm.columns[0])
cm = mat.clustermap(self.df_norm, **kws)
nt.assert_equal(list(cm.col_colors)[0],
[(1.0, 1.0, 1.0)] + list(self.col_colors[1:]))
nt.assert_equal(list(cm.row_colors)[0],
[(1.0, 1.0, 1.0)] + list(self.row_colors[1:]))
def test_row_col_colors_df_one_axis(self):
# Test case with only row annotation.
kws1 = self.default_kws.copy()
kws1['row_colors'] = pd.DataFrame({'row_1': list(self.row_colors),
'row_2': list(self.row_colors)},
index=self.df_norm.index,
columns=['row_1', 'row_2'])
cm1 = mat.clustermap(self.df_norm, **kws1)
row_labels = [l.get_text() for l in
cm1.ax_row_colors.get_xticklabels()]
nt.assert_equal(cm1.row_color_labels, ['row_1', 'row_2'])
nt.assert_equal(row_labels, cm1.row_color_labels)
        # Test case with only col annotation.
kws2 = self.default_kws.copy()
kws2['col_colors'] = pd.DataFrame({'col_1': list(self.col_colors),
'col_2': list(self.col_colors)},
index=self.df_norm.columns,
columns=['col_1', 'col_2'])
cm2 = mat.clustermap(self.df_norm, **kws2)
col_labels = [l.get_text() for l in
cm2.ax_col_colors.get_yticklabels()]
nt.assert_equal(cm2.col_color_labels, ['col_1', 'col_2'])
nt.assert_equal(col_labels, cm2.col_color_labels)
def test_row_col_colors_series(self):
kws = self.default_kws.copy()
kws['row_colors'] = pd.Series(list(self.row_colors), name='row_annot',
index=self.df_norm.index)
kws['col_colors'] = pd.Series(list(self.col_colors), name='col_annot',
index=self.df_norm.columns)
cm = mat.clustermap(self.df_norm, **kws)
row_labels = [l.get_text() for l in
cm.ax_row_colors.get_xticklabels()]
nt.assert_equal(cm.row_color_labels, ['row_annot'])
nt.assert_equal(row_labels, cm.row_color_labels)
col_labels = [l.get_text() for l in
cm.ax_col_colors.get_yticklabels()]
nt.assert_equal(cm.col_color_labels, ['col_annot'])
nt.assert_equal(col_labels, cm.col_color_labels)
def test_row_col_colors_series_shuffled(self):
# Tests if colors are properly matched, even if given in wrong order
m, n = self.df_norm.shape
shuffled_inds = [self.df_norm.index[i] for i in
list(range(0, m, 2)) + list(range(1, m, 2))]
shuffled_cols = [self.df_norm.columns[i] for i in
list(range(0, n, 2)) + list(range(1, n, 2))]
kws = self.default_kws.copy()
row_colors = pd.Series(list(self.row_colors), name='row_annot',
index=self.df_norm.index)
kws['row_colors'] = row_colors.loc[shuffled_inds]
col_colors = pd.Series(list(self.col_colors), name='col_annot',
index=self.df_norm.columns)
kws['col_colors'] = col_colors.loc[shuffled_cols]
cm = mat.clustermap(self.df_norm, **kws)
nt.assert_equal(list(cm.col_colors), list(self.col_colors))
nt.assert_equal(list(cm.row_colors), list(self.row_colors))
def test_row_col_colors_series_missing(self):
kws = self.default_kws.copy()
row_colors = pd.Series(list(self.row_colors), name='row_annot',
index=self.df_norm.index)
kws['row_colors'] = row_colors.drop(self.df_norm.index[0])
col_colors = pd.Series(list(self.col_colors), name='col_annot',
index=self.df_norm.columns)
kws['col_colors'] = col_colors.drop(self.df_norm.columns[0])
cm = mat.clustermap(self.df_norm, **kws)
nt.assert_equal(list(cm.col_colors),
[(1.0, 1.0, 1.0)] + list(self.col_colors[1:]))
nt.assert_equal(list(cm.row_colors),
[(1.0, 1.0, 1.0)] + list(self.row_colors[1:]))
def test_row_col_colors_ignore_heatmap_kwargs(self):
g = mat.clustermap(self.rs.uniform(0, 200, self.df_norm.shape),
row_colors=self.row_colors,
col_colors=self.col_colors,
cmap="Spectral",
norm=mpl.colors.LogNorm(),
vmax=100)
assert np.array_equal(
np.array(self.row_colors)[g.dendrogram_row.reordered_ind],
g.ax_row_colors.collections[0].get_facecolors()[:, :3]
)
assert np.array_equal(
np.array(self.col_colors)[g.dendrogram_col.reordered_ind],
g.ax_col_colors.collections[0].get_facecolors()[:, :3]
)
def test_row_col_colors_raise_on_mixed_index_types(self):
row_colors = pd.Series(
list(self.row_colors), name="row_annot", index=self.df_norm.index
)
col_colors = pd.Series(
list(self.col_colors), name="col_annot", index=self.df_norm.columns
)
with pytest.raises(TypeError):
mat.clustermap(self.x_norm, row_colors=row_colors)
with pytest.raises(TypeError):
mat.clustermap(self.x_norm, col_colors=col_colors)
def test_mask_reorganization(self):
kws = self.default_kws.copy()
kws["mask"] = self.df_norm > 0
g = mat.clustermap(self.df_norm, **kws)
npt.assert_array_equal(g.data2d.index, g.mask.index)
npt.assert_array_equal(g.data2d.columns, g.mask.columns)
npt.assert_array_equal(g.mask.index,
self.df_norm.index[
g.dendrogram_row.reordered_ind])
npt.assert_array_equal(g.mask.columns,
self.df_norm.columns[
g.dendrogram_col.reordered_ind])
def test_ticklabel_reorganization(self):
kws = self.default_kws.copy()
xtl = np.arange(self.df_norm.shape[1])
kws["xticklabels"] = list(xtl)
ytl = self.letters.loc[:self.df_norm.shape[0]]
kws["yticklabels"] = ytl
g = mat.clustermap(self.df_norm, **kws)
xtl_actual = [t.get_text() for t in g.ax_heatmap.get_xticklabels()]
ytl_actual = [t.get_text() for t in g.ax_heatmap.get_yticklabels()]
xtl_want = xtl[g.dendrogram_col.reordered_ind].astype("<U1")
ytl_want = ytl[g.dendrogram_row.reordered_ind].astype("<U1")
npt.assert_array_equal(xtl_actual, xtl_want)
npt.assert_array_equal(ytl_actual, ytl_want)
def test_noticklabels(self):
kws = self.default_kws.copy()
kws["xticklabels"] = False
kws["yticklabels"] = False
g = mat.clustermap(self.df_norm, **kws)
xtl_actual = [t.get_text() for t in g.ax_heatmap.get_xticklabels()]
ytl_actual = [t.get_text() for t in g.ax_heatmap.get_yticklabels()]
nt.assert_equal(xtl_actual, [])
nt.assert_equal(ytl_actual, [])
def test_size_ratios(self):
# The way that wspace/hspace work in GridSpec, the mapping from input
# ratio to actual width/height of each axes is complicated, so this
# test is just going to assert comparative relationships
kws1 = self.default_kws.copy()
kws1.update(dendrogram_ratio=.2, colors_ratio=.03,
col_colors=self.col_colors, row_colors=self.row_colors)
kws2 = kws1.copy()
kws2.update(dendrogram_ratio=.3, colors_ratio=.05)
g1 = mat.clustermap(self.df_norm, **kws1)
g2 = mat.clustermap(self.df_norm, **kws2)
assert (g2.ax_col_dendrogram.get_position().height
> g1.ax_col_dendrogram.get_position().height)
assert (g2.ax_col_colors.get_position().height
> g1.ax_col_colors.get_position().height)
assert (g2.ax_heatmap.get_position().height
< g1.ax_heatmap.get_position().height)
assert (g2.ax_row_dendrogram.get_position().width
> g1.ax_row_dendrogram.get_position().width)
assert (g2.ax_row_colors.get_position().width
> g1.ax_row_colors.get_position().width)
assert (g2.ax_heatmap.get_position().width
< g1.ax_heatmap.get_position().width)
kws1 = self.default_kws.copy()
kws1.update(col_colors=self.col_colors)
kws2 = kws1.copy()
kws2.update(col_colors=[self.col_colors, self.col_colors])
g1 = mat.clustermap(self.df_norm, **kws1)
g2 = mat.clustermap(self.df_norm, **kws2)
assert (g2.ax_col_colors.get_position().height
> g1.ax_col_colors.get_position().height)
kws1 = self.default_kws.copy()
kws1.update(dendrogram_ratio=(.2, .2))
kws2 = kws1.copy()
kws2.update(dendrogram_ratio=(.2, .3))
g1 = mat.clustermap(self.df_norm, **kws1)
g2 = mat.clustermap(self.df_norm, **kws2)
assert (g2.ax_row_dendrogram.get_position().width
== g1.ax_row_dendrogram.get_position().width)
assert (g2.ax_col_dendrogram.get_position().height
> g1.ax_col_dendrogram.get_position().height)
def test_cbar_pos(self):
kws = self.default_kws.copy()
kws["cbar_pos"] = (.2, .1, .4, .3)
g = mat.clustermap(self.df_norm, **kws)
pos = g.ax_cbar.get_position()
assert pytest.approx(tuple(pos.p0)) == kws["cbar_pos"][:2]
assert pytest.approx(pos.width) == kws["cbar_pos"][2]
assert pytest.approx(pos.height) == kws["cbar_pos"][3]
kws["cbar_pos"] = None
g = mat.clustermap(self.df_norm, **kws)
assert g.ax_cbar is None
def test_square_warning(self):
kws = self.default_kws.copy()
g1 = mat.clustermap(self.df_norm, **kws)
with pytest.warns(UserWarning):
kws["square"] = True
g2 = mat.clustermap(self.df_norm, **kws)
g1_shape = g1.ax_heatmap.get_position().get_points()
g2_shape = g2.ax_heatmap.get_position().get_points()
assert np.array_equal(g1_shape, g2_shape)
def test_clustermap_annotation(self):
g = mat.clustermap(self.df_norm, annot=True, fmt=".1f")
for val, text in zip(np.asarray(g.data2d).flat, g.ax_heatmap.texts):
assert text.get_text() == "{:.1f}".format(val)
g = mat.clustermap(self.df_norm, annot=self.df_norm, fmt=".1f")
for val, text in zip(np.asarray(g.data2d).flat, g.ax_heatmap.texts):
assert text.get_text() == "{:.1f}".format(val)
def test_tree_kws(self):
rgb = (1, .5, .2)
g = mat.clustermap(self.df_norm, tree_kws=dict(color=rgb))
for ax in [g.ax_col_dendrogram, g.ax_row_dendrogram]:
tree, = ax.collections
assert tuple(tree.get_color().squeeze())[:3] == rgb
| 37.097934
| 79
| 0.599088
|
ee8d2a05b16c9767bcd4ad068524fff899eb34a4
| 1,110
|
py
|
Python
|
training/odsToTsv.py
|
raoofnaushad/resume_parser_nlp
|
50519b9b258b8b6a6739e3fd30a70709c1ffd29d
|
[
"Apache-2.0"
] | 2
|
2020-11-02T16:33:35.000Z
|
2020-11-02T16:42:13.000Z
|
training/odsToTsv.py
|
raoofnaushad/resume_parser_nlp
|
50519b9b258b8b6a6739e3fd30a70709c1ffd29d
|
[
"Apache-2.0"
] | null | null | null |
training/odsToTsv.py
|
raoofnaushad/resume_parser_nlp
|
50519b9b258b8b6a6739e3fd30a70709c1ffd29d
|
[
"Apache-2.0"
] | null | null | null |
import pandas as pd
import math
import string
import training.utils as utils
import training.config as config
def convertODStoTSV(fromODS, toTSV):
data = utils.read_ods(fromODS)
df = pd.DataFrame(data)
print(len(df))
words_list = []
tags_list = []
for ind in df.index:
if df['Word'][ind] == '.' and df['Tag'][ind] == '.':
words_list.append('.')
tags_list.append('.')
continue
if df['Word'][ind] == 'XXX':
words_list.append('\n')
else:
words_list.append(df['Word'][ind])
if df['Tag'][ind][:2] in ('B-', 'I-'):
tags_list.append(df['Tag'][ind])
else:
tags_list.append('O')
dict = {'Word': words_list, 'Tag': tags_list}
df = pd.DataFrame(dict)
print(len(df))
df.to_csv(toTSV,sep='\t', index=False)
# convertODStoTSV('/home/accubits/Documents/Projects/AI/resume/training/training_dataset/Education.ods' \
# , '/home/accubits/Documents/Projects/AI/resume/training/training_dataset/Education.tsv')
| 24.130435
| 105
| 0.568468
|
d7d046dd98a9ccaf7c6962ce4c9469da97f23fcc
| 1,496
|
py
|
Python
|
paradocx/image.py
|
jaraco/paradocx
|
123405b97fc82f60f4bd47cae465802f8063024b
|
[
"MIT"
] | 3
|
2021-02-13T20:07:18.000Z
|
2022-03-17T05:26:18.000Z
|
paradocx/image.py
|
jaraco/paradocx
|
123405b97fc82f60f4bd47cae465802f8063024b
|
[
"MIT"
] | 4
|
2018-03-03T17:45:17.000Z
|
2021-07-06T02:51:30.000Z
|
paradocx/image.py
|
jaraco/paradocx
|
123405b97fc82f60f4bd47cae465802f8063024b
|
[
"MIT"
] | 2
|
2021-02-13T17:21:24.000Z
|
2022-03-26T07:43:00.000Z
|
from openpack.basepack import DefaultNamed, Part
import mimetypes
import warnings
class ImagePart(DefaultNamed, Part):
rel_type = (
'http://schemas.openxmlformats.org/officeDocument/2006/relationships/image'
)
default_name = '/word/media/image.jpeg'
"""
    ECMA-376 3rd Edition Part 1 Page 170 states:
A producer that wants interoperability should use
one of the following standard formats:
- image/png ISO/IEC 15948:2003, http://www.libpng.org/pub/png/spec/
- image/jpeg, http://www.w3.org/Graphics/JPEG
"""
interoperability_types = ['image/png', 'image/jpeg']
def _set_name(self, name):
super(ImagePart, self)._set_name(name)
self._guess_mime_type(name)
name = property(Part._get_name, _set_name)
def _guess_mime_type(self, name):
"""
When setting the name, guess the mime type from the extension.
Set the content_type for this instance only if a content_type is not
already defined (this allows an instance to have a content-type pre-
defined or for a subclass to define the content type, and it will not
be overridden by the guessed type).
"""
ct, _ = mimetypes.guess_type(name)
if ct and not self.content_type:
self.content_type = ct
if ct not in self.interoperability_types:
warnings.warn(
"Image type %s is not guaranteed to be interoperable" % ct
)
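# --- Editor's sketch (not part of the original paradocx module) ---
# A hedged illustration of the mime-type guessing described in the docstring
# above. The constructor arguments are an assumption (openpack Parts are
# typically created with a package and a name); the real signature may differ.
#
#   from paradocx.image import ImagePart
#
#   part = ImagePart(package, '/word/media/photo.png')
#   # content_type is expected to be guessed as 'image/png' (unless already set),
#   # and no warning is issued because image/png is an interoperability type.
#   part = ImagePart(package, '/word/media/photo.tiff')
#   # -> UserWarning: Image type image/tiff is not guaranteed to be interoperable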
| 35.619048
| 83
| 0.652406
|
2ff3651403ab10ae4e81527556f605af63e9d493
| 26,038
|
py
|
Python
|
ivy/stateful/module.py
|
saurbhc/ivy
|
20b327b4fab543b26ad5a18acf4deddd6e3c804b
|
[
"Apache-2.0"
] | 2
|
2022-02-13T20:55:02.000Z
|
2022-02-26T18:44:19.000Z
|
ivy/stateful/module.py
|
saurbhc/ivy
|
20b327b4fab543b26ad5a18acf4deddd6e3c804b
|
[
"Apache-2.0"
] | 1
|
2022-03-08T13:29:20.000Z
|
2022-03-08T13:29:20.000Z
|
ivy/stateful/module.py
|
saurbhc/ivy
|
20b327b4fab543b26ad5a18acf4deddd6e3c804b
|
[
"Apache-2.0"
] | null | null | null |
"""
Base class for deriving trainable modules
"""
# global
import os
import abc
import logging
import ivy.functional.backends.numpy
import termcolor
import numpy as np
# local
import ivy
from ivy.container import Container
# Base #
# -----#
class Module(abc.ABC):
def __init__(self, dev=None, v=None, build_mode='on_init', compile_on_next_step=False, store_vars=True,
stateful=None, arg_stateful_idxs=None, kwarg_stateful_idxs=None, fallback_to_non_compiled=False,
with_partial_v=False, devs=None):
"""
        Initialize an Ivy layer, which is a stateful object consisting of trainable variables.
:param dev: device on which to create the module's variables 'cuda:0', 'cuda:1', 'cpu' etc.
:type dev: ivy.Device, optional
:param v: Ivy container of trainable variables. Created internally by default.
:type v: ivy container, optional
:param build_mode: How the Module is built, either on initialization (now), explicitly by the user by calling
build(), or the first time the __call__ method is run. Default is on initialization.
:type build_mode: str, optional
:param compile_on_next_step: Whether to compile the network on the next forward pass. Default is False.
:type compile_on_next_step: bool, optional
:param store_vars: Whether or not to store the variables created. Default is True.
:type store_vars: bool, optional
:param stateful: The constant id stateful items to track as part of the forward pass.
Used when graph compiling, default is None.
:type stateful: seq of any, optional
:param arg_stateful_idxs: The nested argument indices of stateful items to track as part of the forward pass.
Used when graph compiling, default is None.
:type arg_stateful_idxs: seq of any, optional
:param kwarg_stateful_idxs: The nested keyword argument indices of stateful items to track as part of the
forward pass. Used when graph compiling, default is None.
:type kwarg_stateful_idxs: seq of any, optional
:param fallback_to_non_compiled: Whether to fall back to non-compiled forward call in the case that an error is
raised during the compiled forward pass. Default is False.
:type fallback_to_non_compiled: bool, optional
:param with_partial_v: Whether to allow partial specification of variables. Default is False.
:type with_partial_v: bool, optional
:param devs: devices on which to distribute the module's variables 'cuda:0', 'cuda:1', 'cpu' etc.
:type devs: sequence of str, optional
"""
valid_build_modes = ['on_init', 'explicit', 'on_call']
if build_mode not in valid_build_modes:
raise Exception('build_mode must be one of {} of type str, but found {} of type {}'.format(
valid_build_modes, build_mode, type(build_mode)))
self._dev = ivy.default(dev, ivy.default(lambda: devs[0], ivy.default_device(), True))
self._devs = ivy.default(devs, [self._dev])
self._build_mode = build_mode
self._stateful = stateful
self._arg_stateful_idxs = arg_stateful_idxs
self._kwarg_stateful_idxs = kwarg_stateful_idxs
self._fallback_to_non_compiled = fallback_to_non_compiled
self._with_partial_v = with_partial_v
self._store_vars = store_vars
self._built = False
self._compiled = False
self._compiled_fn = None
self._compile_on_next_step = compile_on_next_step
self._v_in = v if (isinstance(v, Container) or v is None) else Container(v)
self.v = v
self.top_v = None
self.top_mod = None
self._track_submod_rets = False
self._submod_depth = None
self._submods_to_track = None
self._track_submod_call_order = False
self.submod_rets = ivy.Container(alphabetical_keys=False, ivyh=ivy.functional.backends.numpy)
self.expected_submod_rets = None
self.submod_dict = dict()
self.submod_call_order = ivy.Container(alphabetical_keys=False, ivyh=ivy.functional.backends.numpy)
self._sub_mods = set()
if build_mode != 'on_init':
return
self.build()
# Private #
# --------#
def _fn_with_var_arg(self, fn, v_fn):
def new_fn(*a, with_grads=None, **kw):
with_grads = ivy.with_grads(with_grads)
if 'v' in kw.keys():
del kw['v']
v = v_fn(self.v)
if not with_grads:
v = v.stop_gradients()
return fn(*a, **kw, v=v)
new_fn.wrapped = True
return new_fn
def _top_v_fn(self, depth=None, flatten_key_chains=False):
if ivy.exists(self.top_v):
if ivy.exists(depth):
ret = self.top_v(depth - 1) if depth > 1 else self.v
else:
ret = self.top_v()
else:
ret = self.v
if flatten_key_chains:
return ret.flatten_key_chains()
return ret
def _top_mod_fn(self, depth=None):
if ivy.exists(self.top_mod):
if ivy.exists(depth):
return self.top_mod(depth - 1) if depth > 1 else self
return self.top_mod()
return self
# noinspection PyProtectedMember
def track_submod_rets(self):
if not ivy.exists(self.top_mod):
return False
top_mod = self.top_mod()
submods = top_mod._submods_to_track
if ivy.exists(submods):
if self not in submods:
return False
depth = top_mod._submod_depth
if ivy.exists(depth):
return self.top_mod(depth - 1)._track_submod_rets if depth > 0 else self._track_submod_rets
return top_mod._track_submod_rets
def check_submod_rets(self):
if not ivy.exists(self.top_mod):
return False
if ivy.exists(self.top_mod().expected_submod_rets):
return True
return False
# noinspection PyProtectedMember
def track_submod_call_order(self):
if not ivy.exists(self.top_mod):
return False
top_mod = self.top_mod()
submods = top_mod._submods_to_track
if ivy.exists(submods):
if self not in submods:
return False
depth = top_mod._submod_depth
if ivy.exists(depth):
return self.top_mod(depth - 1)._track_submod_call_order if depth > 0 else self._track_submod_call_order
return top_mod._track_submod_call_order
def mod_depth(self):
depth = 0
mod_above = self
while True:
if ivy.exists(mod_above.top_mod):
mod_above = mod_above.top_mod(1)
else:
break
depth += 1
return depth
def mod_height(self):
return self.sub_mods().max_depth - 1
def _find_variables(self, obj=None):
vs = Container()
# ToDo: add support for finding local variables, if/when JAX supports uniquely flagging variables
if isinstance(obj, Module) and obj is not self:
obj.top_v = lambda depth=None, flatten_key_chains=False: self._top_v_fn(depth, flatten_key_chains)
obj.top_mod = lambda depth=None: self._top_mod_fn(depth)
self._sub_mods.add(obj)
return obj.v
elif isinstance(obj, (list, tuple)):
for i, v in enumerate(obj):
ret = self._find_variables(v)
if ret:
vs['v' + str(i)] = ret
return vs
elif isinstance(obj, dict):
for k, v in obj.items():
ret = self._find_variables(v)
if ret:
vs[k[1:] if k[0] == '_' else k] = ret
return vs
elif not hasattr(obj, '__dict__'):
return vs
for k, v in obj.__dict__.items():
if v is not None and k[0:2] != '__':
ret = self._find_variables(v)
if ret:
vs[k[1:] if k[0] == '_' else k] = ret
return vs
@staticmethod
def _extract_v(v, keychain_mappings, orig_key_chain):
if v.has_key_chain(orig_key_chain):
ret_cont = v.at_key_chain(orig_key_chain)
else:
ret_cont = ivy.Container()
for old_kc, new_kc in keychain_mappings.items():
if orig_key_chain in old_kc:
ret_cont = ret_cont.set_at_key_chain('/'.join(new_kc.split('/')[1:]), v.at_key_chain(new_kc))
return ret_cont
def _wrap_call_methods(self, keychain_mappings, key='', obj=None):
if isinstance(obj, Module) and obj is not self:
orig_key_chain = key[1:] if key[0] == '_' else key
obj.__call__ = self._fn_with_var_arg(obj.__call__,
lambda v_: self._extract_v(v_, keychain_mappings, orig_key_chain))
return
elif isinstance(obj, (list, tuple)):
for i, val in enumerate(obj):
self._wrap_call_methods(keychain_mappings, key + '/v' + str(i), val)
return
elif isinstance(obj, dict):
for k, val in obj.items():
k = (key + '/' + k) if key != '' else k
self._wrap_call_methods(keychain_mappings, k, val)
return
if not hasattr(obj, '__dict__'):
return
for k, val in obj.__dict__.items():
if k[0:2] == '__':
continue
k = (key + '/' + k) if key != '' else k
if val is not None:
self._wrap_call_methods(keychain_mappings, k, val)
return
@staticmethod
def _remove_duplicate_variables(vs, created):
created_ids = created.map(lambda x, kc: id(x))
vs_ids = vs.map(lambda x, kc: id(x))
ids = dict()
duplicate_keychains = list()
keychain_mappings = dict()
def unique_callback(x, kc):
ids[x] = kc
def found_dup_callback(x, kc):
if ids[x] == kc:
return
duplicate_keychains.append(kc)
keychain_mappings[kc] = ids[x]
created_ids.map(lambda x, kc: unique_callback(x, kc))
vs_ids.map(lambda x, kc: unique_callback(x, kc) if x not in ids else found_dup_callback(x, kc))
for dup_kc in duplicate_keychains:
vs = vs.prune_key_chain(dup_kc)
return vs, keychain_mappings
# Overridable #
# noinspection PyMethodMayBeStatic,PyUnusedLocal
def _create_variables(self, dev):
"""
create internal trainable variables, and return as arbitrary nested dict. Overridable.
:param dev: The device string, specifying the device on which to create the variables.
:type dev: ivy.Device
"""
return {}
def _build(self, *args, **kwargs) -> bool:
"""
Build the internal layers and variables for this module. Overridable.
Return False or empty Container if the build only partially completed (i.e. some child Modules have
"on_call" build mode). Alternatively, return True or a container of the built variables if the module is built.
"""
return True
# Abstract #
@abc.abstractmethod
def _forward(self, *args, **kwargs):
"""
Forward pass of the layer, called after handling the optional input variables.
"""
raise NotImplementedError
def _forward_with_tracking(self, *args, **kwargs):
"""
Forward pass while optionally tracking submodule returns and call order
"""
if self.track_submod_call_order():
self._add_submod_enter()
ret = self._forward(*args, **kwargs)
track_submod_rets = self.track_submod_rets()
check_submod_rets = self.check_submod_rets()
if track_submod_rets or check_submod_rets:
self._add_submod_ret(ret)
if check_submod_rets:
self._check_submod_ret()
return ret
def _call(self, *args, v=None, with_grads=None, **kwargs):
"""
the forward pass of the layer, treating layer instance as callable function.
"""
with_grads = ivy.with_grads(with_grads)
if not self._built:
self.build(*args, **kwargs, from_call=True)
if v is not None:
v_orig = self.v
if not with_grads:
v = v.stop_gradients()
self.v = Container(v, **v.config) if isinstance(v, Container) else Container(v)
ret = self._forward_with_tracking(*args, **kwargs)
self.v = v_orig
return ret
elif hasattr(self.__call__, 'wrapped'):
return self.__call__(*args, with_grads=with_grads, **kwargs)
elif not with_grads:
v_orig = self.v
self.v = v_orig.stop_gradients()
ret = self._forward_with_tracking(*args, **kwargs)
self.v = v_orig
return ret
return self._forward_with_tracking(*args, **kwargs)
# Public #
# -------#
def sub_mods(self, show_v=True, depth=None, flatten_key_chains=False):
if self._sub_mods:
if ivy.exists(depth):
if depth == 0:
if show_v:
return self.v
return ''
next_depth = depth - 1
else:
next_depth = None
ret = ivy.Container(
{ivy.Container.flatten_key_chain(sm.__repr__(), '_'):
sm.sub_mods(show_v, next_depth) for sm in self._sub_mods})
if flatten_key_chains:
return ret.flatten_key_chains()
return ret
if show_v:
return self.v
return ''
def show_v_in_top_v(self, depth=None):
if ivy.exists(self.top_v) and ivy.exists(self.v):
self.top_v(depth).show_sub_container(self.v)
else:
print('both self.top_v and self.v must be initialized in order to show v in top_v, '
'but found\n\ntop_v: {}\n\nv: {}.'.format(self.top_v, self.v))
def v_with_top_v_key_chains(self, depth=None, flatten_key_chains=False):
if ivy.exists(self.top_v) and ivy.exists(self.v):
kc = self.top_v(depth).find_sub_container(self.v)
if kc:
ret = self.v.restructure_key_chains({'': kc}, keep_orig=False)
else:
ret = self.v
if flatten_key_chains:
return ret.flatten_key_chains()
return ret
else:
print('both self.top_v and self.v must be initialized in order to show v in top_v, '
'but found\n\ntop_v: {}\n\nv: {}.'.format(self.top_v, self.v))
def mod_with_top_mod_key_chain(self, depth=None, flatten_key_chain=False):
if not ivy.exists(self.top_mod) or depth == 0:
return self.__repr__()
max_depth = depth
depth = 1
top_mod = self
mods = [ivy.Container.flatten_key_chain(top_mod.__repr__(), '_')]
while True:
if not ivy.exists(top_mod.top_mod):
break
top_mod = top_mod.top_mod(1)
mods.append(ivy.Container.flatten_key_chain(top_mod.__repr__(), '_'))
if depth == max_depth:
break
depth += 1
if flatten_key_chain:
return '__'.join(reversed(mods))
return [mod for mod in reversed(mods)]
def show_mod_in_top_mod(self, upper_depth=None, lower_depth=None, flatten_key_chains=False):
if ivy.exists(self.top_mod):
upper_depth = ivy.default(upper_depth, self.mod_depth())
lower_depth = ivy.default(lower_depth, self.mod_height())
mid_depth = upper_depth + lower_depth
upper_sub_mods = self.top_mod(upper_depth).sub_mods(depth=mid_depth)
lower_sub_mods = self.sub_mods(depth=lower_depth)
if flatten_key_chains:
upper_sub_mods = upper_sub_mods.flatten_key_chains()
lower_sub_mods = lower_sub_mods.flatten_key_chains()
upper_sub_mods.show_sub_container(lower_sub_mods)
else:
print('self.top_mod must be initialized in order to show mod in top_mod, '
'but found\n\ntop_mod: {}'.format(self.top_mod))
def _set_submod_flags(self, track_submod_rets, submod_depth, submods_to_track, track_submod_call_order,
expected_submod_rets):
self._track_submod_rets = track_submod_rets
self._submod_depth = submod_depth
self._submods_to_track = submods_to_track
self._track_submod_call_order = track_submod_call_order
self.expected_submod_rets = ivy.Container(expected_submod_rets).to_numpy(map_sequences=True) \
if ivy.exists(expected_submod_rets) else expected_submod_rets
def _unset_submod_flags(self):
self._track_submod_rets = False
self._submod_depth = None
self._submods_to_track = None
self._track_submod_call_order = False
self.expected_submod_rets = None
def get_mod_key(self, top_mod=None):
if top_mod is None:
top_mod = self.top_mod()
submod_dict = top_mod.submod_dict
full_key = self.__repr__().split('.')[-1]
name_key = full_key.split(' ')[0]
if name_key not in submod_dict:
submod_dict[name_key] = dict()
id_str = full_key.split(' ')[-1][:-1]
if id_str not in submod_dict[name_key]:
submod_dict[name_key][id_str] = str(len(submod_dict[name_key]))
idx_key = submod_dict[name_key][id_str]
return ' '*self.mod_depth() + '_'.join([name_key, idx_key])
def _add_submod_ret(self, ret):
top_mod = self.top_mod()
sr = top_mod.submod_rets
ret = ivy.to_numpy(ret)
key = self.get_mod_key(top_mod)
if key in sr:
sr[key].append(ret)
else:
sr[key] = [ret]
def _check_submod_ret(self):
top_mod = self.top_mod()
esr = top_mod.expected_submod_rets
key = self.get_mod_key(top_mod)
esr_key = key
if key not in esr:
esr_key = key.replace(' ', '')
if esr_key not in esr:
return
sr = self.top_mod().submod_rets
rets = sr[key]
esr_ret = esr[esr_key]
if isinstance(esr_ret, dict):
expected_rets = esr_ret['val']
atols = esr_ret['atol'] if 'atol' in esr_ret else None
if not isinstance(atols, list):
atols = [atols] * len(expected_rets)
rtols = esr_ret['rtol'] if 'rtol' in esr_ret else None
if not isinstance(rtols, list):
rtols = [rtols] * len(expected_rets)
else:
expected_rets = esr_ret
atols = [None] * len(expected_rets)
rtols = [None] * len(expected_rets)
for ret, expected_ret, atol, rtol in zip(rets, expected_rets, atols, rtols):
if expected_ret is None:
continue
kwargs = {}
if atol:
kwargs['atol'] = atol
if rtol:
kwargs['rtol'] = rtol
assert np.allclose(ret, expected_ret, **kwargs),\
'ret\n\n{}\n\nand expected_ret\n\n{}\n\nwere not close enough'.format(ret, expected_ret)
# noinspection PyProtectedMember
def _is_submod_leaf(self):
submod_depth = self.top_mod()._submod_depth
submods_to_track = self.top_mod()._submods_to_track
return (ivy.exists(submod_depth) and self.mod_depth() == submod_depth) or \
self.mod_height() == 0 or \
(ivy.exists(submods_to_track) and self in submods_to_track)
def _add_submod_enter(self):
sco = self.top_mod().submod_call_order
key_chain = self.mod_with_top_mod_key_chain()
for key in key_chain[:-1]:
kcs = sco.key_chains_containing(key, include_empty=True)
if kcs:
max_key = sorted(
kcs, key=lambda kc:
int(kc.split('/')[-2 if isinstance(sco[kc], np.ndarray) else -1].split('_')[-1]))[-1].split('/')[0]
else:
max_key = key + '_0'
sco[max_key] = ivy.Container(alphabetical_keys=False, ivyh=ivy.functional.backends.numpy)
sco = sco[max_key]
final_key = key_chain[-1]
kcs = sco.key_chains_containing(final_key, include_empty=True)
if kcs:
sorted_kcs =\
sorted(kcs, key=lambda kc:
int(kc.split('/')[-2 if isinstance(sco[kc], np.ndarray) else -1].split('_')[-1]))
chosen_kc = sorted_kcs[-1]
max_key_idx = int(chosen_kc.split('/')[-2 if isinstance(sco[chosen_kc], np.ndarray) else -1].split('_')[-1])
new_key = final_key + '_{}'.format(max_key_idx + 1)
else:
new_key = final_key + '_0'
if self._is_submod_leaf():
sco[new_key] = self.v_with_top_v_key_chains(flatten_key_chains=True).to_numpy()
else:
sco[new_key] = ivy.Container(alphabetical_keys=False, ivyh=ivy.functional.backends.numpy)
def __call__(self, *args, v=None, with_grads=None, stateful=None, arg_stateful_idxs=None, kwarg_stateful_idxs=None,
track_submod_rets=False, submod_depth=None, submods_to_track=None, track_submod_call_order=False,
expected_submod_rets=None, **kwargs):
with_grads = ivy.with_grads(with_grads)
self.submod_rets = ivy.Container(alphabetical_keys=False, ivyh=ivy.functional.backends.numpy)
self.submod_call_order = ivy.Container(alphabetical_keys=False, ivyh=ivy.functional.backends.numpy)
self._set_submod_flags(
track_submod_rets, submod_depth, submods_to_track, track_submod_call_order, expected_submod_rets)
ret = self._call(*args, v=v, with_grads=with_grads, **kwargs)
self._unset_submod_flags()
return ret
def save_weights(self, weights_path):
"""
Save the weights on the Module.
:param weights_path: The hdf5 file for saving the weights.
:type weights_path: string
"""
os.makedirs('/'.join(weights_path.split('/')[:-1]), exist_ok=True)
self.v.to_disk_as_hdf5(weights_path)
def build(self, *args, from_call=False, dev=None, **kwargs):
"""
Build the internal layers and variables for this module.
"""
self._dev = ivy.default(dev, self._dev)
# return False if not from_call but build_mode is on_call
if not from_call and self._build_mode == 'on_call':
return self.v
# build local Module, and any child modules flagged with "explicit" build mode
built = ivy.default(self._build(*args, **kwargs), True)
# build variables based on locally built layers, if v not passed in constructor
v_from_constructor = self._v_in
created = Container(self._create_variables(self._dev))
created_n_found = Container(dict(**self._find_variables(self), **created))
if ivy.exists(v_from_constructor):
if self._with_partial_v:
if v_from_constructor:
created_n_found.assert_contains_sub_structure(v_from_constructor, partial=True)
self.v = created_n_found.set_at_key_chains(v_from_constructor)
else:
created_n_found, _ = self._remove_duplicate_variables(created_n_found, created)
ivy.Container.assert_identical_structure([created_n_found, v_from_constructor])
self.v = v_from_constructor
else:
self.v = created_n_found
# remove duplicates
self.v, keychain_mappings = self._remove_duplicate_variables(self.v, created)
# build any child 'on_call' layers
if not built and from_call:
# update child modules to share the same device
for k, v in self.__dict__.items():
if isinstance(v, ivy.Module):
v._dev = self._dev
# build during forward pass
self._forward(*args, **kwargs)
# re-build variables based on additional child on-call layers, if v not passed in constructor
if not ivy.exists(v_from_constructor):
created_n_found = Container(dict(**self._find_variables(self), **self._create_variables(self._dev)))
self.v = created_n_found
# remove further duplicates with self.v
self.v, keychain_mappings = self._remove_duplicate_variables(self.v, created)
# set built flag
built = True
# wrap call methods if the module is fully built
if built:
self._wrap_call_methods(keychain_mappings, obj=self)
# flag built and remove local variables if specified
self._built = bool(built)
v_ret = self.v
if not self._store_vars:
# ToDo: verify variables in self.v are released once this method exits
self.v = ivy.Container()
return v_ret if bool(v_ret) or isinstance(built, bool) else built
def show_structure(self):
this_repr = termcolor.colored(object.__repr__(self), 'green')
sub_mod_repr = self.sub_mods(False).__repr__()
if sub_mod_repr == "''":
return this_repr
print('\n'.join([this_repr, sub_mod_repr]))
def __repr__(self):
return object.__repr__(self)
# Properties #
# -----------#
@property
def build_mode(self):
return self._build_mode
@property
def built(self):
return self._built
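# Hedged sketch, not part of this file: a concrete Module is normally written by
# overriding _create_variables and _forward. The helper calls used below
# (ivy.variable, ivy.zeros, ivy.matmul) are assumptions about the surrounding
# framework API and are illustrative only.
class _LinearSketch(Module):
    def __init__(self, in_size, out_size, dev=None, v=None):
        self._in_size = in_size
        self._out_size = out_size
        Module.__init__(self, dev=dev, v=v)

    def _create_variables(self, dev):
        # a real layer would also place these variables on `dev`
        return {'w': ivy.variable(ivy.zeros((self._in_size, self._out_size))),
                'b': ivy.variable(ivy.zeros((self._out_size,)))}

    def _forward(self, x):
        return ivy.matmul(x, self.v.w) + self.v.b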
| 41.330159
| 120
| 0.603695
|
b8b78c684217767e724d04aeb02d2238eb98f722
| 1,223
|
py
|
Python
|
docs/tutorial/grade/tests/q2.py
|
nalderto/otter-grader
|
a4714bf48df07b7eb8b3c41530ce7a778fd42c98
|
[
"BSD-3-Clause"
] | null | null | null |
docs/tutorial/grade/tests/q2.py
|
nalderto/otter-grader
|
a4714bf48df07b7eb8b3c41530ce7a778fd42c98
|
[
"BSD-3-Clause"
] | null | null | null |
docs/tutorial/grade/tests/q2.py
|
nalderto/otter-grader
|
a4714bf48df07b7eb8b3c41530ce7a778fd42c98
|
[
"BSD-3-Clause"
] | null | null | null |
test = { 'name': 'q2',
'points': 1,
'suites': [ { 'cases': [ {'code': '>>> f = fiberator();\n>>> assert next(f) == 0;\n>>> assert next(f) == 1\n', 'hidden': False, 'locked': False},
{ 'code': '>>> f = fiberator();\n'
'>>> assert next(f) == 0;\n'
'>>> assert next(f) == 1;\n'
'>>> assert next(f) == 1;\n'
'>>> assert next(f) == 2;\n'
'>>> assert next(f) == 3;\n'
'>>> assert next(f) == 5;\n'
'>>> assert next(f) == 8;\n'
'>>> assert next(f) == 13;\n'
'>>> assert next(f) == 21\n',
'hidden': True,
'locked': False}],
'scored': True,
'setup': '',
'teardown': '',
'type': 'doctest'}]}
| 61.15
| 155
| 0.228945
|
3c5dc49b7e0b6ab6aedd74323e978dc9dd33e854
| 15
|
py
|
Python
|
num_thai/__init__.py
|
lanuxos/num-thai
|
26e78537fc2789733c2541c3f31cadc76e25844d
|
[
"MIT"
] | null | null | null |
num_thai/__init__.py
|
lanuxos/num-thai
|
26e78537fc2789733c2541c3f31cadc76e25844d
|
[
"MIT"
] | null | null | null |
num_thai/__init__.py
|
lanuxos/num-thai
|
26e78537fc2789733c2541c3f31cadc76e25844d
|
[
"MIT"
] | 2
|
2020-03-24T10:03:43.000Z
|
2021-06-05T03:32:45.000Z
|
name="num_thai"
| 15
| 15
| 0.8
|
dfa3a40ebcb9f5d194ef50f4c271b35aecd82147
| 324
|
py
|
Python
|
osmaxx/excerptexport/templatetags/whitespace_cleanup.py
|
tyrasd/osmaxx
|
da4454083d17b2ef8b0623cad62e39992b6bd52a
|
[
"MIT"
] | 27
|
2015-03-30T14:17:26.000Z
|
2022-02-19T17:30:44.000Z
|
osmaxx/excerptexport/templatetags/whitespace_cleanup.py
|
tyrasd/osmaxx
|
da4454083d17b2ef8b0623cad62e39992b6bd52a
|
[
"MIT"
] | 483
|
2015-03-09T16:58:03.000Z
|
2022-03-14T09:29:06.000Z
|
osmaxx/excerptexport/templatetags/whitespace_cleanup.py
|
tyrasd/osmaxx
|
da4454083d17b2ef8b0623cad62e39992b6bd52a
|
[
"MIT"
] | 6
|
2015-04-07T07:38:30.000Z
|
2020-04-01T12:45:53.000Z
|
from django import template
from django.template.defaultfilters import stringfilter
register = template.Library()
@register.filter
@stringfilter
def remove_all_whitespace(value):
return ''.join(value.split())
@register.filter
@stringfilter
def strip(value, *args, **kwargs):
return value.strip(*args, **kwargs)
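# Hedged usage sketch, not part of the original file: because @stringfilter
# keeps the decorated functions as ordinary callables, they can be exercised
# directly as well as from templates, e.g. {{ value|remove_all_whitespace }}.
if __name__ == '__main__':
    assert remove_all_whitespace("  a b\tc\n") == 'abc'
    assert strip("  padded  ") == 'padded'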
| 19.058824
| 55
| 0.762346
|
facee01a8e1d802cd4f2e581588bb9457cf22b1f
| 419
|
py
|
Python
|
backend/merp_by_cabosys_29948/wsgi.py
|
crowdbotics-apps/merp-by-cabosys-29948
|
770e1c2c74eae77160be81ed5b41b9b5ea934fa4
|
[
"FTL",
"AML",
"RSA-MD"
] | null | null | null |
backend/merp_by_cabosys_29948/wsgi.py
|
crowdbotics-apps/merp-by-cabosys-29948
|
770e1c2c74eae77160be81ed5b41b9b5ea934fa4
|
[
"FTL",
"AML",
"RSA-MD"
] | 14
|
2021-08-23T05:43:02.000Z
|
2021-10-05T12:42:46.000Z
|
backend/merp_by_cabosys_29948/wsgi.py
|
crowdbotics-apps/merp-by-cabosys-29948
|
770e1c2c74eae77160be81ed5b41b9b5ea934fa4
|
[
"FTL",
"AML",
"RSA-MD"
] | null | null | null |
"""
WSGI config for merp_by_cabosys_29948 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'merp_by_cabosys_29948.settings')
application = get_wsgi_application()
| 24.647059
| 81
| 0.799523
|
b8d980af378041bb250f31dba8093d4d00cd289f
| 1,121
|
py
|
Python
|
src/query_executor/abstract_executor.py
|
imvinod/Eva
|
0ed9814ae89db7dce1fb734dc99d5dac69cb3c82
|
[
"Apache-2.0"
] | 1
|
2019-11-06T03:30:08.000Z
|
2019-11-06T03:30:08.000Z
|
src/query_executor/abstract_executor.py
|
imvinod/Eva
|
0ed9814ae89db7dce1fb734dc99d5dac69cb3c82
|
[
"Apache-2.0"
] | 1
|
2019-11-18T03:09:56.000Z
|
2019-11-18T03:09:56.000Z
|
src/query_executor/abstract_executor.py
|
asrayousuf/Eva
|
f652e5d398556055490c146f37e7a2d7a9d091f3
|
[
"Apache-2.0"
] | null | null | null |
from abc import ABC, abstractmethod
from typing import List
from src.models import FrameBatch
from src.query_planner.abstract_plan import AbstractPlan
class AbstractExecutor(ABC):
"""
An abstract class for the executor engine
Arguments:
node (AbstractPlan): Plan node corresponding to this executor
"""
def __init__(self, node: 'AbstractPlan'):
self._node = node
self._children = []
def append_child(self, child: 'AbstractExecutor'):
"""
appends a child executor node
Arguments:
child {AbstractExecutor} -- child node
"""
self._children.append(child)
@property
def children(self) -> List['AbstractExecutor']:
"""
Returns the list of child executors
Returns:
[] -- list of children
"""
return self._children
@abstractmethod
def validate(self):
raise NotImplementedError('Must be implemented in subclasses.')
@abstractmethod
def execute(self, batch: FrameBatch):
raise NotImplementedError('Must be implemented in subclasses.')
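# Hedged sketch, not part of the original file: a trivial concrete executor that
# validates nothing and forwards each batch through its children, showing how
# the append_child/children plumbing is intended to be used.
class PassThroughExecutor(AbstractExecutor):
    def validate(self):
        pass

    def execute(self, batch: FrameBatch):
        for child in self.children:
            child.execute(batch)
        return batch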
| 25.477273
| 69
| 0.636039
|
b4c5dd4ba5438d9cfba50375253a097c27d4c031
| 2,248
|
py
|
Python
|
trick_sims/SIM_sun/RUN_Winter/input.py
|
gilbertguoze/trick
|
f0537efb0fa3cb5c0c84e36b60f055c1d1c60d21
|
[
"NASA-1.3"
] | null | null | null |
trick_sims/SIM_sun/RUN_Winter/input.py
|
gilbertguoze/trick
|
f0537efb0fa3cb5c0c84e36b60f055c1d1c60d21
|
[
"NASA-1.3"
] | null | null | null |
trick_sims/SIM_sun/RUN_Winter/input.py
|
gilbertguoze/trick
|
f0537efb0fa3cb5c0c84e36b60f055c1d1c60d21
|
[
"NASA-1.3"
] | 3
|
2021-06-22T11:02:39.000Z
|
2021-10-21T00:58:00.000Z
|
trick_sys.sched.set_enable_freeze(True)
""" Sim Control Panel """
simControlPanel = trick.SimControlPanel()
trick.add_external_application(simControlPanel)
RECORD_DATA = True
if RECORD_DATA:
execfile("Modified_data/helios.dr")
REALTIME = False
if REALTIME:
execfile("Modified_data/realtime.py")
JAPANESE = False
if JAPANESE:
execfile("Modified_data/Japanese_labels_alt.py")
STRIPCHART = False
if STRIPCHART:
trickView = trick.TrickView()
trickView.set_auto_open_file("sun.tv")
trickView.set_strip_charts_only(True)
trick.add_external_application(trickView)
""" ======================================== """
""" LOCATION """
""" ======================================== """
""" JSC MAIN Gate """
sun_predictor.sun.observer_latitude = trick.sim_services.attach_units("d" , 29.55298)
sun_predictor.sun.observer_longitude = trick.sim_services.attach_units("d" , 95.09379)
""" ======================================== """
""" TIME ZONE """
""" Pick your time zone by uncommenting the """
""" appropriate line below. """
""" ======================================== """
""" Eastern Daylight Time """
#sun_predictor.sun.observer_offset_from_UTC = -4
""" Eastern Standard Time """
#sun_predictor.sun.observer_offset_from_UTC = -5
""" Central Daylight Time """
#sun_predictor.sun.observer_offset_from_UTC = -5
""" Central Standard Time """
sun_predictor.sun.observer_offset_from_UTC = -6
""" Mountain Daylight Time """
#sun_predictor.sun.observer_offset_from_UTC = -6
""" Mountain Standard Time """
#sun_predictor.sun.observer_offset_from_UTC = -7
""" Pacific Daylight Time """
#sun_predictor.sun.observer_offset_from_UTC = -7
""" Pacific Standard Time """
#sun_predictor.sun.observer_offset_from_UTC = -8
""" ======================================== """
""" LOCAL TIME """
""" Set local time here. """
""" ======================================== """
sun_predictor.sun.local_time.year = 2006
sun_predictor.sun.local_time.month = 12
sun_predictor.sun.local_time.day = 21
sun_predictor.sun.local_time.hour = 0
sun_predictor.sun.local_time.min = 0
sun_predictor.sun.local_time.sec = 0.0
trick.stop(86400.0)
| 31.222222
| 86
| 0.610765
|
c305d6a2c74f0f831854743c50f77a294c879fd1
| 1,472
|
py
|
Python
|
youtuatools/extractor/restudy.py
|
Pagasis/YouTua
|
edb44b2065a7224f8b26aaf76166bf7287901567
|
[
"MIT"
] | 47
|
2021-01-02T07:44:50.000Z
|
2022-02-28T22:02:13.000Z
|
nextdl/extractor/restudy.py
|
devenu85/nextdl
|
0b458f556e2e0be80cb94bd9a9b1405ad2e9182d
|
[
"MIT"
] | 4
|
2021-02-07T03:35:13.000Z
|
2021-10-31T19:23:53.000Z
|
nextdl/extractor/restudy.py
|
devenu85/nextdl
|
0b458f556e2e0be80cb94bd9a9b1405ad2e9182d
|
[
"MIT"
] | 8
|
2021-01-03T05:44:39.000Z
|
2021-11-01T05:46:32.000Z
|
# coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
class RestudyIE(InfoExtractor):
_VALID_URL = (
r"https?://(?:(?:www|portal)\.)?restudy\.dk/video/[^/]+/id/(?P<id>[0-9]+)"
)
_TESTS = [
{
"url": "https://www.restudy.dk/video/play/id/1637",
"info_dict": {
"id": "1637",
"ext": "flv",
"title": "Leiden-frosteffekt",
"description": "Denne video er et eksperiment med flydende kvælstof.",
},
"params": {
# rtmp download
"skip_download": True,
},
},
{
"url": "https://portal.restudy.dk/video/leiden-frosteffekt/id/1637",
"only_matching": True,
},
]
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
title = self._og_search_title(webpage).strip()
description = self._og_search_description(webpage).strip()
formats = self._extract_smil_formats(
"https://cdn.portal.restudy.dk/dynamic/themes/front/awsmedia/SmilDirectory/video_%s.xml"
% video_id,
video_id,
)
self._sort_formats(formats)
return {
"id": video_id,
"title": title,
"description": description,
"formats": formats,
}
| 28.307692
| 100
| 0.521739
|
932f5afba6a61d9d9ef413707433f887a4d5eeb6
| 1,806
|
py
|
Python
|
venv/lib/python3.6/site-packages/smartfields/processors/video.py
|
Eccie-K/neighbour-hood
|
f874f9468160aa34dee294d685374e4c5e2eec4d
|
[
"MIT"
] | null | null | null |
venv/lib/python3.6/site-packages/smartfields/processors/video.py
|
Eccie-K/neighbour-hood
|
f874f9468160aa34dee294d685374e4c5e2eec4d
|
[
"MIT"
] | 4
|
2020-06-05T23:21:40.000Z
|
2021-06-10T21:57:32.000Z
|
virtual/lib/python3.6/site-packages/smartfields/processors/video.py
|
Eccie-K/neighbour-hood
|
f874f9468160aa34dee294d685374e4c5e2eec4d
|
[
"MIT"
] | null | null | null |
import re
from django.utils import six
from smartfields.processors.base import ExternalFileProcessor
from smartfields.utils import ProcessingError
__all__ = [
'FFMPEGProcessor'
]
class FFMPEGProcessor(ExternalFileProcessor):
duration_re = re.compile(r'Duration: (?P<hours>\d+):(?P<minutes>\d+):(?P<seconds>\d+)')
progress_re = re.compile(r'time=(?P<hours>\d+):(?P<minutes>\d+):(?P<seconds>\d+)')
error_re = re.compile(r'Invalid data found when processing input')
cmd_template = "ffmpeg -i {input} -y -codec:v {vcodec} -b:v {vbitrate} " \
"-maxrate {maxrate} -bufsize {bufsize} -vf " \
"scale={width}:{height} -threads {threads} -c:a {acodec} {output}"
def stdout_handler(self, line, duration=None):
if duration is None:
duration_time = self.duration_re.search(line)
if duration_time:
duration = self.timedict_to_seconds(duration_time.groupdict())
elif duration != 0:
current_time = self.progress_re.search(line)
if current_time:
seconds = self.timedict_to_seconds(current_time.groupdict())
progress = float(seconds)/duration
progress = progress if progress < 1 else 0.99
self.set_progress(progress)
elif self.error_re.search(line):
raise ProcessingError("Invalid video file or unknown video format.")
return (duration,)
def timedict_to_seconds(self, timedict):
seconds = 0
for key, t in six.iteritems(timedict):
if key == 'seconds':
seconds+= int(t)
elif key == 'minutes':
seconds+= int(t)*60
elif key == 'hours':
seconds+= int(t)*3600
return seconds
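# Hedged illustration, not part of the original module: the progress tracking
# rests on the two regular expressions above; on a fabricated log line the
# duration group dict converts to 100 seconds via timedict_to_seconds(), which
# then becomes the denominator of the fraction passed to set_progress().
if __name__ == '__main__':
    m = FFMPEGProcessor.duration_re.search("  Duration: 00:01:40.00, start: 0.0")
    print(m.groupdict())  # {'hours': '00', 'minutes': '01', 'seconds': '40'}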
| 40.133333
| 91
| 0.601883
|
2e812e3a18aa46e7e0ceb489457e4727057e2d53
| 23,700
|
py
|
Python
|
sdk/appservice/azure-mgmt-web/azure/mgmt/web/v2020_09_01/operations/_resource_health_metadata_operations.py
|
beltr0n/azure-sdk-for-python
|
2f7fb8bee881b0fc0386a0ad5385755ceedd0453
|
[
"MIT"
] | 2
|
2021-03-24T06:26:11.000Z
|
2021-04-18T15:55:59.000Z
|
sdk/appservice/azure-mgmt-web/azure/mgmt/web/v2020_09_01/operations/_resource_health_metadata_operations.py
|
beltr0n/azure-sdk-for-python
|
2f7fb8bee881b0fc0386a0ad5385755ceedd0453
|
[
"MIT"
] | 4
|
2019-04-17T17:57:49.000Z
|
2020-04-24T21:11:22.000Z
|
sdk/appservice/azure-mgmt-web/azure/mgmt/web/v2020_09_01/operations/_resource_health_metadata_operations.py
|
beltr0n/azure-sdk-for-python
|
2f7fb8bee881b0fc0386a0ad5385755ceedd0453
|
[
"MIT"
] | 2
|
2021-05-23T16:46:31.000Z
|
2021-05-26T23:51:09.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class ResourceHealthMetadataOperations(object):
"""ResourceHealthMetadataOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.web.v2020_09_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.ResourceHealthMetadataCollection"]
"""List all ResourceHealthMetadata for all sites in the subscription.
Description for List all ResourceHealthMetadata for all sites in the subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ResourceHealthMetadataCollection or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.web.v2020_09_01.models.ResourceHealthMetadataCollection]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ResourceHealthMetadataCollection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-09-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('ResourceHealthMetadataCollection', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize(_models.DefaultErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Web/resourceHealthMetadata'} # type: ignore
def list_by_resource_group(
self,
resource_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.ResourceHealthMetadataCollection"]
"""List all ResourceHealthMetadata for all sites in the resource group in the subscription.
Description for List all ResourceHealthMetadata for all sites in the resource group in the
subscription.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ResourceHealthMetadataCollection or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.web.v2020_09_01.models.ResourceHealthMetadataCollection]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ResourceHealthMetadataCollection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-09-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_resource_group.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('ResourceHealthMetadataCollection', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize(_models.DefaultErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/resourceHealthMetadata'} # type: ignore
def list_by_site(
self,
resource_group_name, # type: str
name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.ResourceHealthMetadataCollection"]
"""Gets the category of ResourceHealthMetadata to use for the given site as a collection.
Description for Gets the category of ResourceHealthMetadata to use for the given site as a
collection.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param name: Name of web app.
:type name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ResourceHealthMetadataCollection or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.web.v2020_09_01.models.ResourceHealthMetadataCollection]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ResourceHealthMetadataCollection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-09-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_site.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
'name': self._serialize.url("name", name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('ResourceHealthMetadataCollection', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize(_models.DefaultErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_site.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/resourceHealthMetadata'} # type: ignore
def get_by_site(
self,
resource_group_name, # type: str
name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.ResourceHealthMetadata"
"""Gets the category of ResourceHealthMetadata to use for the given site.
Description for Gets the category of ResourceHealthMetadata to use for the given site.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param name: Name of web app.
:type name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ResourceHealthMetadata, or the result of cls(response)
:rtype: ~azure.mgmt.web.v2020_09_01.models.ResourceHealthMetadata
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ResourceHealthMetadata"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-09-01"
accept = "application/json"
# Construct URL
url = self.get_by_site.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
'name': self._serialize.url("name", name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.DefaultErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('ResourceHealthMetadata', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_by_site.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/resourceHealthMetadata/default'} # type: ignore
def list_by_site_slot(
self,
resource_group_name, # type: str
name, # type: str
slot, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.ResourceHealthMetadataCollection"]
"""Gets the category of ResourceHealthMetadata to use for the given site as a collection.
Description for Gets the category of ResourceHealthMetadata to use for the given site as a
collection.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param name: Name of web app.
:type name: str
:param slot: Name of web app slot. If not specified then will default to production slot.
:type slot: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ResourceHealthMetadataCollection or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.web.v2020_09_01.models.ResourceHealthMetadataCollection]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ResourceHealthMetadataCollection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-09-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_site_slot.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
'name': self._serialize.url("name", name, 'str'),
'slot': self._serialize.url("slot", slot, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('ResourceHealthMetadataCollection', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize(_models.DefaultErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_site_slot.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/resourceHealthMetadata'} # type: ignore
def get_by_site_slot(
self,
resource_group_name, # type: str
name, # type: str
slot, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.ResourceHealthMetadata"
"""Gets the category of ResourceHealthMetadata to use for the given site.
Description for Gets the category of ResourceHealthMetadata to use for the given site.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param name: Name of web app.
:type name: str
:param slot: Name of web app slot. If not specified then will default to production slot.
:type slot: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ResourceHealthMetadata, or the result of cls(response)
:rtype: ~azure.mgmt.web.v2020_09_01.models.ResourceHealthMetadata
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ResourceHealthMetadata"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-09-01"
accept = "application/json"
# Construct URL
url = self.get_by_site_slot.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
'name': self._serialize.url("name", name, 'str'),
'slot': self._serialize.url("slot", slot, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.DefaultErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('ResourceHealthMetadata', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_by_site_slot.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/resourceHealthMetadata/default'} # type: ignore
| 49.170124
| 206
| 0.651941
|
35e9849010e5db79126f1d8c4fb6851966622acd
| 22,160
|
py
|
Python
|
mqtt_bme280.py
|
grodansparadis/vscp-python-sensor-bme280
|
8984dd509b7eb2bfb99958c2eb6be44a2281d0c6
|
[
"MIT"
] | 1
|
2022-01-24T20:17:55.000Z
|
2022-01-24T20:17:55.000Z
|
mqtt_bme280.py
|
grodansparadis/vscp-python-sensor-bme280
|
8984dd509b7eb2bfb99958c2eb6be44a2281d0c6
|
[
"MIT"
] | null | null | null |
mqtt_bme280.py
|
grodansparadis/vscp-python-sensor-bme280
|
8984dd509b7eb2bfb99958c2eb6be44a2281d0c6
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
# --------------------------------------
# ___ ___ _ ____
# / _ \/ _ \(_) __/__ __ __
# / , _/ ___/ /\ \/ _ \/ // /
# /_/|_/_/ /_/___/ .__/\_, /
# /_/ /___/
#
# bme280.py
# Read data from a digital pressure sensor.
#
# Official datasheet available from :
# https://www.bosch-sensortec.com/bst/products/all_products/bme280
#
# Author : Matt Hawkins
# Date : 21/01/2018
#
# https://www.raspberrypi-spy.co.uk/
#
# --------------------------------------
# Changes and additions for VSCP © 2021 Ake Hedman, Grodans Paradis AB <info@grodansparadis.com>
# File is part of the VSCP project https://www.vscp.org
import configparser
import getopt
import json
import math
import sys
import time
from ctypes import c_byte, c_short, c_ubyte
import paho.mqtt.client as mqtt
import smbus
import vscp
import vscp_class as vc
import vscp_type as vt
BMP180_CHIP_ID = 0x55  # 85
BMP280_CHIP_ID = 0x58  # 88
BME280_CHIP_ID = 0x60  # 96
BME280_SOFT_RESET_VAL = 0x86
DEVICE = 0x76 # Default device I2C address
# ----------------------------------------------------------------------------
# C O N F I G U R E
# ----------------------------------------------------------------------------
# change this to match the location's pressure (hPa) at sea level
sea_level_pressure = 1013.25
# Print some info along the way
bVerbose = False
# Print debug info if true
bDebug = False
# Subtract this value from reported temperature
temp_corr = 0.0
# Height at installation location
height_at_location = 0.0
# GUID for sensors (Ethernet MAC used if empty)
# Should normally have two LSB's set to zero for sensor id use
guid = ""
# MQTT broker
host = "192.168.1.7"
# MQTT broker port
port = 1883
# Username to login at server
user = "vscp"
# Password to login at server
password = "secret"
# MQTT publish topic.
# %guid% is replaced with GUID
# %class% is replaced with event class
# %type% is replaced with event type
topic = "vscp/{xguid}/{xclass}/{xtype}/{xsensorindex}"
# Sensor index for sensors (BME280)
# Default is to use GUID to identify sensor
sensorindex_temperature = 0
sensorindex_humidity = 1
sensorindex_pressure = 2
sensorindex_pressure_adj = 3
sensorindex_altitude = 4
sensorindex_dewpoint = 5
# Zone for module
zone = 0
# Subzone for module
subzone = 0
# Last two bytes for GUID is made up of number
# given here on the form MSB:LSB
id_temperature = 0
id_humidity = 1
id_pressure = 2
id_pressure_adj = 3
id_altitude = 4
id_dewpoint = 5
note_temperature = "Temperature from BME280"
note_humidity = "Humidity from BME280"
note_pressure = "Pressure from BME280"
note_pressure_adj = "Sea level pressure from BME280"
note_altitude = "Altitude from BME280"
note_dewpoint = "Dewpoint from BME280"
# Configuration will be read from path set here
cfgpath = ""
# ----------------------------------------------------------------------------------------
config = configparser.ConfigParser()
bus = smbus.SMBus(1) # Rev 2 Pi, Pi 2 & Pi 3 uses bus 1
# Rev 1 Pi uses bus 0
def usage():
print("usage: mqtt_bme280.py -v -c <path-to-config-file> -h ")
print("---------------------------------------------")
print("-h/--help - This text.")
print("-v/--verbose - Print output also to screen.")
print("-c/--config - Path to configuration file.")
def getShort(data, index):
# return two bytes from data as a signed 16-bit value
return c_short((data[index+1] << 8) + data[index]).value
def getUShort(data, index):
# return two bytes from data as an unsigned 16-bit value
return (data[index+1] << 8) + data[index]
def getChar(data, index):
# return one byte from data as a signed char
result = data[index]
if result > 127:
result -= 256
return result
def getUChar(data, index):
# return one byte from data as an unsigned char
result = data[index] & 0xFF
return result
def readBME280ID(addr=DEVICE):
# Chip ID Register Address
REG_ID = 0xD0
(chip_id, chip_version) = bus.read_i2c_block_data(addr, REG_ID, 2)
return (chip_id, chip_version)
def readBME280All(addr=DEVICE):
# Register Addresses
REG_DATA = 0xF7
REG_CONTROL = 0xF4
REG_CONFIG = 0xF5
REG_CONTROL_HUM = 0xF2
REG_HUM_MSB = 0xFD
REG_HUM_LSB = 0xFE
# Oversample setting - page 27
OVERSAMPLE_TEMP = 2
OVERSAMPLE_PRES = 2
MODE = 1
# Oversample setting for humidity register - page 26
OVERSAMPLE_HUM = 2
bus.write_byte_data(addr, REG_CONTROL_HUM, OVERSAMPLE_HUM)
control = OVERSAMPLE_TEMP << 5 | OVERSAMPLE_PRES << 2 | MODE
bus.write_byte_data(addr, REG_CONTROL, control)
# Read blocks of calibration data from EEPROM
# See Page 22 data sheet
cal1 = bus.read_i2c_block_data(addr, 0x88, 24)
cal2 = bus.read_i2c_block_data(addr, 0xA1, 1)
cal3 = bus.read_i2c_block_data(addr, 0xE1, 7)
# Convert byte data to word values
dig_T1 = getUShort(cal1, 0)
dig_T2 = getShort(cal1, 2)
dig_T3 = getShort(cal1, 4)
dig_P1 = getUShort(cal1, 6)
dig_P2 = getShort(cal1, 8)
dig_P3 = getShort(cal1, 10)
dig_P4 = getShort(cal1, 12)
dig_P5 = getShort(cal1, 14)
dig_P6 = getShort(cal1, 16)
dig_P7 = getShort(cal1, 18)
dig_P8 = getShort(cal1, 20)
dig_P9 = getShort(cal1, 22)
dig_H1 = getUChar(cal2, 0)
dig_H2 = getShort(cal3, 0)
dig_H3 = getUChar(cal3, 2)
dig_H4 = getChar(cal3, 3)
dig_H4 = (dig_H4 << 24) >> 20
dig_H4 = dig_H4 | (getChar(cal3, 4) & 0x0F)
dig_H5 = getChar(cal3, 5)
dig_H5 = (dig_H5 << 24) >> 20
dig_H5 = dig_H5 | (getUChar(cal3, 4) >> 4 & 0x0F)
dig_H6 = getChar(cal3, 6)
# Wait in ms (Datasheet Appendix B: Measurement time and current calculation)
wait_time = 1.25 + (2.3 * OVERSAMPLE_TEMP) + ((2.3 *
OVERSAMPLE_PRES) + 0.575) + ((2.3 * OVERSAMPLE_HUM)+0.575)
time.sleep(wait_time/1000) # Wait the required time
# Read temperature/pressure/humidity
data = bus.read_i2c_block_data(addr, REG_DATA, 8)
pres_raw = (data[0] << 12) | (data[1] << 4) | (data[2] >> 4)
temp_raw = (data[3] << 12) | (data[4] << 4) | (data[5] >> 4)
hum_raw = (data[6] << 8) | data[7]
# Refine temperature
var1 = ((((temp_raw >> 3)-(dig_T1 << 1)))*(dig_T2)) >> 11
var2 = (((((temp_raw >> 4) - (dig_T1)) *
((temp_raw >> 4) - (dig_T1))) >> 12) * (dig_T3)) >> 14
t_fine = var1+var2
temperature = float(((t_fine * 5) + 128) >> 8);
# Refine pressure and adjust for temperature
var1 = t_fine / 2.0 - 64000.0
var2 = var1 * var1 * dig_P6 / 32768.0
var2 = var2 + var1 * dig_P5 * 2.0
var2 = var2 / 4.0 + dig_P4 * 65536.0
var1 = (dig_P3 * var1 * var1 / 524288.0 + dig_P2 * var1) / 524288.0
var1 = (1.0 + var1 / 32768.0) * dig_P1
if var1 == 0:
pressure = 0
else:
pressure = 1048576.0 - pres_raw
pressure = ((pressure - var2 / 4096.0) * 6250.0) / var1
var1 = dig_P9 * pressure * pressure / 2147483648.0
var2 = pressure * dig_P8 / 32768.0
pressure = pressure + (var1 + var2 + dig_P7) / 16.0
# Refine humidity
humidity = t_fine - 76800.0
humidity = (hum_raw - (dig_H4 * 64.0 + dig_H5 / 16384.0 * humidity)) * (dig_H2 / 65536.0 *
(1.0 + dig_H6 / 67108864.0 * humidity * (1.0 + dig_H3 / 67108864.0 * humidity)))
humidity = humidity * (1.0 - dig_H1 * humidity / 524288.0)
if humidity > 100:
humidity = 100
elif humidity < 0:
humidity = 0
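    # Returned units: temperature in degrees Celsius, pressure in hPa, humidity in %RH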
return temperature/100.0, pressure/100.0, humidity
# def main():
# ----------------------------------------------------------------------------
args = sys.argv[1:]
nargs = len(args)
try:
opts, args = getopt.getopt(args, "hvc:", ["help", "verbose", "config="])
except getopt.GetoptError:
print("unrecognized format!")
usage()
sys.exit(2)
for opt, arg in opts:
if opt in ("-h", "--help"):
print("HELP")
usage()
sys.exit()
elif opt in ("-v", "--verbose"):
bVerbose = True
elif opt in ("-c", "--config"):
cfgpath = arg
if (len(cfgpath)):
init = config.read(cfgpath)
# ----------------- GENERAL -----------------
if 'bVerbose' in config['GENERAL']:
bVerbose = config.getboolean('GENERAL', 'bVerbose')
if bVerbose:
print('Verbose mode enabled.')
print('READING CONFIGURATION')
print('---------------------')
# ----------------- VSCP -----------------
if 'guid' in config['VSCP']:
guid = config['VSCP']['guid']
if bVerbose:
print("guid =", guid)
if 'sensorindex_temperature' in config['VSCP']:
sensorindex_temperature = int(config['VSCP']['sensorindex_temperature'])
if bVerbose:
print("sensorindex_temperature =", sensorindex_temperature)
if 'sensorindex_humidity' in config['VSCP']:
sensorindex_humidity = int(config['VSCP']['sensorindex_humidity'])
if bVerbose:
print("sensorindex_humidity =", sensorindex_humidity)
if 'sensorindex_pressure' in config['VSCP']:
sensorindex_pressure = int(config['VSCP']['sensorindex_pressure'])
if bVerbose:
print("sensorindex_pressure =", sensorindex_pressure)
if 'sensorindex_pressure_adj' in config['VSCP']:
sensorindex_pressure_adj = int(config['VSCP']['sensorindex_pressure_adj'])
if bVerbose:
print("sensorindex_pressure_adj =", sensorindex_pressure_adj)
if 'sensorindex_gas' in config['VSCP']:
sensorindex_gas = int(config['VSCP']['sensorindex_gas'])
if bVerbose:
print("sensorindex_gas =", sensorindex_gas)
if 'sensorindex_altitude' in config['VSCP']:
sensorindex_altitude = int(config['VSCP']['sensorindex_altitude'])
if bVerbose:
print("sensorindex_altitude =", sensorindex_altitude)
if 'sensorindex_dewpoint' in config['VSCP']:
sensorindex_dewpoint = int(config['VSCP']['sensorindex_dewpoint'])
if bVerbose:
print("sensorindex_dewpoint =", sensorindex_dewpoint)
if 'zone' in config['VSCP']:
zone = int(config['VSCP']['zone'])
if bVerbose:
print("zone =", zone)
if 'subzone' in config['VSCP']:
subzone = int(config['VSCP']['subzone'])
if bVerbose:
print("subzone =", subzone)
if 'id_temperature' in config['VSCP']:
id_temperature = int(config['VSCP']['id_temperature'])
if bVerbose:
print("id_temperature =", id_temperature)
if 'id_humidity' in config['VSCP']:
id_humidity = int(config['VSCP']['id_humidity'])
if bVerbose:
print("id_humidity =", id_humidity)
if 'id_pressure' in config['VSCP']:
id_pressure = int(config['VSCP']['id_pressure'])
if bVerbose:
print("id_pressure =", id_pressure)
if 'id_pressure_adj' in config['VSCP']:
id_pressure_adj = int(config['VSCP']['id_pressure_adj'])
if bVerbose:
print("id_pressure_adj =", id_pressure_adj)
if 'id_gas' in config['VSCP']:
id_gas = int(config['VSCP']['id_gas'])
if bVerbose:
print("id_gas =", id_gas)
if 'id_altitude' in config['VSCP']:
id_altitude = int(config['VSCP']['id_altitude'])
if bVerbose:
print("id_altitude =", id_altitude)
if 'id_dewpoint' in config['VSCP']:
id_dewpoint = int(config['VSCP']['id_dewpoint'])
if bVerbose:
print("id_dewpoint =", id_dewpoint)
# ----------------- MQTT -----------------
if 'host' in config['MQTT']:
host = config['MQTT']['host']
if bVerbose:
print("host =", host)
if 'port' in config['MQTT']:
port = int(config['MQTT']['port'])
if bVerbose:
print("port =", port)
if 'user' in config['MQTT']:
user = config['MQTT']['user']
if bVerbose:
print("user =", user)
if 'password' in config['MQTT']:
password = config['MQTT']['password']
if bVerbose:
print("password =", "***********")
# print("password =", password)
if 'topic' in config['MQTT']:
topic = config['MQTT']['topic']
if bVerbose:
print("topic =", password)
if 'note_temperature' in config['MQTT']:
note_temperature = config['MQTT']['note_temperature']
if bVerbose:
print("note_temperature =", note_temperature)
if 'note_humidity' in config['MQTT']:
note_humidity = config['MQTT']['note_humidity']
if bVerbose:
print("note_humidity =", note_humidity)
if 'note_pressure' in config['MQTT']:
note_pressure = config['MQTT']['note_pressure']
if bVerbose:
print("note_pressure =", note_pressure)
if 'note_pressure_adj' in config['MQTT']:
note_pressure_adj = config['MQTT']['note_pressure_adj']
if bVerbose:
print("note_pressure_adj =", note_pressure_adj)
if 'note_gas' in config['MQTT']:
note_gas = config['MQTT']['note_gas']
if bVerbose:
print("note_gas =", note_gas)
if 'note_altitude' in config['MQTT']:
note_altitude = config['MQTT']['note_altitude']
if bVerbose:
print("note_altitude =", note_altitude)
if 'note_dewpoint' in config['MQTT']:
note_dewpoint = config['MQTT']['note_dewpoint']
if bVerbose:
print("note_dewpoint =", note_dewpoint)
# ----------------- BME280 -----------------
if 'sea_level_pressure' in config['BME280']:
if not bDebug :
sea_level_pressure = float(config['BME280']['sea_level_pressure'])
if bVerbose:
print("sea_level_pressure =", float(config['BME280']['sea_level_pressure']))
if 'temp_corr' in config['BME280']:
if not bDebug :
temp_corr = float(config['BME280']['temp_corr'])
if bVerbose:
print("temp_corr =", temp_corr)
if 'height_at_location' in config['BME280']:
if not bDebug :
height_at_location = float(config['BME280']['height_at_location'])
if bVerbose:
print("height_at_location =", height_at_location)
# -----------------------------------------------------------------------------
# define message callback
def on_message(client, userdata, msg):
print(msg.topic+" "+str(msg.payload))
# define connect callback
def on_connect(client, userdata, flags, rc):
print("Connected =",str(rc))
# define publish callback
def on_publish(client, userdata, result):
print("Publish callback\n", result)
# -----------------------------------------------------------------------------
client= mqtt.Client()
# bind callback function
client.on_connect = on_connect
client.on_message = on_message
client.on_publish = on_publish
client.username_pw_set(user, password)
if bVerbose :
print("\n\nConnection in progress...", host, port)
client.connect(host,port)
client.loop_start() # start loop to process received messages
# -----------------------------------------------------------------------------
# Initialize VSCP event content
def initEvent(ex,id,vscpClass,vscpType):
# Dumb node, priority normal
ex.head = vscp.VSCP_PRIORITY_NORMAL | vscp.VSCP_HEADER16_DUMB
g = vscp.guid()
if ("" == guid):
g.setFromString(guid)
else :
g.setGUIDFromMAC(id)
ex.guid = g.guid
ex.vscpclass = vscpClass
ex.vscptype = vscpType
return g
# -----------------------------------------------------------------------------
# Read sensor id etc
(chip_id, chip_version) = readBME280ID()
if bVerbose :
print("-------------------------------------------------------------------------------")
print("Sending events...")
print( "Chip ID : %d" % chip_id)
print( "Version : %d" % chip_version)
temperature,pressure,humidity = readBME280All()
# -----------------------------------------------------------------------------
# T E M P E R A T U R E
# -----------------------------------------------------------------------------
temperature_str = "{:0.2f}".format(temperature - temp_corr)
if bVerbose :
print( "Temperature : %0.2f C" % (temperature - temp_corr))
ex = vscp.vscpEventEx()
g = initEvent(ex, id_temperature, vc.VSCP_CLASS2_MEASUREMENT_STR, vt.VSCP_TYPE_MEASUREMENT_TEMPERATURE)
# Size is predata + string length + terminating zero
ex.sizedata = 4 + len(temperature_str) + 1
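# Payload layout used for all string measurement events below:
#   data[0] = sensor index, data[1] = zone, data[2] = subzone,
#   data[3] = unit, data[4..] = measurement value as ASCII string + terminating zero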
ex.data[0] = sensorindex_temperature
ex.data[1] = zone
ex.data[2] = subzone
ex.data[3] = 1 # unit is degrees Celsius
b = temperature_str.encode()
for idx in range(len(b)):
ex.data[idx + 4] = b[idx]
ex.data[4 + len(temperature_str)] = 0 # optional terminating zero
j = ex.toJSON()
j["vscpNote"] = note_temperature
# Add extra measurement information
j["measurement"] = {
"value" : temperature,
"unit" : 1,
"sensorindex" : sensorindex_temperature,
"zone" : zone,
"subzone" : subzone
}
ptopic = topic.format( xguid=g.getAsString(), xclass=ex.vscpclass, xtype=ex.vscptype, xsensorindex=sensorindex_temperature)
if ( len(ptopic) ):
rv = client.publish(ptopic, json.dumps(j))
if 0 != rv[0] :
print("Failed to pressure rv=", rv)
# -----------------------------------------------------------------------------
# H U M I D I T Y
# -----------------------------------------------------------------------------
if BME280_CHIP_ID == chip_id:
humidity_str = "{:0.0f}".format(humidity)
if bVerbose :
print( "Humidity : %f%%" % humidity)
ex = vscp.vscpEventEx()
    g = initEvent(ex, id_humidity, vc.VSCP_CLASS2_MEASUREMENT_STR, vt.VSCP_TYPE_MEASUREMENT_HUMIDITY)  # capture GUID so the topic below matches this event
# Size is predata + string length + terminating zero
ex.sizedata = 4 + len(humidity_str) + 1
ex.data[0] = sensorindex_humidity
ex.data[1] = zone
ex.data[2] = subzone
ex.data[3] = 0 # default unit % of moisture
b = humidity_str.encode()
for idx in range(len(b)):
ex.data[idx + 4] = b[idx]
ex.data[4 + len(humidity_str)] = 0 # optional terminating zero
j = ex.toJSON()
j["vscpNote"] = note_humidity
# Add extra measurement information
j["measurement"] = {
"value" : humidity,
"unit" : 0,
"sensorindex" : sensorindex_humidity,
"zone" : zone,
"subzone" : subzone
}
ptopic = topic.format( xguid=g.getAsString(), xclass=ex.vscpclass, xtype=ex.vscptype, xsensorindex=sensorindex_humidity)
if ( len(ptopic) ):
rv = client.publish(ptopic, json.dumps(j))
if 0 != rv[0] :
print("Failed to pressure rv=", rv)
# -----------------------------------------------------------------------------
# P R E S S U R E
# -----------------------------------------------------------------------------
pressure_str = "{:0.2f}".format(pressure*100)
if bVerbose :
print( "Pressure : %0.2f hPa" % pressure)
print(pressure_str)
ex = vscp.vscpEventEx()
g = initEvent(ex, id_pressure, vc.VSCP_CLASS2_MEASUREMENT_STR, vt.VSCP_TYPE_MEASUREMENT_PRESSURE)
# Size is predata + string length + terminating zero
ex.sizedata = 4 + len(pressure_str) + 1
ex.data[0] = sensorindex_pressure
ex.data[1] = zone
ex.data[2] = subzone
ex.data[3] = 0 # default unit Pascal
b = pressure_str.encode()
for idx in range(len(b)):
ex.data[idx + 4] = b[idx]
ex.data[4 + len(pressure_str)] = 0 # optional terminating zero
j = ex.toJSON()
j["vscpNote"] = note_pressure
# Add extra pressure information
j["measurement"] = {
"value" : round(pressure*100,2),
"unit" : 0,
"sensorindex" : sensorindex_pressure,
"zone" : zone,
"subzone" : subzone
}
ptopic = topic.format( xguid=g.getAsString(), xclass=ex.vscpclass, xtype=ex.vscptype, xsensorindex=sensorindex_pressure)
if ( len(ptopic) ):
rv = client.publish(ptopic, payload=json.dumps(j), qos=1)
if 0 != rv[0] :
print("Failed to pressure rv=", rv)
# -----------------------------------------------------------------------------
# Adjusted Pressure
# -----------------------------------------------------------------------------
pressure_adj_str = "{:f}".format((pressure*100 + height_at_location/8.3))
if bVerbose :
print( "Adjusted pressure : %0.2f hPa" % float(pressure_adj_str))
ex = vscp.vscpEventEx()
g = initEvent(ex, id_pressure_adj, vc.VSCP_CLASS2_MEASUREMENT_STR, vt.VSCP_TYPE_MEASUREMENT_PRESSURE)
# Size is predata + string length + terminating zero
ex.sizedata = 4 + len(pressure_adj_str) + 1
ex.data[0] = sensorindex_pressure_adj
ex.data[1] = zone
ex.data[2] = subzone
ex.data[3] = 0 # default unit Pascal
b = pressure_adj_str.encode()
for idx in range(len(b)):
ex.data[idx + 4] = b[idx]
ex.data[4 + len(pressure_adj_str)] = 0 # optional terminating zero
j = ex.toJSON()
j["vscpNote"] = note_pressure_adj
# Add extra pressure information
j["measurement"] = {
"value" : round(float(float(pressure_adj_str)),2),
"unit" : 0,
"sensorindex" : sensorindex_pressure_adj,
"zone" : zone,
"subzone" : subzone
}
#print(json.dumps(j))
ptopic = topic.format( xguid=g.getAsString(), xclass=ex.vscpclass, xtype=ex.vscptype, xsensorindex=sensorindex_pressure_adj)
if ( len(ptopic) ):
rv = client.publish(ptopic, json.dumps(j))
if 0 != rv[0] :
print("Failed to send sea level pressure rv=", rv)
# -----------------------------------------------------------------------------
# Dewpoint
# -----------------------------------------------------------------------------
if BME280_CHIP_ID == chip_id:
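    # Simple dewpoint approximation Td = T - (100 - RH)/5; it is only
    # reasonably accurate for relative humidity above roughly 50 %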
dewpoint = temperature - ((100 - humidity) / 5)
dewpoint_str = "{:0.2f}".format(dewpoint)
if bVerbose :
print( "Dewpoint : %f C" % dewpoint)
ex = vscp.vscpEventEx()
    g = initEvent(ex, id_dewpoint, vc.VSCP_CLASS2_MEASUREMENT_STR, vt.VSCP_TYPE_MEASUREMENT_DEWPOINT)
# Size is predata + string length + terminating zero
    ex.sizedata = 4 + len(dewpoint_str) + 1
ex.data[0] = sensorindex_dewpoint
ex.data[1] = zone
ex.data[2] = subzone
    ex.data[3] = 1 # unit is degrees Celsius
    b = dewpoint_str.encode()
for idx in range(len(b)):
ex.data[idx + 4] = b[idx]
    ex.data[4 + len(dewpoint_str)] = 0 # optional terminating zero
j = ex.toJSON()
j["vscpNote"] = note_dewpoint
# Add extra pressure information
j["measurement"] = {
"value" : float(dewpoint),
"unit" : 0,
"sensorindex" : sensorindex_dewpoint,
"zone" : zone,
"subzone" : subzone
}
ptopic = topic.format( xguid=g.getAsString(), xclass=ex.vscpclass, xtype=ex.vscptype, xsensorindex=sensorindex_dewpoint)
if ( len(ptopic) ):
rv = client.publish(ptopic, json.dumps(j))
if 0 != rv[0] :
print("Failed to pressure rv=", rv)
# -----------------------------------------------------------------------------
#time.sleep(0.5)
client.loop_stop()
client.disconnect()
if bVerbose :
print("-------------------------------------------------------------------------------")
print("Closed")
# if __name__=="__main__":
# main()
| 29.196311
| 124
| 0.606273
|
5914ea7871b3f3ee65c3748efb01f5e3c999bb7a
| 2,661
|
py
|
Python
|
src/patent_client/uspto/fulltext/patent/settings.py
|
parkerhancock/python-ip
|
046c1ebbe31e9058a0e421659a3300e8a578fd41
|
[
"Apache-2.0"
] | 15
|
2018-10-23T01:15:29.000Z
|
2022-02-01T19:53:44.000Z
|
src/patent_client/uspto/fulltext/patent/settings.py
|
grimmer0125/patent_client
|
8943ca970d174ecf8aad10669a95ceddd75f5e13
|
[
"Apache-2.0"
] | 33
|
2018-10-26T03:48:51.000Z
|
2022-01-17T09:16:07.000Z
|
src/patent_client/uspto/fulltext/patent/settings.py
|
grimmer0125/patent_client
|
8943ca970d174ecf8aad10669a95ceddd75f5e13
|
[
"Apache-2.0"
] | 7
|
2019-02-14T20:25:20.000Z
|
2021-05-24T07:49:34.000Z
|
SEARCH_PARAMS = {
"Sect1": "PTO2",
"Sect2": "HITOFF",
"u": "/netahtml/PTO/search-adv.htm",
"r": "0",
"p": "1",
"f": "S",
"l": "50",
"Query": "",
"d": "PTXT",
}
SEARCH_FIELDS = {
"patent_number": "PN",
"issue_date": "ISD",
"title": "TTL",
"abstract": "ABST",
"claims": "ACLM",
"specification": "SPEC",
"current_us_classification": "CCL",
"current_cpc_classification": "CPC",
"current_cpc_classification_class": "CPCL",
"international_classification": "ICL",
"application_serial_number": "APN",
"application_date": "APD",
"application_type": "APT",
"government_interest": "GOVT",
"patent_family_id": "FMID",
"parent_case_information": "PARN",
"related_us_app._data": "RLAP",
"related_application_filing_date": "RLFD",
"foreign_priority": "PRIR",
"priority_filing_date": "PRAD",
"pct_information": "PCT",
"pct_filing_date": "PTAD",
"pct_national_stage_filing_date": "PT3D",
"prior_published_document_date": "PPPD",
"reissue_data": "REIS",
"reissued_patent_application_filing_date": "RPAF",
"130_b_affirmation_flag": "AFFF",
"130_b_affirmation_statement": "AFFT",
"inventor_name": "IN",
"inventor_city": "IC",
"inventor_state": "IS",
"inventor_country": "ICN",
"applicant_name": "AANM",
"applicant_city": "AACI",
"applicant_state": "AAST",
"applicant_country": "AACO",
"applicant_type": "AAAT",
"attorney_or_agent": "LREP",
"assignee_name": "AN",
"assignee_city": "AC",
"assignee_state": "AS",
"assignee_country": "ACN",
"primary_examiner": "EXP",
"assistant_examiner": "EXA",
"referenced_by": "REF",
"foreign_references": "FREF",
"other_references": "OREF",
"certificate_of_correction": "COFC",
"reexamination_certificate": "REEX",
"ptab_trial_certificate": "PTAB",
"supplemental_exam_certificate": "SEC",
"international_registration_number": "ILRN",
"international_registration_date": "ILRD",
"international_registration_publication_date": "ILPD",
"hague_international_filing_date": "ILFD",
}
SEARCH_PARAMS = {
"Sect1": "PTO2",
"Sect2": "HITOFF",
"u": "/netahtml/PTO/search-adv.htm",
"r": "0",
"p": "1",
"f": "S",
"l": "50",
"Query": "",
"d": "PTXT",
}
SEARCH_URL = "https://patft.uspto.gov/netacgi/nph-Parser"
PUBLICATION_URL = (
"https://patft.uspto.gov/netacgi/nph-Parser?"
"Sect1=PTO1&Sect2=HITOFF&d=PALL&p=1&"
"u=%2Fnetahtml%2FPTO%2Fsrchnum.htm&r=1&f=G&l=50&s1={publication_number}.PN.&"
"OS=PN/{publication_number}&RS=PN/{publication_number}"
)
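# Illustrative use of the template above (the number is a placeholder, not a real lookup):
#   PUBLICATION_URL.format(publication_number="7654321")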
| 29.241758
| 81
| 0.622322
|
1773c5f2cbf43155b4c4ebf00ad4a7af706ef8e0
| 5,282
|
py
|
Python
|
src_files/loss_functions/losses.py
|
kprokofi/ML_Decoder
|
c01c50e0165e607afbebd8d615708ef9c084dd5b
|
[
"MIT"
] | null | null | null |
src_files/loss_functions/losses.py
|
kprokofi/ML_Decoder
|
c01c50e0165e607afbebd8d615708ef9c084dd5b
|
[
"MIT"
] | null | null | null |
src_files/loss_functions/losses.py
|
kprokofi/ML_Decoder
|
c01c50e0165e607afbebd8d615708ef9c084dd5b
|
[
"MIT"
] | null | null | null |
import torch
import torch.nn as nn
class AsymmetricLoss(nn.Module):
def __init__(self, gamma_neg=4, gamma_pos=1, clip=0.05, eps=1e-8, disable_torch_grad_focal_loss=True):
super(AsymmetricLoss, self).__init__()
self.gamma_neg = gamma_neg
self.gamma_pos = gamma_pos
self.clip = clip
self.disable_torch_grad_focal_loss = disable_torch_grad_focal_loss
self.eps = eps
def forward(self, x, y):
""""
Parameters
----------
x: input logits
y: targets (multi-label binarized vector)
"""
# Calculating Probabilities
x_sigmoid = torch.sigmoid(x)
xs_pos = x_sigmoid
xs_neg = 1 - x_sigmoid
# Asymmetric Clipping
if self.clip is not None and self.clip > 0:
xs_neg = (xs_neg + self.clip).clamp(max=1)
# Basic CE calculation
los_pos = y * torch.log(xs_pos.clamp(min=self.eps))
los_neg = (1 - y) * torch.log(xs_neg.clamp(min=self.eps))
loss = los_pos + los_neg
# Asymmetric Focusing
if self.gamma_neg > 0 or self.gamma_pos > 0:
if self.disable_torch_grad_focal_loss:
torch.set_grad_enabled(False)
pt0 = xs_pos * y
pt1 = xs_neg * (1 - y) # pt = p if t > 0 else 1-p
pt = pt0 + pt1
one_sided_gamma = self.gamma_pos * y + self.gamma_neg * (1 - y)
one_sided_w = torch.pow(1 - pt, one_sided_gamma)
if self.disable_torch_grad_focal_loss:
torch.set_grad_enabled(True)
loss *= one_sided_w
return -loss.sum()
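# Illustrative usage sketch (not part of the original file; "model", "images" and
# "targets" are assumed placeholders):
#   criterion = AsymmetricLoss(gamma_neg=4, gamma_pos=1, clip=0.05)
#   logits = model(images)             # raw scores, shape (batch, num_classes)
#   loss = criterion(logits, targets)  # targets: multi-hot 0/1 tensor of the same shape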
class AsymmetricLossOptimized(nn.Module):
''' Notice - optimized version, minimizes memory allocation and gpu uploading,
favors inplace operations'''
def __init__(self, gamma_neg=4, gamma_pos=1, clip=0.05, eps=1e-8, disable_torch_grad_focal_loss=False):
super(AsymmetricLossOptimized, self).__init__()
self.gamma_neg = gamma_neg
self.gamma_pos = gamma_pos
self.clip = clip
self.disable_torch_grad_focal_loss = disable_torch_grad_focal_loss
self.eps = eps
# prevent memory allocation and gpu uploading every iteration, and encourages inplace operations
self.targets = self.anti_targets = self.xs_pos = self.xs_neg = self.asymmetric_w = self.loss = None
def forward(self, x, y):
""""
Parameters
----------
x: input logits
y: targets (multi-label binarized vector)
"""
self.targets = y
self.anti_targets = 1 - y
# Calculating Probabilities
self.xs_pos = torch.sigmoid(x)
self.xs_neg = 1.0 - self.xs_pos
# Asymmetric Clipping
if self.clip is not None and self.clip > 0:
self.xs_neg.add_(self.clip).clamp_(max=1)
# Basic CE calculation
self.loss = self.targets * torch.log(self.xs_pos.clamp(min=self.eps))
self.loss.add_(self.anti_targets * torch.log(self.xs_neg.clamp(min=self.eps)))
# Asymmetric Focusing
if self.gamma_neg > 0 or self.gamma_pos > 0:
if self.disable_torch_grad_focal_loss:
torch.set_grad_enabled(False)
self.xs_pos = self.xs_pos * self.targets
self.xs_neg = self.xs_neg * self.anti_targets
self.asymmetric_w = torch.pow(1 - self.xs_pos - self.xs_neg,
self.gamma_pos * self.targets + self.gamma_neg * self.anti_targets)
if self.disable_torch_grad_focal_loss:
torch.set_grad_enabled(True)
self.loss *= self.asymmetric_w
return -self.loss.sum()
class ASLSingleLabel(nn.Module):
'''
This loss is intended for single-label classification problems
'''
def __init__(self, gamma_pos=0, gamma_neg=4, eps: float = 0.1, reduction='mean'):
super(ASLSingleLabel, self).__init__()
self.eps = eps
self.logsoftmax = nn.LogSoftmax(dim=-1)
self.targets_classes = []
self.gamma_pos = gamma_pos
self.gamma_neg = gamma_neg
self.reduction = reduction
def forward(self, inputs, target):
'''
"input" dimensions: - (batch_size,number_classes)
"target" dimensions: - (batch_size)
'''
num_classes = inputs.size()[-1]
log_preds = self.logsoftmax(inputs)
self.targets_classes = torch.zeros_like(inputs).scatter_(1, target.long().unsqueeze(1), 1)
# ASL weights
targets = self.targets_classes
anti_targets = 1 - targets
xs_pos = torch.exp(log_preds)
xs_neg = 1 - xs_pos
xs_pos = xs_pos * targets
xs_neg = xs_neg * anti_targets
asymmetric_w = torch.pow(1 - xs_pos - xs_neg,
self.gamma_pos * targets + self.gamma_neg * anti_targets)
log_preds = log_preds * asymmetric_w
if self.eps > 0: # label smoothing
self.targets_classes = self.targets_classes.mul(1 - self.eps).add(self.eps / num_classes)
# loss calculation
loss = - self.targets_classes.mul(log_preds)
loss = loss.sum(dim=-1)
if self.reduction == 'mean':
loss = loss.mean()
return loss
| 34.980132
| 109
| 0.604127
|
4f5fcf6cbc29bac6b0739c46abf3694340912518
| 5,061
|
py
|
Python
|
pybank/bank.py
|
Nat1405/pychain
|
a8b01afcf0df3ce24ec30d6f887d16ed3aa28d63
|
[
"MIT"
] | null | null | null |
pybank/bank.py
|
Nat1405/pychain
|
a8b01afcf0df3ce24ec30d6f887d16ed3aa28d63
|
[
"MIT"
] | null | null | null |
pybank/bank.py
|
Nat1405/pychain
|
a8b01afcf0df3ce24ec30d6f887d16ed3aa28d63
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# Basic implementation of a banking application in Python
# Copyright Nat Comeau 2017
# STDLIB
# LOCAL
class Bank(object):
"""A bank has customers that want to do things with their money
This is a simple bank that holds people's money and transfers it to each other.
Parameters
----------
name : str
name of the bank
Attributes
----------
name : str
name of bank
customers : dict
dictionary of customer: <customer_object> pairs
"""
def __init__(self, name):
self.name = name
        # Create an empty dict to hold this bank's customers
        self.customers = {}
class Customer(object):
"""
A customer starts with just a name. After, you must add accounts to their
list of accounts with a starting balance.
Parameters
----------
name : str
name of the customer
Attributes
----------
name : str
name of the customer
accounts : dict
dictionary of account objects with balances
Examples
--------
Create a new Customer
>>> nat = Customer('Nathaniel C')
Give them a student banking account and a credit account
>>> nat.add_account('Student Banking', 10.00)
>>> nat.add_account('Credit', 50.00)
>>> print(nat)
Valued Customer Name: Nathaniel C
Account name: Student Banking Account balance: 10.0
Account name: Credit Account balance: 50.0
>>> nat.deposit('Student Banking', 10.0)
>>> print(nat)
Valued Customer Name: Nathaniel C
Account name: Student Banking Account balance: 20.0
Account name: Credit Account balance: 50.0
"""
def __init__(self, name):
self.name = name
self.accounts = {}
def add_account(self, name, start=0.00):
"""Add account to a customers portfolio
Parameters
----------
name : str
The account name
start : float, optional
The starting value of the account
"""
self.accounts[name] = Account(name, start)
def withdraw(self, account, amount):
"""Withdraw from an account
Parameters
----------
account : str
name of account to withdraw from
amount : float
amount of money to withdraw from account
Returns
-------
withdrawn : float
amount of money withdrawn from account
"""
        # Take the amount of money out of the account
self.accounts[account].balance -= amount
# Return the amount of money we withdrew
return amount
def deposit(self, account, amount):
"""Deposit some money in an account
Parameters
----------
account : str
name of account to deposit in
amount : float
amount of money to deposit in account
"""
self.accounts[account].balance += amount
def transfer(self, sending, recieving, amount):
"""Transfer money between two accounts
Parameters
----------
sending : str
recieving : str
amount : float
"""
        # Move the requested amount out of the sending account and into
        # the receiving account using the existing helper methods.
        self.deposit(recieving, self.withdraw(sending, amount))
def __str__(self):
"""
"""
rep = "Valued Customer Name: {}\n".format(self.name)
for i, account in enumerate(self.accounts):
if i < len(self.accounts) - 1:
rep += str(self.accounts[account]) + "\n"
else:
rep += str(self.accounts[account])
return rep
class Account(object):
"""
Parameters
----------
name : str
account name
start : float, optional
starting balance to begin with; default 0.00
Attributes
----------
name : str
account name
balance : float
account balance
Examples
--------
>>> acc = Account('Student Banking', 100.00)
>>> print(acc)
Account name: Student Banking Account balance: 100.0
"""
def __init__(self, name, start=0.00):
self.name = name
self.balance = start
def deposit(self, amount):
"""Add money to self
Parameters
----------
amount : float
amount to deposit; must be positive
Raises
------
ValueError
If the amount is not a positive number
"""
if amount >= 0:
self.balance += amount
else:
raise ValueError
def withdraw(self, amount):
"""Remove money from self
Parameters
----------
amount : float
amount to withdraw; must be positive
Raises
------
ValueError
If the amount is not a positive number
"""
if amount >= 0:
self.balance -= amount
else:
raise ValueError
def __str__(self):
"""
"""
return "Account name: {} Account balance: {}".format(self.name, self.balance)
if __name__ == "__main__":
# Use doctest to test new accounts
import doctest
doctest.testmod()
| 22.900452
| 85
| 0.550879
|
4894f0328ee1d21c0f7504ea539c07e66d4009a7
| 303
|
py
|
Python
|
teamcat_service/docker_build/target/one_step_build/teamcat/doraemon/toolbox/views/commonview.py
|
zhangyin2088/Teamcat
|
be9be8d7c1e58c8d2d22ab78d25783d9aee4de71
|
[
"Apache-2.0"
] | 6
|
2018-11-26T08:42:52.000Z
|
2020-06-01T08:33:48.000Z
|
teamcat_service/docker_build/target/one_step_build/teamcat/doraemon/toolbox/views/commonview.py
|
zhangyin2088/Teamcat
|
be9be8d7c1e58c8d2d22ab78d25783d9aee4de71
|
[
"Apache-2.0"
] | null | null | null |
teamcat_service/docker_build/target/one_step_build/teamcat/doraemon/toolbox/views/commonview.py
|
zhangyin2088/Teamcat
|
be9be8d7c1e58c8d2d22ab78d25783d9aee4de71
|
[
"Apache-2.0"
] | 1
|
2019-01-22T06:45:36.000Z
|
2019-01-22T06:45:36.000Z
|
#coding=utf-8
# coding=utf-8
'''
Created on 2014-3-18
@author: ETHAN
'''
from django.shortcuts import render_to_response
def loadleftnavigater(request):
''' load left navigater'''
print("#####################################")
return render_to_response("common/toolboxleftnavigater.html")
| 21.642857
| 65
| 0.636964
|
305648a78e35a485d26b857f860223b0bbebbc15
| 1,345
|
py
|
Python
|
plugins/modules/ytthumb.py
|
bughunter0/Helper-Bot
|
f03370623160dceec7a790f5836536aefffbdf7b
|
[
"MIT"
] | null | null | null |
plugins/modules/ytthumb.py
|
bughunter0/Helper-Bot
|
f03370623160dceec7a790f5836536aefffbdf7b
|
[
"MIT"
] | null | null | null |
plugins/modules/ytthumb.py
|
bughunter0/Helper-Bot
|
f03370623160dceec7a790f5836536aefffbdf7b
|
[
"MIT"
] | 2
|
2021-09-19T17:32:17.000Z
|
2021-11-21T09:49:09.000Z
|
import ytthumb
from pyrogram import Client, filters
from pyrogram.types import Message, InlineKeyboardMarkup, InlineKeyboardButton
@Client.on_message(
filters.command(["ytthumb", "thumbnail"]),
group=1
)
async def youtube_thumbnail(update: Message):
reply_markup = InlineKeyboardMarkup(
[[InlineKeyboardButton('Join Updates Channel', url='https://telegram.me/FayasNoushad')]]
)
if len(update.text.split()) == 1 or len(update.text.split()) >= 4:
await update.reply_text(
"Send command with youtube video link with quality ( optional )",
quote=True,
reply_markup=reply_markup
)
else:
length = len(update.text.split())
video = update.text.split()[1]
if length == 2:
thumbnail = ytthumb.thumbnail(video)
else:
quality = update.text.split()[2]
thumbnail = ytthumb.thumbnail(video, quality)
try:
await update.reply_photo(
thumbnail,
quote=True,
reply_markup=reply_markup
)
except Exception as error:
await update.reply_text(
                text=str(error),
disable_web_page_preview=True,
reply_markup=reply_markup,
quote=True
)
| 32.02381
| 96
| 0.581413
|
8e3d4838c58bd05cb7777f9e823275daa2e27454
| 22,887
|
py
|
Python
|
Lib/test/test_trace.py
|
ystk/debian-python2.6
|
17d77164dc5d5748e54aeaa5adc89ac511fc71ae
|
[
"PSF-2.0"
] | 3
|
2015-09-22T14:04:54.000Z
|
2021-07-15T07:07:11.000Z
|
Lib/test/test_trace.py
|
ystk/debian-python2.6
|
17d77164dc5d5748e54aeaa5adc89ac511fc71ae
|
[
"PSF-2.0"
] | 1
|
2020-09-07T15:33:56.000Z
|
2020-09-07T15:33:56.000Z
|
Lib/test/test_trace.py
|
ystk/debian-python2.6
|
17d77164dc5d5748e54aeaa5adc89ac511fc71ae
|
[
"PSF-2.0"
] | 2
|
2015-09-22T14:05:27.000Z
|
2015-11-08T20:15:42.000Z
|
# Testing the line trace facility.
from test import test_support
import unittest
import sys
import difflib
import gc
# A very basic example. If this fails, we're in deep trouble.
def basic():
return 1
basic.events = [(0, 'call'),
(1, 'line'),
(1, 'return')]
# Many of the tests below are tricky because they involve pass statements.
# If there is implicit control flow around a pass statement (in an except
# clause or else caluse) under what conditions do you set a line number
# following that clause?
# The entire "while 0:" statement is optimized away. No code
# exists for it, so the line numbers skip directly from "del x"
# to "x = 1".
def arigo_example():
x = 1
del x
while 0:
pass
x = 1
arigo_example.events = [(0, 'call'),
(1, 'line'),
(2, 'line'),
(5, 'line'),
(5, 'return')]
# check that lines consisting of just one instruction get traced:
def one_instr_line():
x = 1
del x
x = 1
one_instr_line.events = [(0, 'call'),
(1, 'line'),
(2, 'line'),
(3, 'line'),
(3, 'return')]
def no_pop_tops(): # 0
x = 1 # 1
for a in range(2): # 2
if a: # 3
x = 1 # 4
else: # 5
x = 1 # 6
no_pop_tops.events = [(0, 'call'),
(1, 'line'),
(2, 'line'),
(3, 'line'),
(6, 'line'),
(2, 'line'),
(3, 'line'),
(4, 'line'),
(2, 'line'),
(2, 'return')]
def no_pop_blocks():
y = 1
while not y:
bla
x = 1
no_pop_blocks.events = [(0, 'call'),
(1, 'line'),
(2, 'line'),
(4, 'line'),
(4, 'return')]
def called(): # line -3
x = 1
def call(): # line 0
called()
call.events = [(0, 'call'),
(1, 'line'),
(-3, 'call'),
(-2, 'line'),
(-2, 'return'),
(1, 'return')]
def raises():
raise Exception
def test_raise():
try:
raises()
except Exception, exc:
x = 1
test_raise.events = [(0, 'call'),
(1, 'line'),
(2, 'line'),
(-3, 'call'),
(-2, 'line'),
(-2, 'exception'),
(-2, 'return'),
(2, 'exception'),
(3, 'line'),
(4, 'line'),
(4, 'return')]
def _settrace_and_return(tracefunc):
sys.settrace(tracefunc)
sys._getframe().f_back.f_trace = tracefunc
def settrace_and_return(tracefunc):
_settrace_and_return(tracefunc)
settrace_and_return.events = [(1, 'return')]
def _settrace_and_raise(tracefunc):
sys.settrace(tracefunc)
sys._getframe().f_back.f_trace = tracefunc
raise RuntimeError
def settrace_and_raise(tracefunc):
try:
_settrace_and_raise(tracefunc)
except RuntimeError, exc:
pass
settrace_and_raise.events = [(2, 'exception'),
(3, 'line'),
(4, 'line'),
(4, 'return')]
# implicit return example
# This test is interesting because of the else: pass
# part of the code. The code generate for the true
# part of the if contains a jump past the else branch.
# The compiler then generates an implicit "return None"
# Internally, the compiler visits the pass statement
# and stores its line number for use on the next instruction.
# The next instruction is the implicit return None.
def ireturn_example():
a = 5
b = 5
if a == b:
b = a+1
else:
pass
ireturn_example.events = [(0, 'call'),
(1, 'line'),
(2, 'line'),
(3, 'line'),
(4, 'line'),
(6, 'line'),
(6, 'return')]
# Tight loop with while(1) example (SF #765624)
def tightloop_example():
items = range(0, 3)
try:
i = 0
while 1:
b = items[i]; i+=1
except IndexError:
pass
tightloop_example.events = [(0, 'call'),
(1, 'line'),
(2, 'line'),
(3, 'line'),
(4, 'line'),
(5, 'line'),
(5, 'line'),
(5, 'line'),
(5, 'line'),
(5, 'exception'),
(6, 'line'),
(7, 'line'),
(7, 'return')]
def tighterloop_example():
items = range(1, 4)
try:
i = 0
while 1: i = items[i]
except IndexError:
pass
tighterloop_example.events = [(0, 'call'),
(1, 'line'),
(2, 'line'),
(3, 'line'),
(4, 'line'),
(4, 'line'),
(4, 'line'),
(4, 'line'),
(4, 'exception'),
(5, 'line'),
(6, 'line'),
(6, 'return')]
def generator_function():
try:
yield True
"continued"
finally:
"finally"
def generator_example():
# any() will leave the generator before its end
x = any(generator_function())
# the following lines were not traced
for x in range(10):
y = x
generator_example.events = ([(0, 'call'),
(2, 'line'),
(-6, 'call'),
(-5, 'line'),
(-4, 'line'),
(-4, 'return'),
(-4, 'call'),
(-4, 'exception'),
(-1, 'line'),
(-1, 'return')] +
[(5, 'line'), (6, 'line')] * 10 +
[(5, 'line'), (5, 'return')])
class Tracer:
def __init__(self):
self.events = []
def trace(self, frame, event, arg):
self.events.append((frame.f_lineno, event))
return self.trace
def traceWithGenexp(self, frame, event, arg):
(o for o in [1])
self.events.append((frame.f_lineno, event))
return self.trace
class TraceTestCase(unittest.TestCase):
# Disable gc collection when tracing, otherwise the
# deallocators may be traced as well.
def setUp(self):
self.using_gc = gc.isenabled()
gc.disable()
def tearDown(self):
if self.using_gc:
gc.enable()
def compare_events(self, line_offset, events, expected_events):
events = [(l - line_offset, e) for (l, e) in events]
if events != expected_events:
self.fail(
"events did not match expectation:\n" +
"\n".join(difflib.ndiff([str(x) for x in expected_events],
[str(x) for x in events])))
def run_and_compare(self, func, events):
tracer = Tracer()
sys.settrace(tracer.trace)
func()
sys.settrace(None)
self.compare_events(func.func_code.co_firstlineno,
tracer.events, events)
def run_test(self, func):
self.run_and_compare(func, func.events)
def run_test2(self, func):
tracer = Tracer()
func(tracer.trace)
sys.settrace(None)
self.compare_events(func.func_code.co_firstlineno,
tracer.events, func.events)
def set_and_retrieve_none(self):
sys.settrace(None)
assert sys.gettrace() is None
def set_and_retrieve_func(self):
def fn(*args):
pass
sys.settrace(fn)
try:
assert sys.gettrace() is fn
finally:
sys.settrace(None)
def test_01_basic(self):
self.run_test(basic)
def test_02_arigo(self):
self.run_test(arigo_example)
def test_03_one_instr(self):
self.run_test(one_instr_line)
def test_04_no_pop_blocks(self):
self.run_test(no_pop_blocks)
def test_05_no_pop_tops(self):
self.run_test(no_pop_tops)
def test_06_call(self):
self.run_test(call)
def test_07_raise(self):
self.run_test(test_raise)
def test_08_settrace_and_return(self):
self.run_test2(settrace_and_return)
def test_09_settrace_and_raise(self):
self.run_test2(settrace_and_raise)
def test_10_ireturn(self):
self.run_test(ireturn_example)
def test_11_tightloop(self):
self.run_test(tightloop_example)
def test_12_tighterloop(self):
self.run_test(tighterloop_example)
def test_13_genexp(self):
self.run_test(generator_example)
# issue1265: if the trace function contains a generator,
# and if the traced function contains another generator
# that is not completely exhausted, the trace stopped.
# Worse: the 'finally' clause was not invoked.
tracer = Tracer()
sys.settrace(tracer.traceWithGenexp)
generator_example()
sys.settrace(None)
self.compare_events(generator_example.__code__.co_firstlineno,
tracer.events, generator_example.events)
def test_14_onliner_if(self):
def onliners():
if True: False
else: True
return 0
self.run_and_compare(
onliners,
[(0, 'call'),
(1, 'line'),
(3, 'line'),
(3, 'return')])
def test_15_loops(self):
# issue1750076: "while" expression is skipped by debugger
def for_example():
for x in range(2):
pass
self.run_and_compare(
for_example,
[(0, 'call'),
(1, 'line'),
(2, 'line'),
(1, 'line'),
(2, 'line'),
(1, 'line'),
(1, 'return')])
def while_example():
# While expression should be traced on every loop
x = 2
while x > 0:
x -= 1
self.run_and_compare(
while_example,
[(0, 'call'),
(2, 'line'),
(3, 'line'),
(4, 'line'),
(3, 'line'),
(4, 'line'),
(3, 'line'),
(3, 'return')])
def test_16_blank_lines(self):
exec("def f():\n" + "\n" * 256 + " pass")
self.run_and_compare(
f,
[(0, 'call'),
(257, 'line'),
(257, 'return')])
class RaisingTraceFuncTestCase(unittest.TestCase):
def trace(self, frame, event, arg):
"""A trace function that raises an exception in response to a
specific trace event."""
if event == self.raiseOnEvent:
raise ValueError # just something that isn't RuntimeError
else:
return self.trace
def f(self):
"""The function to trace; raises an exception if that's the case
we're testing, so that the 'exception' trace event fires."""
if self.raiseOnEvent == 'exception':
x = 0
y = 1 // x
else:
return 1
def run_test_for_event(self, event):
"""Tests that an exception raised in response to the given event is
handled OK."""
self.raiseOnEvent = event
try:
for i in xrange(sys.getrecursionlimit() + 1):
sys.settrace(self.trace)
try:
self.f()
except ValueError:
pass
else:
self.fail("exception not thrown!")
except RuntimeError:
self.fail("recursion counter not reset")
# Test the handling of exceptions raised by each kind of trace event.
def test_call(self):
self.run_test_for_event('call')
def test_line(self):
self.run_test_for_event('line')
def test_return(self):
self.run_test_for_event('return')
def test_exception(self):
self.run_test_for_event('exception')
def test_trash_stack(self):
def f():
for i in range(5):
print i # line tracing will raise an exception at this line
def g(frame, why, extra):
if (why == 'line' and
frame.f_lineno == f.func_code.co_firstlineno + 2):
raise RuntimeError, "i am crashing"
return g
sys.settrace(g)
try:
f()
except RuntimeError:
# the test is really that this doesn't segfault:
import gc
gc.collect()
else:
self.fail("exception not propagated")
# 'Jump' tests: assigning to frame.f_lineno within a trace function
# moves the execution position - it's how debuggers implement a Jump
# command (aka. "Set next statement").
class JumpTracer:
"""Defines a trace function that jumps from one place to another,
with the source and destination lines of the jump being defined by
the 'jump' property of the function under test."""
def __init__(self, function):
self.function = function
self.jumpFrom = function.jump[0]
self.jumpTo = function.jump[1]
self.done = False
def trace(self, frame, event, arg):
if not self.done and frame.f_code == self.function.func_code:
firstLine = frame.f_code.co_firstlineno
if frame.f_lineno == firstLine + self.jumpFrom:
# Cope with non-integer self.jumpTo (because of
# no_jump_to_non_integers below).
try:
frame.f_lineno = firstLine + self.jumpTo
except TypeError:
frame.f_lineno = self.jumpTo
self.done = True
return self.trace
# The first set of 'jump' tests are for things that are allowed:
def jump_simple_forwards(output):
output.append(1)
output.append(2)
output.append(3)
jump_simple_forwards.jump = (1, 3)
jump_simple_forwards.output = [3]
def jump_simple_backwards(output):
output.append(1)
output.append(2)
jump_simple_backwards.jump = (2, 1)
jump_simple_backwards.output = [1, 1, 2]
def jump_out_of_block_forwards(output):
for i in 1, 2:
output.append(2)
for j in [3]: # Also tests jumping over a block
output.append(4)
output.append(5)
jump_out_of_block_forwards.jump = (3, 5)
jump_out_of_block_forwards.output = [2, 5]
def jump_out_of_block_backwards(output):
output.append(1)
for i in [1]:
output.append(3)
for j in [2]: # Also tests jumping over a block
output.append(5)
output.append(6)
output.append(7)
jump_out_of_block_backwards.jump = (6, 1)
jump_out_of_block_backwards.output = [1, 3, 5, 1, 3, 5, 6, 7]
def jump_to_codeless_line(output):
output.append(1)
# Jumping to this line should skip to the next one.
output.append(3)
jump_to_codeless_line.jump = (1, 2)
jump_to_codeless_line.output = [3]
def jump_to_same_line(output):
output.append(1)
output.append(2)
output.append(3)
jump_to_same_line.jump = (2, 2)
jump_to_same_line.output = [1, 2, 3]
# Tests jumping within a finally block, and over one.
def jump_in_nested_finally(output):
try:
output.append(2)
finally:
output.append(4)
try:
output.append(6)
finally:
output.append(8)
output.append(9)
jump_in_nested_finally.jump = (4, 9)
jump_in_nested_finally.output = [2, 9]
# The second set of 'jump' tests are for things that are not allowed:
def no_jump_too_far_forwards(output):
try:
output.append(2)
output.append(3)
except ValueError, e:
output.append('after' in str(e))
no_jump_too_far_forwards.jump = (3, 6)
no_jump_too_far_forwards.output = [2, True]
def no_jump_too_far_backwards(output):
try:
output.append(2)
output.append(3)
except ValueError, e:
output.append('before' in str(e))
no_jump_too_far_backwards.jump = (3, -1)
no_jump_too_far_backwards.output = [2, True]
# Test each kind of 'except' line.
def no_jump_to_except_1(output):
try:
output.append(2)
except:
e = sys.exc_info()[1]
output.append('except' in str(e))
no_jump_to_except_1.jump = (2, 3)
no_jump_to_except_1.output = [True]
def no_jump_to_except_2(output):
try:
output.append(2)
except ValueError:
e = sys.exc_info()[1]
output.append('except' in str(e))
no_jump_to_except_2.jump = (2, 3)
no_jump_to_except_2.output = [True]
def no_jump_to_except_3(output):
try:
output.append(2)
except ValueError, e:
output.append('except' in str(e))
no_jump_to_except_3.jump = (2, 3)
no_jump_to_except_3.output = [True]
def no_jump_to_except_4(output):
try:
output.append(2)
except (ValueError, RuntimeError), e:
output.append('except' in str(e))
no_jump_to_except_4.jump = (2, 3)
no_jump_to_except_4.output = [True]
def no_jump_forwards_into_block(output):
try:
output.append(2)
for i in 1, 2:
output.append(4)
except ValueError, e:
output.append('into' in str(e))
no_jump_forwards_into_block.jump = (2, 4)
no_jump_forwards_into_block.output = [True]
def no_jump_backwards_into_block(output):
try:
for i in 1, 2:
output.append(3)
output.append(4)
except ValueError, e:
output.append('into' in str(e))
no_jump_backwards_into_block.jump = (4, 3)
no_jump_backwards_into_block.output = [3, 3, True]
def no_jump_into_finally_block(output):
try:
try:
output.append(3)
x = 1
finally:
output.append(6)
except ValueError, e:
output.append('finally' in str(e))
no_jump_into_finally_block.jump = (4, 6)
no_jump_into_finally_block.output = [3, 6, True] # The 'finally' still runs
def no_jump_out_of_finally_block(output):
try:
try:
output.append(3)
finally:
output.append(5)
output.append(6)
except ValueError, e:
output.append('finally' in str(e))
no_jump_out_of_finally_block.jump = (5, 1)
no_jump_out_of_finally_block.output = [3, True]
# This verifies the line-numbers-must-be-integers rule.
def no_jump_to_non_integers(output):
try:
output.append(2)
except ValueError, e:
output.append('integer' in str(e))
no_jump_to_non_integers.jump = (2, "Spam")
no_jump_to_non_integers.output = [True]
# This verifies that you can't set f_lineno via _getframe or similar
# trickery.
def no_jump_without_trace_function():
try:
previous_frame = sys._getframe().f_back
previous_frame.f_lineno = previous_frame.f_lineno
except ValueError, e:
# This is the exception we wanted; make sure the error message
# talks about trace functions.
if 'trace' not in str(e):
raise
else:
# Something's wrong - the expected exception wasn't raised.
raise RuntimeError, "Trace-function-less jump failed to fail"
class JumpTestCase(unittest.TestCase):
def compare_jump_output(self, expected, received):
if received != expected:
self.fail( "Outputs don't match:\n" +
"Expected: " + repr(expected) + "\n" +
"Received: " + repr(received))
def run_test(self, func):
tracer = JumpTracer(func)
sys.settrace(tracer.trace)
output = []
func(output)
sys.settrace(None)
self.compare_jump_output(func.output, output)
def test_01_jump_simple_forwards(self):
self.run_test(jump_simple_forwards)
def test_02_jump_simple_backwards(self):
self.run_test(jump_simple_backwards)
def test_03_jump_out_of_block_forwards(self):
self.run_test(jump_out_of_block_forwards)
def test_04_jump_out_of_block_backwards(self):
self.run_test(jump_out_of_block_backwards)
def test_05_jump_to_codeless_line(self):
self.run_test(jump_to_codeless_line)
def test_06_jump_to_same_line(self):
self.run_test(jump_to_same_line)
def test_07_jump_in_nested_finally(self):
self.run_test(jump_in_nested_finally)
def test_08_no_jump_too_far_forwards(self):
self.run_test(no_jump_too_far_forwards)
def test_09_no_jump_too_far_backwards(self):
self.run_test(no_jump_too_far_backwards)
def test_10_no_jump_to_except_1(self):
self.run_test(no_jump_to_except_1)
def test_11_no_jump_to_except_2(self):
self.run_test(no_jump_to_except_2)
def test_12_no_jump_to_except_3(self):
self.run_test(no_jump_to_except_3)
def test_13_no_jump_to_except_4(self):
self.run_test(no_jump_to_except_4)
def test_14_no_jump_forwards_into_block(self):
self.run_test(no_jump_forwards_into_block)
def test_15_no_jump_backwards_into_block(self):
self.run_test(no_jump_backwards_into_block)
def test_16_no_jump_into_finally_block(self):
self.run_test(no_jump_into_finally_block)
def test_17_no_jump_out_of_finally_block(self):
self.run_test(no_jump_out_of_finally_block)
def test_18_no_jump_to_non_integers(self):
self.run_test(no_jump_to_non_integers)
def test_19_no_jump_without_trace_function(self):
no_jump_without_trace_function()
def test_20_large_function(self):
d = {}
exec("""def f(output): # line 0
x = 0 # line 1
y = 1 # line 2
''' # line 3
%s # lines 4-1004
''' # line 1005
x += 1 # line 1006
output.append(x) # line 1007
return""" % ('\n' * 1000,), d)
f = d['f']
f.jump = (2, 1007)
f.output = [0]
self.run_test(f)
def test_main():
test_support.run_unittest(
TraceTestCase,
RaisingTraceFuncTestCase,
JumpTestCase
)
if __name__ == "__main__":
test_main()
| 29.762029
| 76
| 0.546773
|
970bf22dd34ba9137616d25879b43de3b962c6e0
| 2,333
|
py
|
Python
|
flask/lib/python2.7/site-packages/sqlparse/__init__.py
|
navodissa/python-flask
|
76589828a4eba01ccbf019d9336f359f04f37067
|
[
"BSD-3-Clause"
] | null | null | null |
flask/lib/python2.7/site-packages/sqlparse/__init__.py
|
navodissa/python-flask
|
76589828a4eba01ccbf019d9336f359f04f37067
|
[
"BSD-3-Clause"
] | null | null | null |
flask/lib/python2.7/site-packages/sqlparse/__init__.py
|
navodissa/python-flask
|
76589828a4eba01ccbf019d9336f359f04f37067
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright (C) 2008 Andi Albrecht, albrecht.andi@gmail.com
#
# This module is part of python-sqlparse and is released under
# the BSD License: http://www.opensource.org/licenses/bsd-license.php.
"""Parse SQL statements."""
__version__ = '0.1.13'
# Setup namespace
from sqlparse import engine
from sqlparse import filters
from sqlparse import formatter
# Deprecated in 0.1.5. Will be removed in 0.2.0
from sqlparse.exceptions import SQLParseError
def parse(sql, encoding=None):
"""Parse sql and return a list of statements.
    :param sql: A string containing one or more SQL statements.
:param encoding: The encoding of the statement (optional).
:returns: A tuple of :class:`~sqlparse.sql.Statement` instances.
"""
return tuple(parsestream(sql, encoding))
def parsestream(stream, encoding=None):
"""Parses sql statements from file-like object.
:param stream: A file-like object.
:param encoding: The encoding of the stream contents (optional).
:returns: A generator of :class:`~sqlparse.sql.Statement` instances.
"""
stack = engine.FilterStack()
stack.full_analyze()
return stack.run(stream, encoding)
def format(sql, **options):
"""Format *sql* according to *options*.
Available options are documented in :ref:`formatting`.
In addition to the formatting options this function accepts the
keyword "encoding" which determines the encoding of the statement.
:returns: The formatted SQL statement as string.
"""
encoding = options.pop('encoding', None)
stack = engine.FilterStack()
options = formatter.validate_options(options)
stack = formatter.build_filter_stack(stack, options)
stack.postprocess.append(filters.SerializerUnicode())
return ''.join(stack.run(sql, encoding))
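# Illustrative example (output shown for the options used here):
#   format("select * from foo", reindent=True, keyword_case="upper")
#   -> "SELECT *\nFROM foo"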
def split(sql, encoding=None):
"""Split *sql* into single statements.
    :param sql: A string containing one or more SQL statements.
:param encoding: The encoding of the statement (optional).
:returns: A list of strings.
"""
stack = engine.FilterStack()
stack.split_statements = True
return [unicode(stmt).strip() for stmt in stack.run(sql, encoding)]
from sqlparse.engine.filter import StatementFilter
def split2(stream):
splitter = StatementFilter()
return list(splitter.process(None, stream))
| 29.531646
| 72
| 0.71796
|
d1b39a88deb09497ae687341263900f5b5464bbf
| 2,761
|
py
|
Python
|
monitor/format.py
|
madbuda/chia-monitor
|
88ba28bba0ce54b0b06d4a10b3ed05c886a29d1f
|
[
"Apache-2.0"
] | null | null | null |
monitor/format.py
|
madbuda/chia-monitor
|
88ba28bba0ce54b0b06d4a10b3ed05c886a29d1f
|
[
"Apache-2.0"
] | null | null | null |
monitor/format.py
|
madbuda/chia-monitor
|
88ba28bba0ce54b0b06d4a10b3ed05c886a29d1f
|
[
"Apache-2.0"
] | null | null | null |
from chia.util.misc import format_bytes, format_minutes
def format_og_plot_count(plot_count: int) -> str:
return f"🌾 OG Plot Count: {plot_count}"
def format_og_plot_size(plot_size: int) -> str:
return f"🧺 OG Plot Size: {format_bytes(plot_size)}"
def format_portable_plot_count(plot_count: int) -> str:
return f"🌾 Portable Plot Count: {plot_count}"
def format_portable_plot_size(plot_size: int) -> str:
return f"🧺 Portable Plot Size: {format_bytes(plot_size)}"
def format_plot_count(plot_count: int) -> str:
return f"🌾 Plot Count: {plot_count}"
def format_plot_size(plot_size: int) -> str:
return f"🧺 Plot Size: {format_bytes(plot_size)}"
def format_plot_delta_24h(count_delta: int, size_delta: int) -> str:
size_prefix = "+" if size_delta > 0 else "-"
return f"🚜 Plot Change 24h: {count_delta:+} ({size_prefix}{format_bytes(abs(size_delta))})"
def format_balance(balance: int) -> str:
return f"💰 Total Balance: {balance/1e12:.5f} XCH"
def format_space(space: int) -> str:
return f"💾 Current Netspace: {format_bytes(space)}"
def format_diffculty(diffculty: int) -> str:
return f"📈 Farming Difficulty: {diffculty}"
def format_peak_height(peak_height: int, fix_indent=False) -> str:
indent = " " * (1 if fix_indent else 0)
return f"🏔️ {indent}Peak Height: {peak_height}"
def format_synced(synced: int) -> str:
return f"🔄 Synced: {synced}"
def format_full_node_count(full_node_count: int, node_type="Full Node") -> str:
return f"📶 {node_type} Peer Count: {full_node_count}"
def format_hostname(hostname: str, fix_indent=False) -> str:
indent = " " * (1 if fix_indent else 0)
return f"🖥️ {indent}Host: {hostname}"
def format_challenge_hash(challenge_hash: str) -> str:
return f"🎰 Challenge Hash: {challenge_hash}"
def format_challenges_per_min(challenges_per_min: float) -> str:
return f"🎰 Challenges Per Minute: {challenges_per_min:.2f}"
def format_signage_point(signage_point: str) -> str:
return f"⌛ Signage Point: {signage_point}"
def format_signage_points_per_min(signage_points_per_min: float) -> str:
return f"⌛ Signage Points Per Minute: {signage_points_per_min:.2f}"
def format_signage_point_index(signage_point_index: int) -> str:
return f"🔏 Signage Point Index: {signage_point_index}"
def format_passed_filter(passed_filter: int) -> str:
return f"🔎 Passed Filter: {passed_filter}"
def format_passed_filter_per_min(passed_filter_per_min: float) -> str:
return f"🔎 Passed Filters Per Minute: {passed_filter_per_min:.2f}"
def format_proofs(proofs: int) -> str:
return f"✅ Total Proofs found: {proofs}"
def format_expected_time_to_win(minutes: int) -> str:
return f"🕰️ Time To Win: {format_minutes(minutes)}"
| 28.463918
| 95
| 0.716407
|
eea7c362d774ff8b826b9c421c8c16ffc1736bce
| 2,561
|
py
|
Python
|
src/nibeuplink/__init__.py
|
lipoja/nibeuplink
|
f8da14cc0d31342cb44026d9eded2bf82b52460b
|
[
"MIT"
] | 20
|
2018-10-16T11:01:00.000Z
|
2022-01-04T13:19:42.000Z
|
src/nibeuplink/__init__.py
|
lipoja/nibeuplink
|
f8da14cc0d31342cb44026d9eded2bf82b52460b
|
[
"MIT"
] | 4
|
2018-05-14T13:05:16.000Z
|
2022-01-17T05:42:52.000Z
|
src/nibeuplink/__init__.py
|
lipoja/nibeuplink
|
f8da14cc0d31342cb44026d9eded2bf82b52460b
|
[
"MIT"
] | 8
|
2018-10-16T11:34:51.000Z
|
2021-08-08T09:45:11.000Z
|
import asyncio
import logging
from typing import Dict
from .const import (
MAX_REQUEST_PARAMETERS,
PARAM_HOTWATER_SYSTEMS,
PARAM_CLIMATE_SYSTEMS,
PARAM_COMPRESSOR_FREQUENCY,
PARAM_PUMP_SPEED_HEATING_MEDIUM,
PARAM_STATUS_COOLING,
PARAM_VENTILATION_SYSTEMS,
SMARTHOME_MODES,
)
from .typing import StatusItemIcon, SetThermostatModel, Thermostat, SmartHomeSystem
from .types import (
VentilationSystem,
ClimateSystem,
HotWaterSystem,
)
from .monitor import Monitor
from .uplink import Uplink
from .session import UplinkSession
_LOGGER = logging.getLogger(__name__)
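# The helpers below probe one availability parameter per subsystem and keep only
# the entries whose raw value indicates that the accessory is actually present.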
async def get_active_climate(
uplink: Uplink, system_id: int
) -> Dict[str, ClimateSystem]:
active = {}
async def check(key: str, value: ClimateSystem):
if value.active_accessory is None:
active[key] = value
return
available = await uplink.get_parameter(system_id, value.active_accessory)
_LOGGER.debug("Climate %s:%s active_accessory: %s", system_id, key, available)
if available and available["rawValue"] == 1:
active[key] = value
await asyncio.gather(
*[check(key, value) for key, value in PARAM_CLIMATE_SYSTEMS.items()]
)
return active
async def get_active_hotwater(
uplink: Uplink, system_id: int
) -> Dict[str, HotWaterSystem]:
active = {}
async def check(key: str, value: HotWaterSystem):
if value.hot_water_production is None:
active[key] = value
return
available = await uplink.get_parameter(system_id, value.hot_water_production)
_LOGGER.debug(
"Hotwater %s:%s hot_water_production: %s", system_id, key, available
)
if available and available["rawValue"] == 1:
active[key] = value
await asyncio.gather(
*[check(key, value) for key, value in PARAM_HOTWATER_SYSTEMS.items()]
)
return active
async def get_active_ventilations(
uplink: Uplink, system_id: int
) -> Dict[str, VentilationSystem]:
active = {}
async def check(key: str, value: VentilationSystem):
if value.fan_speed is None:
return
available = await uplink.get_parameter(system_id, value.fan_speed)
_LOGGER.debug("Ventilation %s:%s fan_speed: %s", system_id, key, available)
if available and available["rawValue"] != -32768:
active[key] = value
await asyncio.gather(
*[check(key, value) for key, value in PARAM_VENTILATION_SYSTEMS.items()]
)
return active
| 26.402062
| 86
| 0.673565
|
583be644a39ee9a19e6a60a41e8d13fd37ed3b58
| 381
|
py
|
Python
|
examples/tla202x_simpletest.py
|
jposada202020/Adafruit_CircuitPython_TLA202x
|
ab2449aadd4256ec68efd773ef264b1d6e86a3cd
|
[
"Unlicense",
"MIT-0",
"MIT"
] | 47
|
2021-02-15T23:02:36.000Z
|
2022-03-04T21:30:03.000Z
|
examples/tla202x_simpletest.py
|
jposada202020/Adafruit_CircuitPython_TLA202x
|
ab2449aadd4256ec68efd773ef264b1d6e86a3cd
|
[
"Unlicense",
"MIT-0",
"MIT"
] | 7
|
2021-02-19T20:00:08.000Z
|
2022-01-14T10:51:12.000Z
|
examples/tla202x_simpletest.py
|
jposada202020/Adafruit_CircuitPython_TLA202x
|
ab2449aadd4256ec68efd773ef264b1d6e86a3cd
|
[
"Unlicense",
"MIT-0",
"MIT"
] | 14
|
2021-02-20T17:40:56.000Z
|
2022-01-01T19:53:38.000Z
|
# SPDX-FileCopyrightText: 2020 Bryan Siepert, written for Adafruit Industries
#
# SPDX-License-Identifier: Unlicense
# pylint:disable=no-member
import board
import busio
from adafruit_tla202x import TLA2024
i2c = busio.I2C(board.SCL, board.SDA)
tla = TLA2024(i2c)
for i in range(4):
channel = i
tla.input_channel = channel
print("Channel", channel, ":", tla.voltage)
| 23.8125
| 77
| 0.742782
|
dbec6d5e2c5866c22bca8a9ed415125efd9a0de6
| 29,762
|
py
|
Python
|
vistrails/packages/vtk/tf_widget.py
|
remram44/VisTrails-mybinder
|
ee7477b471920d738f3ac430932f01901b56ed44
|
[
"BSD-3-Clause"
] | 83
|
2015-01-05T14:50:50.000Z
|
2021-09-17T19:45:26.000Z
|
vistrails/packages/vtk/tf_widget.py
|
remram44/VisTrails-mybinder
|
ee7477b471920d738f3ac430932f01901b56ed44
|
[
"BSD-3-Clause"
] | 254
|
2015-01-02T20:39:19.000Z
|
2018-11-28T17:16:44.000Z
|
vistrails/packages/vtk/tf_widget.py
|
remram44/VisTrails-mybinder
|
ee7477b471920d738f3ac430932f01901b56ed44
|
[
"BSD-3-Clause"
] | 40
|
2015-04-17T16:46:36.000Z
|
2021-09-28T22:43:24.000Z
|
###############################################################################
##
## Copyright (C) 2014-2016, New York University.
## Copyright (C) 2011-2014, NYU-Poly.
## Copyright (C) 2006-2011, University of Utah.
## All rights reserved.
## Contact: contact@vistrails.org
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the New York University nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
##############################################################################
# Transfer Function Widget for VTK
from __future__ import division
from PyQt4 import QtCore, QtGui
from vistrails.core.modules.vistrails_module import Module
from vistrails.core.modules.basic_modules import Constant
from vistrails.core.modules.module_registry import get_module_registry
from vistrails.core.system import get_elementtree_library
from vistrails.core.utils.color import ColorByName
from vistrails.gui.modules.constant_configuration import ConstantWidgetMixin
import vtk
import math
import pickle
import copy
import StringIO
import unittest
ElementTree = get_elementtree_library()
################################################################################
# etc
def clamp(v, mn, mx, eps=0.0):
mne = mn + eps
mxe = mx - eps
if v < mne: return mn
if v > mxe: return mx
return v
# Because of a Qt bug see
# http://bugreports.qt.nokia.com/browse/QTBUG-17985
# We cannot set the scene from 0 to 1. In this case we will set it
# 4000 x 4000 with GLOBAL_SCALE. When the bug is fixed, just set it to 1.0
GLOBAL_SCALE = 4000.0
##############################################################################
# Transfer Function object
class TransferFunction(object):
def __init__(self):
self._min_range = 0.0
self._max_range = 1.0
self._pts = []
def set_range(self, mn, mx):
self._min_range = mn
self._max_range = mx
def set_on_vtk_volume_property(self, vtk_volume_property):
# Builds the opacity and color functions
of = vtk.vtkPiecewiseFunction()
cf = vtk.vtkColorTransferFunction()
vp = vtk_volume_property
for pt in self._pts:
(scalar, opacity, color) = pt
# Map scalar to tf range
s = self._min_range + (self._max_range - self._min_range) * scalar
of.AddPoint(s, opacity)
cf.AddRGBPoint(s, color[0], color[1], color[2])
vp.SetScalarOpacity(of)
vp.SetColor(cf)
def get_vtk_transfer_functions(self):
of = vtk.vtkPiecewiseFunction()
cf = vtk.vtkColorTransferFunction()
for pt in self._pts:
(scalar, opacity, color) = pt
# Map scalar to tf range
s = self._min_range + (self._max_range - self._min_range) * scalar
of.AddPoint(s, opacity)
cf.AddRGBPoint(s, color[0], color[1], color[2])
return (of,cf)
def add_point(self, scalar, opacity, color):
self._pts.append((scalar, opacity, color))
self._pts.sort()
def get_value(self, scalar):
"""get_value(scalar): returns the opacity and color
linearly interpolated at the value. Useful for
adding knots."""
ix = 0
while ix < len(self._pts) and self._pts[ix][0] < scalar:
ix += 1
if ix == 0:
return (self._pts[0][1], self._pts[0][2])
elif ix == len(self._pts):
return (self._pts[-1][1], self._pts[-1][2])
else:
u = ((scalar - self._pts[ix-1][0]) /
(self._pts[ix][0] - self._pts[ix-1][0]))
do = self._pts[ix][1] - self._pts[ix-1][1]
dr = self._pts[ix][2][0] - self._pts[ix-1][2][0]
dg = self._pts[ix][2][1] - self._pts[ix-1][2][1]
db = self._pts[ix][2][2] - self._pts[ix-1][2][2]
return (self._pts[ix-1][1] + u * do,
(self._pts[ix-1][2][0] + u * dr,
self._pts[ix-1][2][1] + u * dg,
self._pts[ix-1][2][2] + u * db))
def __copy__(self):
result = TransferFunction()
result._min_range = self._min_range
result._max_range = self._max_range
result._pts = copy.copy(self._pts)
return result
def __eq__(self, other):
if type(other) != type(self):
return False
if self._min_range != other._min_range:
return False
if self._max_range != other._max_range:
return False
for my_pt, other_pt in zip(self._pts, other._pts):
if my_pt != other_pt:
return False
return True
def __ne__(self, other):
return not self.__eq__(other)
def serialize(self, node=None):
"""serialize(node: ElementTree.Element) -> str
Convert this object to an XML representation in string format.
"""
if node is None:
node = ElementTree.Element('transfer_function')
node.set('min_range', str(self._min_range))
node.set('max_range', str(self._max_range))
for pt in self._pts:
ptNode = ElementTree.SubElement(node, 'point')
ptNode.set('scalar', str(pt[0]))
ptNode.set('opacity', str(pt[1]))
color = pt[2]
colorNode = ElementTree.SubElement(ptNode, 'color')
colorNode.set('R', str(color[0]))
colorNode.set('G', str(color[1]))
colorNode.set('B', str(color[2]))
return ElementTree.tostring(node)
@staticmethod
def parse(strNode):
"""parse(strNode: str) -> TransferFunction
Parses a string representing a TransferFunction and returns a
TransferFunction object
"""
try:
node = ElementTree.fromstring(strNode)
except SyntaxError:
#it was serialized using pickle
class FixUnpickler(pickle.Unpickler):
def find_class(self, module, name):
if module == 'packages.vtk.tf_widget':
module = 'vistrails.packages.vtk.tf_widget'
return pickle.Unpickler.find_class(self, module, name)
tf = FixUnpickler(StringIO.StringIO(strNode.decode('hex'))).load()
tf._pts.sort()
return tf
if node.tag != 'transfer_function':
return None
#read attributes
tf = TransferFunction()
tf._min_range = float(node.get('min_range', "0.0"))
tf._max_range = float(node.get('max_range', "1.0"))
for ptNode in node.getchildren():
if ptNode.tag == 'point':
scalar = float(ptNode.get('scalar','-1.0'))
opacity = float(ptNode.get('opacity', '1.0'))
for colorNode in ptNode.getchildren():
if colorNode.tag == 'color':
color = (float(colorNode.get('R','0.0')),
float(colorNode.get('G','0.0')),
float(colorNode.get('B','0.0')))
break
else:
assert "'point' node has no 'color' child"
tf._pts.append((scalar,opacity,color))
tf._pts.sort()
return tf
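# Illustrative round-trip sketch (not part of the original module): serialize()
# and parse() are inverses for the XML representation, e.g.
#
#   tf = TransferFunction()
#   tf.set_range(0.0, 255.0)
#   tf.add_point(0.0, 0.0, (0.0, 0.0, 0.0))
#   tf.add_point(1.0, 1.0, (1.0, 1.0, 1.0))
#   assert TransferFunction.parse(tf.serialize()) == tf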
##############################################################################
# Graphics Items
class TransferFunctionPoint(QtGui.QGraphicsEllipseItem):
selection_pens = { True: QtGui.QPen(QtGui.QBrush(
QtGui.QColor(*(ColorByName.get_int('goldenrod_medium')))),GLOBAL_SCALE * 0.012),
False: QtGui.QPen() }
def __init__(self, scalar, opacity, color, parent=None):
QtGui.QGraphicsEllipseItem.__init__(self, parent)
self._scalar = scalar
self._opacity = opacity
self._color = QtGui.QColor(color[0]*255,
color[1]*255,
color[2]*255)
self.setPen(QtGui.QPen(QtGui.QColor(0,0,0)))
self.setFlag(QtGui.QGraphicsItem.ItemIsMovable)
self.setFlag(QtGui.QGraphicsItem.ItemIsSelectable)
self.setFlag(QtGui.QGraphicsItem.ItemIsFocusable)
if QtCore.QT_VERSION >= 0x40600:
self.setFlag(QtGui.QGraphicsItem.ItemSendsGeometryChanges)
self.setZValue(2.0)
self._sx = 1.0
self._sy = 1.0
# fixed scale
self._fsx = GLOBAL_SCALE
self._fsy = GLOBAL_SCALE
self._left_line = None
self._right_line = None
self._point = QtCore.QPointF(scalar * self._fsx, opacity * self._fsy)
self.refresh()
self.setToolTip("Double-click to change color\n"
"Right-click to remove point\n"
"Scalar: %.5f, Opacity: %.5f" % (self._scalar,
self._opacity))
# This sets up the linked list of Lines
def keyPressEvent(self, event):
if event.key() == QtCore.Qt.Key_Backspace or \
event.key() == QtCore.Qt.Key_Delete:
self.remove_self()
def refresh(self):
dx = self._fsx * 0.025 / self._sx
dy = self._fsy * 0.025 / self._sy
# this is the setup
self.setBrush(QtGui.QBrush(self._color))
self.setRect(-dx,
-dy,
2 * dx, 2 * dy)
self.setPos(self._fsx * self._scalar,
self._fsy * self._opacity)
self.update()
def update_scale(self, sx, sy):
self._sx = sx
self._sy = sy
self.refresh()
def itemChange(self, change, value):
if change == QtGui.QGraphicsItem.ItemSelectedChange:
self.setPen(self.selection_pens[value])
if change == QtGui.QGraphicsItem.ItemPositionChange:
# moves point
# value is now a QPointF, not a QPoint so no conversion needed
pt = value
pt.setY(clamp(pt.y(), 0.0, 1.0 * self._fsy) )
self._opacity = pt.y() / self._fsy
self._point.setY(pt.y())
if not self._left_line:
pt.setX(0.0)
elif not self._right_line:
pt.setX(1.0 * self._fsx)
else:
assert self._left_line._point_right == self
assert self._right_line._point_left == self
pt.setX(clamp(pt.x(),
self._left_line._point_left._point.x(),
self._right_line._point_right._point.x(),
1e-6))
self._point.setX(pt.x())
self._scalar = pt.x() / self._fsx
if self._left_line:
self._left_line.refresh()
if self._right_line:
self._right_line.refresh()
if self.parentItem():
self.parentItem()._tf_poly.setup()
self.setToolTip("Double-click to change color\n"
"Right-click to remove point\n"
"Scalar: %.5f, Opacity: %.5f" % (self._scalar,
self._opacity))
return QtGui.QGraphicsItem.itemChange(self, change, pt)
return QtGui.QGraphicsItem.itemChange(self, change, value)
def remove_self(self):
if not self._left_line or not self._right_line:
# Ignore, self is a corner node that can't be removed
return
# Removes the right line and self, re-ties data structure
self._left_line._point_right = self._right_line._point_right
self._left_line._point_right._left_line = self._left_line
# be friends with garbage collector
self._right_line._point_left = None
self._right_line._point_right = None
self.parentItem()._tf_poly.setup()
self.scene().removeItem(self._right_line)
self.scene().removeItem(self)
self._left_line.refresh()
def mouseDoubleClickEvent(self, event):
new_color = QtGui.QColorDialog.getColor(self._color)
if not new_color.isValid():
return
self._color = new_color
if self._left_line:
self._left_line.refresh()
if self._right_line:
self._right_line.refresh()
self.refresh()
# sometimes the graphicsitem gets recreated, and we need to abort
if self.parentItem():
self.parentItem()._tf_poly.setup()
QtGui.QGraphicsEllipseItem.mouseDoubleClickEvent(self, event)
def mousePressEvent(self, event):
if event.button() == QtCore.Qt.RightButton:
event.accept()
self.remove_self()
else:
QtGui.QGraphicsEllipseItem.mousePressEvent(self, event)
def paint(self, painter, option, widget=None):
""" paint(painter: QPainter, option: QStyleOptionGraphicsItem,
widget: QWidget) -> None
Perform painting of the point without the ugly default dashed-line black
square
"""
painter.setBrush(self.brush())
painter.setPen(self.pen())
painter.drawEllipse(self.rect())
def add_self_to_transfer_function(self, tf):
tf.add_point(self._scalar,
self._opacity,
(self._color.redF(),
self._color.greenF(),
self._color.blueF()))
class TransferFunctionPolygon(QtGui.QGraphicsPolygonItem):
def __init__(self, parent=None):
QtGui.QGraphicsPolygonItem.__init__(self, parent)
def setup(self):
# This inspects the scene, finds the left-most point, and
# then builds the polygon traversing the linked list structure
pt = self.parentItem().get_leftmost_point()
if not pt:
return
self.setZValue(1.25)
g = QtGui.QLinearGradient()
g.setStart(0.0, 0.5)
g.setFinalStop(1.0, 0.5)
g.setCoordinateMode(QtGui.QGradient.ObjectBoundingMode)
p = QtGui.QPen()
p.setStyle(QtCore.Qt.NoPen)
pts = [QtCore.QPointF(pt.x(), 0)]
self.setPen(p)
while 1:
c = QtGui.QColor(pt._color)
c.setAlphaF(pt._opacity)
g.setColorAt(pt._scalar, c)
pts.append(pt._point)
# move cursor fwd
if pt._right_line:
pt = pt._right_line._point_right
else:
break
self.setBrush(QtGui.QBrush(g))
pts.append(QtCore.QPointF(pt._point.x(), 0))
polygon = QtGui.QPolygonF(pts)
self.setPolygon(polygon)
class TransferFunctionLine(QtGui.QGraphicsPolygonItem):
def __init__(self, point_left, point_right, parent=None):
assert point_right._scalar >= point_left._scalar
QtGui.QGraphicsPolygonItem.__init__(self, parent)
self._point_left = point_left
self._point_right = point_right
self._point_left._right_line = self
self._point_right._left_line = self
self.setup(1.0, 1.0)
self._sx = 1.0
self._sy = 1.0
# fixed scale
self._fsx = GLOBAL_SCALE
self._fsy = GLOBAL_SCALE
self.setToolTip('')
def setup(self, sx, sy):
d = self._point_right._point - self._point_left._point
d_normal = QtCore.QPointF(d.y(), -d.x())
l = math.sqrt(d.x() * d.x() + d.y() * d.y())
if l != 0.0:
d_normal /= l
d_normal *= GLOBAL_SCALE * 0.010
d_normal.setX(d_normal.x() / sx)
d_normal.setY(d_normal.y() / sy)
ps = [self._point_left._point + d_normal,
self._point_right._point + d_normal,
self._point_right._point - d_normal,
self._point_left._point - d_normal]
self.setPolygon(QtGui.QPolygonF(ps))
self.setZValue(1.5)
# Gradient for filling
g = QtGui.QLinearGradient()
g.setCoordinateMode(QtGui.QGradient.ObjectBoundingMode)
g.setStart(self._point_left._scalar, self._point_left._opacity)
g.setFinalStop(self._point_right._scalar, self._point_right._opacity)
g.setColorAt(0.0, self._point_left._color)
g.setColorAt(1.0, self._point_right._color)
self.setBrush(QtGui.QBrush(g))
# Gradient for outlining
g = QtGui.QLinearGradient()
g.setStart(self._point_left._point)
g.setFinalStop(self._point_right._point)
dark_pl = QtGui.QColor(self._point_left._color.red() * 0.5,
self._point_left._color.green() * 0.5,
self._point_left._color.blue() * 0.5)
dark_pr = QtGui.QColor(self._point_right._color.red() * 0.5,
self._point_right._color.green() * 0.5,
self._point_right._color.blue() * 0.5)
g.setColorAt(0.0, dark_pl)
g.setColorAt(1.0, dark_pr)
p = QtGui.QPen()
p.setBrush(QtGui.QBrush(g))
self.setPen(p)
def update_scale(self, sx, sy):
self._sx = sx
self._sy = sy
self.refresh()
def refresh(self):
self.setup(self._sx, self._sy)
def mouseDoubleClickEvent(self, event):
p = event.pos()
c_left = self._point_left._color
c_right = self._point_right._color
u = ((p.x() - self._point_left._point.x()) /
(self._point_right._point.x() - self._point_left._point.x()))
new_c = (u * c_right.redF() + (1-u) * c_left.redF(),
u * c_right.greenF() + (1-u) * c_left.greenF(),
u * c_right.blueF() + (1-u) * c_left.blueF())
new_point = TransferFunctionPoint(p.x()/ self._fsx, p.y()/self._fsy, new_c, self.parentItem())
self.parentItem()._tf_items.append(new_point)
new_line = TransferFunctionLine(new_point, self._point_right, self.parentItem())
self.parentItem()._tf_items.append(new_line)
new_point._left_line = self
self._point_right = new_point
new_line.update_scale(self._point_left._sx,
self._point_left._sy)
new_point.update_scale(self._point_left._sx,
self._point_left._sy)
new_point.refresh()
self.refresh()
def mousePressEvent(self, event):
# This needs to be here, otherwise mouseDoubleClickEvent does
# not get called.
event.accept()
##############################################################################
# Scene, view, widget
class QGraphicsTransferFunction(QtGui.QGraphicsWidget, ConstantWidgetMixin):
contentsChanged = QtCore.pyqtSignal(tuple)
def __init__(self, param, parent=None):
QtGui.QGraphicsWidget.__init__(self, parent)
ConstantWidgetMixin.__init__(self, param.strValue)
self.setAcceptHoverEvents(True)
if not param.strValue:
self._tf = copy.copy(default_tf)
else:
self._tf = TransferFunction.parse(param.strValue)
self._tf_items = []
poly = TransferFunctionPolygon(self)
poly.setup()
self._tf_poly = poly
self.create_tf_items(self._tf)
self._tf_poly.setup()
#current scale
self._sx = 1.0
self._sy = 1.0
# Add outlines
line_color = QtGui.QColor(200, 200, 200)
pen = QtGui.QPen(line_color)
ps = [QtCore.QPointF(0.0, 0.0),
QtCore.QPointF(GLOBAL_SCALE, 0.0),
QtCore.QPointF(GLOBAL_SCALE, GLOBAL_SCALE),
QtCore.QPointF(0.0, GLOBAL_SCALE)]
polygon = QtGui.QGraphicsPolygonItem(QtGui.QPolygonF(ps), self)
polygon.setPen(pen)
for i in xrange(51):
u = GLOBAL_SCALE * float(i) / 50.0
line = QtGui.QGraphicsLineItem(QtCore.QLineF(u, 0.0, u, GLOBAL_SCALE), self)
line.setPen(pen)
line = QtGui.QGraphicsLineItem(QtCore.QLineF(0.0, u, GLOBAL_SCALE, u), self)
line.setPen(pen)
self.setGeometry(self.boundingRect())
# restore y axis inversion
self.setTransform(QtGui.QTransform(1, 0, 0, -1, 0, GLOBAL_SCALE))
self.setTransformOriginPoint(0, GLOBAL_SCALE)
self.reset_transfer_function(self._tf)
def boundingRect(self):
return QtCore.QRectF(0.0, 0.0, GLOBAL_SCALE, GLOBAL_SCALE)
def reset_transfer_function(self, tf):
self.create_tf_items(tf)
self.update_scale(self._sx, self._sy)
self._tf_poly.setup()
def create_tf_items(self, tf):
if self._tf_items and not self.scene(): # not added to scene yet
return
items = copy.copy(self._tf_items)
for item in items:
self.scene().removeItem(item)
self._tf_items = []
if len(tf._pts) == 0:
pt_left = TransferFunctionPoint(0.0, 0.0, (0.0, 0.0, 0.0), self)
self._tf_items.append(pt_left)
pt_right = TransferFunctionPoint(1.0, 0.0, (0.0, 0.0, 0.0), self)
self._tf_items.append(pt_right)
self._tf_items.append(TransferFunctionLine(pt_left, pt_right, self))
else:
pts = [TransferFunctionPoint(*pt, parent=self)
for pt in tf._pts]
self._tf_items.extend(pts)
lns = [TransferFunctionLine(pt_l, pt_r, self)
for (pt_l, pt_r) in zip(pts[:-1], pts[1:])]
self._tf_items.extend(lns)
def add_knot(self, scalar, opacity):
pass
def update_scale(self, sx, sy):
for item in self._tf_items:
item.update_scale(sx, sy)
self._sx = sx
self._sy = sy
def get_leftmost_point(self):
pt = None
for item in self._tf_items:
if hasattr(item, '_left_line') and not item._left_line:
pt = item
break
return pt
def get_transfer_function(self):
result = TransferFunction()
pt = self.get_leftmost_point()
while 1:
pt.add_self_to_transfer_function(result)
if pt._right_line:
pt = pt._right_line._point_right
else:
break
return result
def contents(self):
return self.get_transfer_function().serialize()
def setContents(self, strValue, silent=True):
if not strValue:
self._tf = copy.copy(default_tf)
else:
self._tf = TransferFunction.parse(strValue)
self.reset_transfer_function(self._tf)
if not silent:
self.update_parent()
def hoverLeaveEvent(self, event):
self.update_parent()
QtGui.QGraphicsWidget.hoverLeaveEvent(self, event)
class TransferFunctionScene(QtGui.QGraphicsScene):
def __init__(self, param, parent=None):
QtGui.QGraphicsScene.__init__(self, parent)
self.tf = QGraphicsTransferFunction(param)
self.addItem(self.tf)
class TransferFunctionView(QtGui.QGraphicsView):
def __init__(self, parent=None):
QtGui.QGraphicsView.__init__(self, parent)
self.setRenderHint(QtGui.QPainter.Antialiasing)
self.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
self.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
def resizeEvent(self, event):
self.resetMatrix()
self.setMatrix(QtGui.QMatrix(event.size().width() / (GLOBAL_SCALE *10.0/9) , 0,
0, event.size().height() / (GLOBAL_SCALE*10.0/9), GLOBAL_SCALE, 0))
self.scene().tf.update_scale(event.size().width()/(2000.0/9), event.size().height()/(2000.0/9))
def focusOutEvent(self, event):
self.parent().update_parent()
QtGui.QGraphicsView.focusOutEvent(self, event)
default_tf = TransferFunction()
default_tf.add_point(0.0, 0.0, (0.0, 0.0, 0.0))
default_tf.add_point(1.0, 0.0, (0.0, 0.0, 0.0))
class TransferFunctionWidget(QtGui.QWidget, ConstantWidgetMixin):
contentsChanged = QtCore.pyqtSignal(tuple)
GraphicsItem = QGraphicsTransferFunction
def __init__(self, param, parent=None):
QtGui.QWidget.__init__(self, parent)
self._scene = TransferFunctionScene(param, self)
self._scene.tf.update_parent = self.update_parent
layout = QtGui.QVBoxLayout()
self.setLayout(layout)
self._view = TransferFunctionView(self)
self._view.setScene(self._scene)
self._view.setMinimumSize(200,200)
self._view.setMaximumHeight(280)
self._view.show()
self._view.setSizePolicy(QtGui.QSizePolicy.Expanding,
QtGui.QSizePolicy.Expanding)
# TODO remove this
self._view.setMatrix(QtGui.QMatrix(1, 0, 0, -1, GLOBAL_SCALE, 0))
self.setMinimumSize(260,240)
caption = QtGui.QLabel("Double-click on the line to add a point")
font = QtGui.QFont('Arial', 11)
font.setItalic(True)
caption.setFont(font)
layout.addWidget(self._view)
layout.addWidget(caption)
def contents(self):
return self._scene.tf.contents()
def setContents(self, strValue, silent=True):
self._scene.tf.setContents(strValue, silent)
def set_last_contents(self, contents):
self._scene.tf._last_contents = contents
def get_last_contents(self):
return self._scene.tf._last_contents
_last_contents = property(get_last_contents, set_last_contents)
##############################################################################
# Helper module to adjust range
class vtkScaledTransferFunction(Module):
# FIXME Add documentation
_input_ports = [
['Input', 'vtkAlgorithmOutput'],
['Dataset', 'vtkDataObject'],
['Range', '(basic:Float, basic:Float)'],
['TransferFunction', 'TransferFunction']]
_output_ports = [
['TransferFunction', 'TransferFunction'],
['vtkPiecewiseFunction', 'vtkPiecewiseFunction'],
['vtkColorTransferFunction', 'vtkColorTransferFunction']]
def compute(self):
reg = get_module_registry()
tf = self.get_input('TransferFunction')
new_tf = copy.copy(tf)
if self.has_input('Input'):
port = self.get_input('Input')
algo = port.GetProducer()
output = algo.GetOutput(port.GetIndex())
(new_tf._min_range, new_tf._max_range) = output.GetScalarRange()
elif self.has_input('Dataset'):
algo = self.get_input('Dataset')
output = algo
(new_tf._min_range, new_tf._max_range) = output.GetScalarRange()
else:
(new_tf._min_range, new_tf._max_range) = self.get_input('Range')
self.set_output('TransferFunction', new_tf)
(of,cf) = new_tf.get_vtk_transfer_functions()
self.set_output('vtkPiecewiseFunction', of)
self.set_output('vtkColorTransferFunction', cf)
class TransferFunctionConstant(Constant):
default_value = default_tf
@staticmethod
def translate_to_python(x):
return TransferFunction.parse(x)
@staticmethod
def translate_to_string(x):
return x.serialize()
@staticmethod
def validate(x):
return isinstance(x, TransferFunction)
@staticmethod
def get_widget_class():
return TransferFunctionWidget
##############################################################################
class TestTransferFunction(unittest.TestCase):
def test_serialization(self):
tf = TransferFunction()
tf._min_range = 0.1
tf._max_range = 2.0
tf._pts.append((0.3,0.5,(1.0,1.0,1.0)))
tf._pts.append((0.6,0.7,(1.0,0.5,1.0)))
tf._pts.append((0.2,0.8,(1.0,0.0,1.0)))
tf._pts.sort()
#simulate old serialization method
ser1 = pickle.dumps(tf).encode('hex')
ser2 = tf.serialize()
tf1 = TransferFunction.parse(ser1)
tf2 = TransferFunction.parse(ser2)
assert tf == tf1
assert tf == tf2
assert tf1 == tf2
TransferFunctionConstant.__name__ = "TransferFunction"
_modules = [TransferFunctionConstant, vtkScaledTransferFunction]
if __name__ == "__main__":
unittest.main()
| 38.254499
| 104
| 0.586486
|
275385256995154c07ed305d1e5f2ebd25c41c28
| 1,574
|
py
|
Python
|
wsgi_entry_test.py
|
auyeongwy/mportal
|
e406baea802093569c90c7206649c5afd9431dab
|
[
"Apache-2.0"
] | null | null | null |
wsgi_entry_test.py
|
auyeongwy/mportal
|
e406baea802093569c90c7206649c5afd9431dab
|
[
"Apache-2.0"
] | null | null | null |
wsgi_entry_test.py
|
auyeongwy/mportal
|
e406baea802093569c90c7206649c5afd9431dab
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2014 Au Yeong Wing Yau
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import cgi
def application(environ, start_response):
"""Implements the WSGI entry function. This just prints out everything in the 'environ' dictionary for degugging and testing."""
status = '200 OK'
output = str(environ)
# Check HTTP Request type and print the parameters
output += '\n\nRequest Method: '
if environ['REQUEST_METHOD'] == 'GET' and 'QUERY_STRING' in environ:
output += 'GET\nQuery String: '+environ['QUERY_STRING']
elif environ['REQUEST_METHOD'] == 'POST':
post_data_len = 0
output += 'POST'
if 'CONTENT_LENGTH' in environ and 'wsgi.input' in environ:
post_data_len = int(environ['CONTENT_LENGTH'])
post_data = environ['wsgi.input'].read(post_data_len)
post_data_dic = cgi.parse_qs(post_data)
output += '\n'+str(post_data_dic)
else:
output += '\nPOST ERROR'
else:
output += '\nPOST ERROR'
response_headers = [('Content-type', 'text/plain'),('Content-Length', str(len(output)))]
start_response(status, response_headers)
return [output]
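# Illustrative local run (an assumption, not part of the original file), using
# only the standard library's reference WSGI server:
#
#   from wsgiref.simple_server import make_server
#   make_server('localhost', 8000, application).serve_forever()
#
# Browsing to http://localhost:8000/?a=1 then echoes the WSGI environ back.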
| 35.772727
| 129
| 0.730623
|
73a0a41caf69c903d5638add82d4704d6dacfdbc
| 1,022
|
py
|
Python
|
switchcalendar/urls.py
|
marcosorive/switchcalendar
|
a8771929b93e6b69a75d80a72299116e0175e961
|
[
"MIT"
] | null | null | null |
switchcalendar/urls.py
|
marcosorive/switchcalendar
|
a8771929b93e6b69a75d80a72299116e0175e961
|
[
"MIT"
] | 1
|
2019-12-26T07:57:24.000Z
|
2020-01-02T08:21:20.000Z
|
switchcalendar/urls.py
|
marcosorive/switchcalendar
|
a8771929b93e6b69a75d80a72299116e0175e961
|
[
"MIT"
] | null | null | null |
"""switchcalendar URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import include, path
from django.conf.urls import url
from django.views.generic import TemplateView
from django.contrib.auth import views as auth_views
urlpatterns = [
path('',include('games.urls')),
path('accounts/',include('accounts.urls')),
path('contact/',include('contact.urls')),
path('totoro/', admin.site.urls),
]
| 35.241379
| 77
| 0.721135
|
8f8c483ab12efa4a47f5f74fba0fc177c1543442
| 892
|
py
|
Python
|
test/test_campfire_callback.py
|
Logicworks/opsgenie-python-sdk
|
244c4c40ddcc25e70df5ba4425ab8d7c8da59c18
|
[
"Apache-2.0"
] | null | null | null |
test/test_campfire_callback.py
|
Logicworks/opsgenie-python-sdk
|
244c4c40ddcc25e70df5ba4425ab8d7c8da59c18
|
[
"Apache-2.0"
] | null | null | null |
test/test_campfire_callback.py
|
Logicworks/opsgenie-python-sdk
|
244c4c40ddcc25e70df5ba4425ab8d7c8da59c18
|
[
"Apache-2.0"
] | 1
|
2020-11-07T11:27:13.000Z
|
2020-11-07T11:27:13.000Z
|
# coding: utf-8
"""
OpsGenie REST API
OpsGenie OpenAPI Specification # noqa: E501
OpenAPI spec version: 2.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import opsgenie_swagger
from opsgenie_swagger.models.campfire_callback import CampfireCallback # noqa: E501
from opsgenie_swagger.rest import ApiException
class TestCampfireCallback(unittest.TestCase):
"""CampfireCallback unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testCampfireCallback(self):
"""Test CampfireCallback"""
# FIXME: construct object with mandatory attributes with example values
# model = opsgenie_swagger.models.campfire_callback.CampfireCallback() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| 21.756098
| 92
| 0.710762
|
851f01b02b72aed187e197bb723e94fa414176aa
| 1,412
|
py
|
Python
|
locations/spiders/stater_bros.py
|
mfjackson/alltheplaces
|
37c90b4041c80a574e6e4c2f886883e97df4b636
|
[
"MIT"
] | null | null | null |
locations/spiders/stater_bros.py
|
mfjackson/alltheplaces
|
37c90b4041c80a574e6e4c2f886883e97df4b636
|
[
"MIT"
] | null | null | null |
locations/spiders/stater_bros.py
|
mfjackson/alltheplaces
|
37c90b4041c80a574e6e4c2f886883e97df4b636
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import scrapy
import json
from locations.items import GeojsonPointItem
class StaterBrosSpider(scrapy.Spider):
name = "stater-bros"
item_attributes = {"brand": "Stater Bros"}
allowed_domains = ["www.staterbros.com"]
def start_requests(self):
urls = [
"http://www.staterbros.com/store-locator/",
]
for url in urls:
yield scrapy.Request(url=url, callback=self.parse)
def parse(self, response):
stores = response.xpath('//div[@class="store"]')
for index, store in enumerate(stores):
properties = {
"addr_full": store.xpath("@data-address").extract_first(),
"phone": store.xpath(
'div[@class="left"]/div[@class="phone"]/p/text()'
).extract()[1],
"ref": index,
"lon": store.xpath("@data-longitude").extract_first(),
"lat": store.xpath("@data-latitude").extract_first(),
"opening_hours": " ".join(
store
.xpath('div[@class="right"]/div[@class="hours"]/p/text()')
.extract()[:2]
),
"name": store.xpath(
'div[@class="left"]/div[@class="name"]/text()'
).extract_first(),
}
yield GeojsonPointItem(**properties)
| 35.3
| 78
| 0.50779
|
327c5809b7bbc3d6fdade2feb4313d4a7615d431
| 758
|
py
|
Python
|
lib/mpl_toolkits/axisartist/__init__.py
|
pierre-haessig/matplotlib
|
0d945044ca3fbf98cad55912584ef80911f330c6
|
[
"MIT",
"PSF-2.0",
"BSD-3-Clause"
] | 35
|
2015-10-23T08:15:36.000Z
|
2022-02-03T10:17:15.000Z
|
lib/mpl_toolkits/axisartist/__init__.py
|
pierre-haessig/matplotlib
|
0d945044ca3fbf98cad55912584ef80911f330c6
|
[
"MIT",
"PSF-2.0",
"BSD-3-Clause"
] | 3
|
2015-09-17T16:27:45.000Z
|
2018-07-31T05:59:33.000Z
|
lib/mpl_toolkits/axisartist/__init__.py
|
pierre-haessig/matplotlib
|
0d945044ca3fbf98cad55912584ef80911f330c6
|
[
"MIT",
"PSF-2.0",
"BSD-3-Clause"
] | 25
|
2016-01-18T12:19:11.000Z
|
2021-12-11T15:45:17.000Z
|
from axislines import Axes, Subplot, AxesZero, SubplotZero, GridHelperRectlinear, \
AxisArtistHelperRectlinear, AxisArtistHelper, GridHelperBase, AxisArtist
from axis_artist import AxisArtist, GridlinesCollection
from grid_helper_curvelinear import GridHelperCurveLinear
from floating_axes import FloatingAxes, FloatingSubplot
from mpl_toolkits.axes_grid1.parasite_axes import \
subplot_class_factory, \
parasite_axes_class_factory, parasite_axes_auxtrans_class_factory, \
host_axes_class_factory
ParasiteAxes = parasite_axes_class_factory(Axes)
ParasiteAxesAuxTrans = parasite_axes_auxtrans_class_factory(axes_class=ParasiteAxes)
HostAxes = host_axes_class_factory(axes_class=Axes)
SubplotHost = subplot_class_factory(HostAxes)
| 32.956522
| 84
| 0.853562
|
55cfd0c7eb226c3fd304b50e219a05863a15743d
| 13,944
|
py
|
Python
|
zerver/tornado/socket.py
|
Supermanu/zulip
|
26f6d708c2e30cfe50d9d61031edb759e8117596
|
[
"Apache-2.0"
] | null | null | null |
zerver/tornado/socket.py
|
Supermanu/zulip
|
26f6d708c2e30cfe50d9d61031edb759e8117596
|
[
"Apache-2.0"
] | 15
|
2020-06-05T18:44:15.000Z
|
2022-03-11T23:26:03.000Z
|
zerver/tornado/socket.py
|
Supermanu/zulip
|
26f6d708c2e30cfe50d9d61031edb759e8117596
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import absolute_import
from typing import Any, Dict, Mapping, Optional, Text, Union
from django.conf import settings
from django.utils.timezone import now as timezone_now
from django.utils.translation import ugettext as _
from django.contrib.sessions.models import Session as djSession
try:
from django.middleware.csrf import _compare_salted_tokens
except ImportError:
# This function was added in Django 1.10.
def _compare_salted_tokens(token1, token2):
# type: (str, str) -> bool
return token1 == token2
import sockjs.tornado
from sockjs.tornado.session import ConnectionInfo
import tornado.ioloop
import ujson
import logging
import time
from zerver.models import UserProfile, get_user_profile_by_id, get_client
from zerver.lib.queue import queue_json_publish
from zerver.lib.actions import check_send_message, extract_recipients
from zerver.decorator import JsonableError
from zerver.lib.utils import statsd
from zerver.middleware import record_request_start_data, record_request_stop_data, \
record_request_restart_data, write_log_line, format_timedelta
from zerver.lib.redis_utils import get_redis_client
from zerver.lib.sessions import get_session_user
from zerver.tornado.event_queue import get_client_descriptor
from zerver.tornado.exceptions import BadEventQueueIdError
logger = logging.getLogger('zulip.socket')
def get_user_profile(session_id):
# type: (Optional[Text]) -> Optional[UserProfile]
if session_id is None:
return None
try:
djsession = djSession.objects.get(expire_date__gt=timezone_now(),
session_key=session_id)
except djSession.DoesNotExist:
return None
try:
return get_user_profile_by_id(get_session_user(djsession))
except (UserProfile.DoesNotExist, KeyError):
return None
connections = dict() # type: Dict[Union[int, str], SocketConnection]
def get_connection(id):
# type: (Union[int, str]) -> Optional[SocketConnection]
return connections.get(id)
def register_connection(id, conn):
# type: (Union[int, str], SocketConnection) -> None
# Kill any old connections if they exist
if id in connections:
connections[id].close()
conn.client_id = id
connections[conn.client_id] = conn
def deregister_connection(conn):
# type: (SocketConnection) -> None
assert conn.client_id is not None
del connections[conn.client_id]
redis_client = get_redis_client()
def req_redis_key(req_id):
# type: (Text) -> Text
return u'socket_req_status:%s' % (req_id,)
class CloseErrorInfo(object):
def __init__(self, status_code, err_msg):
# type: (int, str) -> None
self.status_code = status_code
self.err_msg = err_msg
class SocketConnection(sockjs.tornado.SockJSConnection):
client_id = None # type: Optional[Union[int, str]]
def on_open(self, info):
# type: (ConnectionInfo) -> None
log_data = dict(extra='[transport=%s]' % (self.session.transport_name,))
record_request_start_data(log_data)
ioloop = tornado.ioloop.IOLoop.instance()
self.authenticated = False
self.session.user_profile = None
self.close_info = None # type: Optional[CloseErrorInfo]
self.did_close = False
try:
self.browser_session_id = info.get_cookie(settings.SESSION_COOKIE_NAME).value
self.csrf_token = info.get_cookie(settings.CSRF_COOKIE_NAME).value
except AttributeError:
# The request didn't contain the necessary cookie values. We can't
# close immediately because sockjs-tornado doesn't expect a close
# inside on_open(), so do it on the next tick.
self.close_info = CloseErrorInfo(403, "Initial cookie lacked required values")
ioloop.add_callback(self.close)
return
def auth_timeout():
# type: () -> None
self.close_info = CloseErrorInfo(408, "Timeout while waiting for authentication")
self.close()
self.timeout_handle = ioloop.add_timeout(time.time() + 10, auth_timeout)
write_log_line(log_data, path='/socket/open', method='SOCKET',
remote_ip=info.ip, email='unknown', client_name='?')
def authenticate_client(self, msg):
# type: (Dict[str, Any]) -> None
if self.authenticated:
self.session.send_message({'req_id': msg['req_id'], 'type': 'response',
'response': {'result': 'error', 'msg': 'Already authenticated'}})
return
user_profile = get_user_profile(self.browser_session_id)
if user_profile is None:
raise JsonableError(_('Unknown or missing session'))
self.session.user_profile = user_profile
if not _compare_salted_tokens(msg['request']['csrf_token'], self.csrf_token):
raise JsonableError(_('CSRF token does not match that in cookie'))
if 'queue_id' not in msg['request']:
raise JsonableError(_("Missing 'queue_id' argument"))
queue_id = msg['request']['queue_id']
client = get_client_descriptor(queue_id)
if client is None:
raise BadEventQueueIdError(queue_id)
if user_profile.id != client.user_profile_id:
raise JsonableError(_("You are not the owner of the queue with id '%s'") % (queue_id,))
self.authenticated = True
register_connection(queue_id, self)
response = {'req_id': msg['req_id'], 'type': 'response',
'response': {'result': 'success', 'msg': ''}}
status_inquiries = msg['request'].get('status_inquiries')
if status_inquiries is not None:
results = {} # type: Dict[str, Dict[str, str]]
for inquiry in status_inquiries:
status = redis_client.hgetall(req_redis_key(inquiry)) # type: Dict[bytes, bytes]
if len(status) == 0:
result = {'status': 'not_received'}
elif b'response' not in status:
result = {'status': status[b'status'].decode('utf-8')}
else:
result = {'status': status[b'status'].decode('utf-8'),
'response': ujson.loads(status[b'response'])}
results[str(inquiry)] = result
response['response']['status_inquiries'] = results
self.session.send_message(response)
ioloop = tornado.ioloop.IOLoop.instance()
ioloop.remove_timeout(self.timeout_handle)
def on_message(self, msg_raw):
# type: (str) -> None
log_data = dict(extra='[transport=%s' % (self.session.transport_name,))
record_request_start_data(log_data)
msg = ujson.loads(msg_raw)
if self.did_close:
logger.info("Received message on already closed socket! transport=%s user=%s client_id=%s"
% (self.session.transport_name,
self.session.user_profile.email if self.session.user_profile is not None else 'unknown',
self.client_id))
self.session.send_message({'req_id': msg['req_id'], 'type': 'ack'})
if msg['type'] == 'auth':
log_data['extra'] += ']'
try:
self.authenticate_client(msg)
# TODO: Fill in the correct client
write_log_line(log_data, path='/socket/auth', method='SOCKET',
remote_ip=self.session.conn_info.ip,
email=self.session.user_profile.email,
client_name='?')
except JsonableError as e:
response = e.to_json()
self.session.send_message({'req_id': msg['req_id'], 'type': 'response',
'response': response})
write_log_line(log_data, path='/socket/auth', method='SOCKET',
remote_ip=self.session.conn_info.ip,
email='unknown', client_name='?',
status_code=403, error_content=ujson.dumps(response))
return
else:
if not self.authenticated:
response = {'result': 'error', 'msg': "Not yet authenticated"}
self.session.send_message({'req_id': msg['req_id'], 'type': 'response',
'response': response})
write_log_line(log_data, path='/socket/service_request', method='SOCKET',
remote_ip=self.session.conn_info.ip,
email='unknown', client_name='?',
status_code=403, error_content=ujson.dumps(response))
return
redis_key = req_redis_key(msg['req_id'])
with redis_client.pipeline() as pipeline:
pipeline.hmset(redis_key, {'status': 'received'})
pipeline.expire(redis_key, 60 * 60 * 24)
pipeline.execute()
record_request_stop_data(log_data)
queue_json_publish("message_sender",
dict(request=msg['request'],
req_id=msg['req_id'],
server_meta=dict(user_id=self.session.user_profile.id,
client_id=self.client_id,
return_queue="tornado_return",
log_data=log_data,
request_environ=dict(REMOTE_ADDR=self.session.conn_info.ip))),
fake_message_sender)
def on_close(self):
# type: () -> None
log_data = dict(extra='[transport=%s]' % (self.session.transport_name,))
record_request_start_data(log_data)
if self.close_info is not None:
write_log_line(log_data, path='/socket/close', method='SOCKET',
remote_ip=self.session.conn_info.ip, email='unknown',
client_name='?', status_code=self.close_info.status_code,
error_content=self.close_info.err_msg)
else:
deregister_connection(self)
email = self.session.user_profile.email \
if self.session.user_profile is not None else 'unknown'
write_log_line(log_data, path='/socket/close', method='SOCKET',
remote_ip=self.session.conn_info.ip, email=email,
client_name='?')
self.did_close = True
def fake_message_sender(event):
# type: (Dict[str, Any]) -> None
"""This function is used only for Casper and backend tests, where
rabbitmq is disabled"""
log_data = dict() # type: Dict[str, Any]
record_request_start_data(log_data)
req = event['request']
try:
sender = get_user_profile_by_id(event['server_meta']['user_id'])
client = get_client("website")
msg_id = check_send_message(sender, client, req['type'],
extract_recipients(req['to']),
req['subject'], req['content'],
local_id=req.get('local_id', None),
sender_queue_id=req.get('queue_id', None))
resp = {"result": "success", "msg": "", "id": msg_id}
except JsonableError as e:
resp = {"result": "error", "msg": str(e)}
server_meta = event['server_meta']
server_meta.update({'worker_log_data': log_data,
'time_request_finished': time.time()})
result = {'response': resp, 'req_id': event['req_id'],
'server_meta': server_meta}
respond_send_message(result)
def respond_send_message(data):
# type: (Mapping[str, Any]) -> None
log_data = data['server_meta']['log_data']
record_request_restart_data(log_data)
worker_log_data = data['server_meta']['worker_log_data']
forward_queue_delay = worker_log_data['time_started'] - log_data['time_stopped']
return_queue_delay = log_data['time_restarted'] - data['server_meta']['time_request_finished']
service_time = data['server_meta']['time_request_finished'] - worker_log_data['time_started']
log_data['extra'] += ', queue_delay: %s/%s, service_time: %s]' % (
format_timedelta(forward_queue_delay), format_timedelta(return_queue_delay),
format_timedelta(service_time))
client_id = data['server_meta']['client_id']
connection = get_connection(client_id)
if connection is None:
logger.info("Could not find connection to send response to! client_id=%s" % (client_id,))
else:
connection.session.send_message({'req_id': data['req_id'], 'type': 'response',
'response': data['response']})
# TODO: Fill in client name
# TODO: Maybe fill in the status code correctly
write_log_line(log_data, path='/socket/service_request', method='SOCKET',
remote_ip=connection.session.conn_info.ip,
email=connection.session.user_profile.email, client_name='?')
# We disable the eventsource and htmlfile transports because they cannot
# securely send us the zulip.com cookie, which we use as part of our
# authentication scheme.
sockjs_router = sockjs.tornado.SockJSRouter(SocketConnection, "/sockjs",
{'sockjs_url': 'https://%s/static/third/sockjs/sockjs-0.3.4.js' % (
settings.EXTERNAL_HOST,),
'disabled_transports': ['eventsource', 'htmlfile']})
def get_sockjs_router():
# type: () -> sockjs.tornado.SockJSRouter
return sockjs_router
| 44.126582
| 115
| 0.606067
|
d75f79543c37ba1e97e60dd64342f0b326973fe8
| 1,621
|
py
|
Python
|
progress.py
|
rockie-yang/japanese-learning
|
d85e99a54b166c996f33f166be06e19f2bec8d49
|
[
"MIT"
] | null | null | null |
progress.py
|
rockie-yang/japanese-learning
|
d85e99a54b166c996f33f166be06e19f2bec8d49
|
[
"MIT"
] | null | null | null |
progress.py
|
rockie-yang/japanese-learning
|
d85e99a54b166c996f33f166be06e19f2bec8d49
|
[
"MIT"
] | null | null | null |
__author__ = 'Rockie Yang'
import os
import codecs
class Progress():
def __init__(self, words, progress_file = "progress.data"):
self.progress_file = progress_file
# self.practise_dict = practise_dict
self.progress_data = {}
self.words = words
def init_progress(self):
for word in self.words:
self.progress_data[word] = 0
def load_progress(self):
""" if the progress file exist update with the progress data"""
if os.path.isfile(self.progress_file):
with codecs.open(self.progress_file, 'r', 'utf-8') as f:
for line in f.readlines():
items = line.split()
key = items[0]
progress = int(items[1])
self.progress_data[key] = progress
def save_progress(self):
# write back progress data
with codecs.open(self.progress_file, "w", 'utf-8') as f:
for key in self.progress_data:
progress = self.progress_data[key]
f.write("%s %s\n" % (key, progress))
def inc(self, word):
self.progress_data[word] += 1
def learnt(self):
learnt_word = [word for word,progress in self.progress_data.items() if progress > 0]
return learnt_word
def least_familar_words(self, num):
learnt_word = self.learnt()
learnt_word.sort()
return learnt_word[:num]
def all_in_range(self, score_range):
meet = (score_range.begin < score < score_range.end for score in self.progress_data.values())
return all(meet)
| 30.584906
| 101
| 0.590993
|
a07e2f972c5e31aedbbea133630ec4dc194e1650
| 703
|
py
|
Python
|
server/djangoapp/admin.py
|
tquatrep/agfzb-CloudAppDevelopment_Capstone
|
52042821a17ff83ed3ecf292081755f40c67d6b9
|
[
"Apache-2.0"
] | null | null | null |
server/djangoapp/admin.py
|
tquatrep/agfzb-CloudAppDevelopment_Capstone
|
52042821a17ff83ed3ecf292081755f40c67d6b9
|
[
"Apache-2.0"
] | null | null | null |
server/djangoapp/admin.py
|
tquatrep/agfzb-CloudAppDevelopment_Capstone
|
52042821a17ff83ed3ecf292081755f40c67d6b9
|
[
"Apache-2.0"
] | null | null | null |
from django.contrib import admin
from .models import CarMake, CarModel
# Register your models here.
# CarModelInline class
class CarModelInline(admin.StackedInline):
model = CarModel
extra = 1
# CarModelAdmin class
class CarModelAdmin(admin.ModelAdmin):
fields = ['car_make', 'name', 'dealer_id', 'car_type', 'year']
#inlines = [CarModelInline]
# CarMakeInline class
class CarMakeInline(admin.StackedInline):
model = CarMake
extra = 1
# CarMakeAdmin class
class CarMakeAdmin(admin.ModelAdmin):
fields = ['name', 'description']
#inlines = [CarMakeInline]
# Register models here
admin.site.register(CarMake, CarMakeAdmin)
admin.site.register(CarModel, CarModelAdmin)
| 24.241379
| 66
| 0.74111
|
9466aa9488029a4bca4b3cac8c8fd1ba42dd6d7c
| 261
|
py
|
Python
|
samples/modules/module_use_2.py
|
nakednamor/naked-python
|
6580afe41c867888a08c5394d32c2bb4c60fa6d0
|
[
"MIT"
] | null | null | null |
samples/modules/module_use_2.py
|
nakednamor/naked-python
|
6580afe41c867888a08c5394d32c2bb4c60fa6d0
|
[
"MIT"
] | null | null | null |
samples/modules/module_use_2.py
|
nakednamor/naked-python
|
6580afe41c867888a08c5394d32c2bb4c60fa6d0
|
[
"MIT"
] | null | null | null |
# you can import only specific methods from a module
from module_definition import method_a, method_b
# if you import like this, you don't need to use the module_name when
# calling the method - instead you can simply use the method-name
method_a()
method_b()
| 32.625
| 69
| 0.785441
|
241626f46719391987ad5f3f50fe4392cb8253af
| 416
|
py
|
Python
|
engines.py
|
haydenshively/AME-261
|
1cf686835e4eead3a5d23cae65dd7644515fe665
|
[
"MIT"
] | 1
|
2022-03-29T23:59:23.000Z
|
2022-03-29T23:59:23.000Z
|
engines.py
|
haydenshively/AME-261
|
1cf686835e4eead3a5d23cae65dd7644515fe665
|
[
"MIT"
] | null | null | null |
engines.py
|
haydenshively/AME-261
|
1cf686835e4eead3a5d23cae65dd7644515fe665
|
[
"MIT"
] | null | null | null |
import csv
engine_db = {}
with open('engines.csv', newline='') as f:
csv_reader = csv.reader(f, delimiter=',')
line = 0
for row in csv_reader:
if line != 0 and row[0] != '':
engine_db[row[0]] = {
'thrust': float(row[1]),
'weight': float(row[2]),
'cj': float(row[3]),
'cost': int(row[4]),
}
line += 1
| 27.733333
| 45
| 0.4375
|
bd0be94bfdb327a1431e21eae2c1b221653e13fa
| 1,191
|
py
|
Python
|
lyftbutton/api/dashbutton.py
|
jairtrejo/lyftbutton-api
|
4c7d6b8b1ad6b613a23fa30e5e080b78d515762b
|
[
"MIT"
] | null | null | null |
lyftbutton/api/dashbutton.py
|
jairtrejo/lyftbutton-api
|
4c7d6b8b1ad6b613a23fa30e5e080b78d515762b
|
[
"MIT"
] | null | null | null |
lyftbutton/api/dashbutton.py
|
jairtrejo/lyftbutton-api
|
4c7d6b8b1ad6b613a23fa30e5e080b78d515762b
|
[
"MIT"
] | null | null | null |
import attr
from lyftbutton.dashbutton import DashButton
from lyftbutton.repository import LyftButton
from lyftbutton.utils.lambdafn import Response, api_handler
@api_handler
def get_dash_button(auth_context=None):
if not auth_context:
return Response(status_code=403)
return LyftButton.find(lyft_id=auth_context["lyft_id"]).dash_button
@api_handler(model=DashButton)
def edit_dash_button(new_button, auth_context=None):
if not auth_context:
return Response(status_code=403)
lyft_button = LyftButton.find(lyft_id=auth_context["lyft_id"])
dash_button = lyft_button.dash_button if lyft_button else DashButton()
button = DashButton(
**{
field: getattr(new_button, field, None)
or getattr(dash_button, field, None)
for field in attr.fields_dict(DashButton).keys()
}
)
lyft_button.dash_button = button
return button
@api_handler
def delete_dash_button(auth_context=None):
if not auth_context:
return Response(status_code=403)
lyft_button = LyftButton.find(lyft_id=auth_context["lyft_id"])
lyft_button.dash_button = None
return Response(status_code=204)
| 25.340426
| 74
| 0.729639
|
3bbeed5ee1fcf057fc8bbe7c092ddc283a76d309
| 3,161
|
py
|
Python
|
migrations/versions/a37f76ceca7a_initial_migration.py
|
Muriithijoe/Pitch
|
94fdc9dcd0880528873c8b5d96589f66f744f60d
|
[
"Unlicense"
] | null | null | null |
migrations/versions/a37f76ceca7a_initial_migration.py
|
Muriithijoe/Pitch
|
94fdc9dcd0880528873c8b5d96589f66f744f60d
|
[
"Unlicense"
] | null | null | null |
migrations/versions/a37f76ceca7a_initial_migration.py
|
Muriithijoe/Pitch
|
94fdc9dcd0880528873c8b5d96589f66f744f60d
|
[
"Unlicense"
] | null | null | null |
"""Initial Migration
Revision ID: a37f76ceca7a
Revises: 1050f7f8b2b3
Create Date: 2018-10-17 11:54:15.778282
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'a37f76ceca7a'
down_revision = '1050f7f8b2b3'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('comments', sa.Column('name', sa.String(length=255), nullable=True))
op.drop_constraint('comments_user_id_fkey', 'comments', type_='foreignkey')
op.drop_column('comments', 'pitch_id')
op.drop_column('comments', 'user_id')
op.drop_column('comments', 'comment_content')
op.add_column('pitches', sa.Column('comments_id', sa.Integer(), nullable=True))
op.add_column('pitches', sa.Column('description', sa.String(), nullable=True))
op.add_column('pitches', sa.Column('title', sa.String(length=255), nullable=True))
op.drop_constraint('pitches_user_id_fkey', 'pitches', type_='foreignkey')
op.create_foreign_key(None, 'pitches', 'comments', ['comments_id'], ['id'])
op.drop_column('pitches', 'pitch_content')
op.drop_column('pitches', 'user_id')
op.drop_column('pitches', 'pitch_category')
op.add_column('users', sa.Column('pass_hash', sa.String(length=255), nullable=True))
op.drop_index('ix_users_username', table_name='users')
op.drop_column('users', 'profile_pic_path')
op.drop_column('users', 'pass_secure')
op.drop_column('users', 'bio')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('users', sa.Column('bio', sa.VARCHAR(length=255), autoincrement=False, nullable=True))
op.add_column('users', sa.Column('pass_secure', sa.VARCHAR(length=255), autoincrement=False, nullable=True))
op.add_column('users', sa.Column('profile_pic_path', sa.VARCHAR(), autoincrement=False, nullable=True))
op.create_index('ix_users_username', 'users', ['username'], unique=False)
op.drop_column('users', 'pass_hash')
op.add_column('pitches', sa.Column('pitch_category', sa.VARCHAR(length=255), autoincrement=False, nullable=True))
op.add_column('pitches', sa.Column('user_id', sa.INTEGER(), autoincrement=False, nullable=True))
op.add_column('pitches', sa.Column('pitch_content', sa.VARCHAR(), autoincrement=False, nullable=True))
op.drop_constraint(None, 'pitches', type_='foreignkey')
op.create_foreign_key('pitches_user_id_fkey', 'pitches', 'users', ['user_id'], ['id'])
op.drop_column('pitches', 'title')
op.drop_column('pitches', 'description')
op.drop_column('pitches', 'comments_id')
op.add_column('comments', sa.Column('comment_content', sa.VARCHAR(), autoincrement=False, nullable=True))
op.add_column('comments', sa.Column('user_id', sa.INTEGER(), autoincrement=False, nullable=True))
op.add_column('comments', sa.Column('pitch_id', sa.INTEGER(), autoincrement=False, nullable=True))
op.create_foreign_key('comments_user_id_fkey', 'comments', 'users', ['user_id'], ['id'])
op.drop_column('comments', 'name')
# ### end Alembic commands ###
| 50.174603
| 117
| 0.708004
|
c506bcd0294a28d2b751323de1851dd66b9a3006
| 4,975
|
py
|
Python
|
Python/bokeh_practice/GridPlot_app.py
|
freestone-lab/irm
|
bacc43d159a750c8ad3c6e5c5d34962f15eed822
|
[
"BSD-3-Clause"
] | null | null | null |
Python/bokeh_practice/GridPlot_app.py
|
freestone-lab/irm
|
bacc43d159a750c8ad3c6e5c5d34962f15eed822
|
[
"BSD-3-Clause"
] | null | null | null |
Python/bokeh_practice/GridPlot_app.py
|
freestone-lab/irm
|
bacc43d159a750c8ad3c6e5c5d34962f15eed822
|
[
"BSD-3-Clause"
] | null | null | null |
import logging
logging.basicConfig(level=logging.DEBUG)
import numpy as np
# For now, hack adding it to the path...
import sys
sys.path.append('../')
import correlation_coefficient as cc
rpdf = cc.rpdf
from bokeh.plotting import figure
from bokeh.models import Plot, ColumnDataSource, GridPlot
from bokeh.properties import Instance
from bokeh.server.app import bokeh_app
from bokeh.server.utils.plugins import object_page
from bokeh.models.widgets import HBox, VBox, Slider, TextInput, VBoxForm, Tabs, Panel
class GridPlot_app(HBox):
extra_generated_classes = [["GridPlot_app", "GridPlot_app", "HBox"]]
slider_row = Instance(HBox)
plot_row = Instance(HBox)
rho = Instance(Slider)
N = Instance(Slider)
grid = Instance(GridPlot)
source = Instance(ColumnDataSource)
plot = Instance(Plot)
tab = Instance(Tabs)
panel1 = Instance(Panel)
panel2 = Instance(Panel)
@classmethod
def create(cls):
obj = cls()
obj.source = ColumnDataSource(data=dict(x=[], y=[]))
obj.rho = Slider(title="rho",
name='rho',
value=0.0,
start=-1.0,
end=1.1,
step=0.1)
obj.N = Slider(title="N",
name='N',
value=26,
start=1,
end=52,
step=1)
obj.slider_row = HBox(children=[obj.rho, obj.N])
#obj.children.append(obj.slider_row)
def create_plot():
toolset = "crosshair,pan,reset,resize,save,wheel_zoom"
plot = figure(title_text_font_size="12pt",
plot_height=200,
plot_width=200,
tools="tap",
title='Hey',
x_range=[-1, 1],
y_range=[0, 0.015],
min_border=2)
plot.line('x', 'y', source=obj.source,
line_width=3,
line_alpha=0.6)
plot.axis.major_label_text_font_size = '0pt'
plot.axis.major_tick_line_color = None # turn off major ticks
plot.axis[0].ticker.num_minor_ticks = 0 # turn off minor ticks
plot.axis[1].ticker.num_minor_ticks = 0
return plot
plots = []
for row in range(5):
row_plots = []
for col in range(5):
if col >= row:
row_plots.append(create_plot())
else:
row_plots.append(None)
plots.append(row_plots)
grid = GridPlot(children=plots,
toolbar_location=None)
obj.grid = grid
obj.plot_row = HBox(children=[obj.grid])
#obj.children.append(obj.plot_row)
obj.panel1 = Panel(child=obj.grid, title="Panel1")
obj.panel2 = Panel(child=obj.slider_row, title="Panel2")
obj.tab = Tabs(tabs=[obj.panel1, obj.panel2])
print(dir(obj.tab))
obj.children.append(obj.tab)
return obj
def setup_events(self):
"""Attaches the on_change event to the value property of the widget.
The callback is set to the input_change method of this app.
"""
super(GridPlot_app, self).setup_events()
# Slider event registration
#print("hi", self.rho.value)
#print(getattr(self, 'rho').value)
#self.rho.on_change('value', self, 'input_change')
#for w in ["rho", "N"]:
# getattr(self, w).on_change('value', self, 'input_change')
#print(dir(self.grid))
#self.grid.on_change('click', self, 'input_change')
#getattr(self, "grid").on_change('click', self, 'input_change')
print("hey")
def input_change(self, obj, attrname, old, new):
"""Executes whenever the input form changes.
It is responsible for updating the plot, or anything else you want.
Args:
obj : the object that changed
attrname : the attr that changed
old : old value of attr
new : new value of attr
"""
self.update_data()
def update_data(self):
"""Called each time that any watched property changes.
This updates the sin wave data with the most recent values of the
sliders. This is stored as two numpy arrays in a dict into the app's
data source property.
"""
# Generate the CC distribution
y = self.dr * rpdf(self.r,
self.rho.value,
self.N.value)
logging.debug("PARAMS: rho: %s N: %s",
self.rho.value,
self.N.value)
self.source.data = dict(x=self.r, y=y)
@bokeh_app.route("/IRM/ccDistribution/")
@object_page("rpdf")
def make_App():
app = GridPlot_app.create()
return app
| 31.891026
| 85
| 0.549347
|
fe47fddb915387fcacdbd576767eb3e381826c79
| 3,058
|
py
|
Python
|
src/loss.py
|
kw01sg/style-transfer-flask
|
3f1d888b5041612bb8be6958ca3be7c3271c9b56
|
[
"MIT"
] | 11
|
2019-06-30T06:15:31.000Z
|
2022-01-25T03:32:14.000Z
|
src/loss.py
|
kw01sg/style-transfer-flask
|
3f1d888b5041612bb8be6958ca3be7c3271c9b56
|
[
"MIT"
] | 7
|
2020-11-13T18:36:55.000Z
|
2022-02-10T00:32:41.000Z
|
src/loss.py
|
kw01sg/style-transfer-flask
|
3f1d888b5041612bb8be6958ca3be7c3271c9b56
|
[
"MIT"
] | null | null | null |
import tensorflow as tf
from src.utils import normalize_weights
def gram_matrix(input_tensor):
"""Expect input_tensor to have shape of n_batch * n_activation_height * n_activation_width * n_channel"""
return tf.einsum('abcd,abce->ade', input_tensor, input_tensor)
def style_content_loss(generated_outputs,
content_targets,
style_targets,
content_layer_weights,
style_layer_weights,
alpha,
beta):
"""
Calculates the weighted style and content loss between generated image and
content and style image
Args:
generated_outputs (Dict): Dictionary containing `content_outputs` and `style_outputs` outputs for generated image
content_targets (List(Tensor)): output of content layers for target content image
style_targets (List(Tensor)): output of style layers for target style image
content_layer_weights (List[float]): List of weights of each content output towards content loss
style_layer_weights (List[float]): List of weights of each style output towards style loss
alpha (float): Weight of content loss towards total loss
beta (float): Weight of style loss towards total loss
Returns:
loss: weighted style and content loss
"""
# Calculate content loss
content_loss = calculate_content_loss(
content_targets, generated_outputs['content_outputs'], content_layer_weights)
# calculate style loss
style_loss = calculate_style_loss(
style_targets, generated_outputs['style_outputs'], style_layer_weights)
# calculate total weighted loss
return alpha*content_loss + beta*style_loss
def calculate_content_loss(original_content, generated_content, content_layer_weights):
content_loss = 0.5 * tf.reduce_sum([weight * ((original - generated) ** 2) for original,
generated, weight in zip(original_content, generated_content, content_layer_weights)])
return content_loss
def calculate_style_loss(original_style, generated_style, style_layer_weights):
normalized_weights = normalize_weights(style_layer_weights)
gram_original = [gram_matrix(layer) for layer in original_style]
gram_generated = [gram_matrix(layer) for layer in generated_style]
style_loss = 0
for i in range(len(original_style)):
layer = original_style[i]
# Layers have shape of n_batch * n_activation_height * n_activation_width * n_channel
num_channel = layer.shape[-1]
activation_size = layer.shape[1] * layer.shape[2]
style_loss = style_loss + (normalized_weights[i] * tf.reduce_sum(
(gram_generated[i] - gram_original[i]) ** 2) / (4 * num_channel**2 * activation_size**2))
return style_loss
def calculate_variation_loss(image):
x_var = image[:, :, 1:, :] - image[:, :, :-1, :]
y_var = image[:, 1:, :, :] - image[:, :-1, :, :]
return tf.reduce_mean((x_var**2)) + tf.reduce_mean((y_var**2))
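# Hedged usage sketch (not part of the original module; the weights and the 30x
# variation factor are illustrative assumptions): given extractor outputs for a
# generated image, the losses above might be combined roughly as
#   total = style_content_loss(generated_outputs, content_targets, style_targets,
#                              content_layer_weights, style_layer_weights,
#                              alpha=1e4, beta=1e-2) \
#           + 30 * calculate_variation_loss(generated_image)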
| 41.890411
| 126
| 0.688031
|
1f5cc8fd29fd92cf2b6a48e6ca9f06347f3755d7
| 5,526
|
py
|
Python
|
WANNRelease/prettyNeatWann/domain/config.py
|
adafok/brain-tokyo-workshop
|
df581f33d1420076e54aa4243d96b4bdfd7fe49c
|
[
"Apache-2.0"
] | 1,097
|
2019-07-15T20:43:30.000Z
|
2022-03-31T15:04:39.000Z
|
WANNRelease/prettyNeatWann/domain/config.py
|
icefire-luo/brain-tokyo-workshop
|
2d50504c14625312a0cd3d569f08d20d51e87bd9
|
[
"Apache-2.0"
] | 37
|
2019-08-14T13:47:03.000Z
|
2022-03-17T01:04:21.000Z
|
WANNRelease/prettyNeatWann/domain/config.py
|
icefire-luo/brain-tokyo-workshop
|
2d50504c14625312a0cd3d569f08d20d51e87bd9
|
[
"Apache-2.0"
] | 335
|
2019-08-06T04:37:37.000Z
|
2022-03-26T19:02:12.000Z
|
from collections import namedtuple
import numpy as np
Game = namedtuple('Game', ['env_name', 'time_factor', 'actionSelect',
'input_size', 'output_size', 'layers', 'i_act', 'h_act',
'o_act', 'weightCap','noise_bias','output_noise','max_episode_length','in_out_labels'])
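# Registry of per-environment configurations, keyed by a short game name.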
games = {}
# -- Car Racing --------------------------------------------------------- -- #
# > 32 latent vectors (includes past frames)
vae_racing_stack = Game(env_name='VAERacingStack-v0',
actionSelect='all', # all, soft, hard
input_size=32,
output_size=3,
time_factor=0,
layers=[10, 0],
i_act=np.full(32,1),
h_act=[1,2,3,4,5,6,7,8,9,10],
o_act=np.full(3,1),
weightCap = 2.0,
noise_bias=0.0,
max_episode_length = 500,
output_noise=[False, False, False],
in_out_labels = ['latent01','latent02','latent03','latent04','latent05',\
'latent06','latent07','latent08','latent09','latent10',\
'latent11','latent12','latent13','latent14','latent15',\
'latent16','latent17','latent18','latent19','latent20',\
'latent21','latent22','latent23','latent24','latent25',\
'latent26','latent27','latent28','latent29','latent30',\
'latent31','latent32','steer' ,'gas' ,'brakes']
)
games['vae_racing_stack'] = vae_racing_stack
# > 16 latent vectors (current frame only)
vae_racing = vae_racing_stack._replace(\
env_name='VAERacing-v0', input_size=16, i_act=np.full(16,1),\
in_out_labels = ['latent01','latent02','latent03','latent04','latent05',\
'latent06','latent07','latent08','latent09','latent10',\
'latent11','latent12','latent13','latent14','latent15',\
'latent16','steer' ,'gas' ,'brakes'] )
games['vae_racing'] = vae_racing
# -- Digit Classification ------------------------------------------------ -- #
# > Scikit learn digits data set
classify = Game(env_name='Classify_digits',
actionSelect='softmax', # all, soft, hard
input_size=64,
output_size=10,
time_factor=0,
layers=[128,9],
i_act=np.full(64,1),
h_act=[1,3,4,5,6,7,8,9,10], # No step function
o_act=np.full(10,1),
weightCap = 2.0,
noise_bias=0.0,
output_noise=[False, False, False],
max_episode_length = 0,
in_out_labels = []
)
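# Build generic numeric in/out labels from the input and output sizes.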
L = [list(range(1, classify.input_size)),\
list(range(0, classify.output_size))]
label = [item for sublist in L for item in sublist]
classify = classify._replace(in_out_labels=label)
games['digits'] = classify
# > MNIST [28x28] data set
mnist784 = classify._replace(\
env_name='Classify_mnist784', input_size=784, i_act=np.full(784,1))
L = [list(range(1, mnist784.input_size)),\
list(range(0, mnist784.output_size))]
label = [item for sublist in L for item in sublist]
mnist784 = mnist784._replace(in_out_labels=label)
games['mnist784'] = mnist784
# > MNIST [16x16] data set
mnist256 = classify._replace(\
env_name='Classify_mnist256', input_size=256, i_act=np.full(256,1))
L = [list(range(1, mnist256.input_size)),\
list(range(0, mnist256.output_size))]
label = [item for sublist in L for item in sublist]
mnist256 = mnist256._replace(in_out_labels=label)
games['mnist256'] = mnist256
# -- Cart-pole Swingup --------------------------------------------------- -- #
# > Slower reaction speed
cartpole_swingup = Game(env_name='CartPoleSwingUp_Hard',
actionSelect='all', # all, soft, hard
input_size=5,
output_size=1,
time_factor=0,
layers=[5, 5],
i_act=np.full(5,1),
h_act=[1,2,3,4,5,6,7,8,9,10],
o_act=np.full(1,1),
weightCap = 2.0,
noise_bias=0.0,
output_noise=[False, False, False],
max_episode_length = 200,
in_out_labels = ['x','x_dot','cos(theta)','sin(theta)','theta_dot',
'force']
)
games['swingup_hard'] = cartpole_swingup
# > Normal reaction speed
cartpole_swingup = cartpole_swingup._replace(\
env_name='CartPoleSwingUp', max_episode_length=1000)
games['swingup'] = cartpole_swingup
# -- Bipedal Walker ------------------------------------------------------ -- #
# > Flat terrain
biped = Game(env_name='BipedalWalker-v2',
actionSelect='all', # all, soft, hard
input_size=24,
output_size=4,
time_factor=0,
layers=[40, 40],
i_act=np.full(24,1),
h_act=[1,2,3,4,5,6,7,8,9,10],
o_act=np.full(4,1),
weightCap = 2.0,
noise_bias=0.0,
output_noise=[False, False, False],
max_episode_length = 400,
in_out_labels = [
'hull_angle','hull_vel_angle','vel_x','vel_y',
'hip1_angle','hip1_speed','knee1_angle','knee1_speed','leg1_contact',
'hip2_angle','hip2_speed','knee2_angle','knee2_speed','leg2_contact',
'lidar_0','lidar_1','lidar_2','lidar_3','lidar_4',
'lidar_5','lidar_6','lidar_7','lidar_8','lidar_9',
'hip_1','knee_1','hip_2','knee_2']
)
games['biped'] = biped
# > Hilly Terrain
bipedmed = biped._replace(env_name='BipedalWalkerMedium-v2')
games['bipedmedium'] = bipedmed
# > Obstacles, hills, and pits
bipedhard = biped._replace(env_name='BipedalWalkerHardcore-v2')
games['bipedhard'] = bipedhard
# -- Bullet -------------------------------------------------------------- -- #
# > Quadruped ant
bullet_ant = Game(env_name='AntBulletEnv-v0',
actionSelect='all', # all, soft, hard
input_size=28,
output_size=8,
layers=[64, 32],
time_factor=1000,
i_act=np.full(28,1),
h_act=[1,2,3,4,5,6,7,8,9,10],
o_act=np.full(8,1),
weightCap = 2.0,
noise_bias=0.0,
output_noise=[False, False, True],
max_episode_length = 1000,
in_out_labels = []
)
games['bullet_ant'] = bullet_ant
| 31.942197
| 89
| 0.631198
|
995081937c53582bfe4f31a5b943ae3a7698ff18
| 35,539
|
py
|
Python
|
Lib/site-packages/debugpy/_vendored/pydevd/pydevd_file_utils.py
|
KarmaScripter/PiggyPy
|
25ba1d0c8933a0cb655f09db6c228f74f4d52894
|
[
"MIT"
] | null | null | null |
Lib/site-packages/debugpy/_vendored/pydevd/pydevd_file_utils.py
|
KarmaScripter/PiggyPy
|
25ba1d0c8933a0cb655f09db6c228f74f4d52894
|
[
"MIT"
] | null | null | null |
Lib/site-packages/debugpy/_vendored/pydevd/pydevd_file_utils.py
|
KarmaScripter/PiggyPy
|
25ba1d0c8933a0cb655f09db6c228f74f4d52894
|
[
"MIT"
] | null | null | null |
r'''
This module provides utilities to get the absolute filenames so that we can be sure that:
- The case of a file will match the actual file in the filesystem (otherwise breakpoints won't be hit).
- Providing means for the user to make path conversions when doing a remote debugging session in
one machine and debugging in another.
To do that, the PATHS_FROM_ECLIPSE_TO_PYTHON constant must be filled with the appropriate paths.
@note:
in this context, the server is where your python process is running
and the client is where eclipse is running.
E.g.:
If the server (your python process) has the structure
/user/projects/my_project/src/package/module1.py
and the client has:
c:\my_project\src\package\module1.py
the PATHS_FROM_ECLIPSE_TO_PYTHON would have to be:
PATHS_FROM_ECLIPSE_TO_PYTHON = [(r'c:\my_project\src', r'/user/projects/my_project/src')]
alternatively, this can be set with an environment variable from the command line:
set PATHS_FROM_ECLIPSE_TO_PYTHON=[['c:\my_project\src','/user/projects/my_project/src']]
@note: DEBUG_CLIENT_SERVER_TRANSLATION can be set to True to debug the result of those translations
@note: the case of the paths is important! Note that this can be tricky to get right when one machine
uses a case-independent filesystem and the other uses a case-dependent filesystem (if the system being
debugged is case-independent, 'normcase()' should be used on the paths defined in PATHS_FROM_ECLIPSE_TO_PYTHON).
@note: all the paths with breakpoints must be translated (otherwise they won't be found in the server)
@note: to enable remote debugging in the target machine (pydev extensions in the eclipse installation)
import pydevd;pydevd.settrace(host, stdoutToServer, stderrToServer, port, suspend)
see parameter docs on pydevd.py
@note: for doing a remote debugging session, all the pydevd_ files must be on the server accessible
through the PYTHONPATH (and the PATHS_FROM_ECLIPSE_TO_PYTHON only needs to be set on the target
machine for the paths that'll actually have breakpoints).
'''
from _pydev_bundle import pydev_log
from _pydevd_bundle.pydevd_constants import IS_PY2, IS_PY3K, DebugInfoHolder, IS_WINDOWS, IS_JYTHON
from _pydev_bundle._pydev_filesystem_encoding import getfilesystemencoding
from _pydevd_bundle.pydevd_comm_constants import file_system_encoding, filesystem_encoding_is_utf8
from _pydev_bundle.pydev_log import error_once
import json
import os.path
import sys
import itertools
import ntpath
from functools import partial
_nt_os_normcase = ntpath.normcase
os_path_basename = os.path.basename
os_path_exists = os.path.exists
join = os.path.join
try:
FileNotFoundError
except NameError:
FileNotFoundError = IOError # noqa
try:
os_path_real_path = os.path.realpath # @UndefinedVariable
except:
# jython does not support os.path.realpath
# realpath is a no-op on systems without islink support
os_path_real_path = os.path.abspath
def _get_library_dir():
library_dir = None
try:
import sysconfig
library_dir = sysconfig.get_path('purelib')
except ImportError:
pass # i.e.: Only 2.7 onwards
if library_dir is None or not os_path_exists(library_dir):
for path in sys.path:
if os_path_exists(path) and os.path.basename(path) == 'site-packages':
library_dir = path
break
if library_dir is None or not os_path_exists(library_dir):
library_dir = os.path.dirname(os.__file__)
return library_dir
# Note: we can't call sysconfig.get_path from _apply_func_and_normalize_case (it deadlocks on Python 2.7) so, we
# need to get the library dir during module loading.
_library_dir = _get_library_dir()
# defined as a list of tuples where the 1st element of the tuple is the path in the client machine
# and the 2nd element is the path in the server machine.
# see module docstring for more details.
try:
PATHS_FROM_ECLIPSE_TO_PYTHON = json.loads(os.environ.get('PATHS_FROM_ECLIPSE_TO_PYTHON', '[]'))
except Exception:
pydev_log.critical('Error loading PATHS_FROM_ECLIPSE_TO_PYTHON from environment variable.')
pydev_log.exception()
PATHS_FROM_ECLIPSE_TO_PYTHON = []
else:
if not isinstance(PATHS_FROM_ECLIPSE_TO_PYTHON, list):
pydev_log.critical('Expected PATHS_FROM_ECLIPSE_TO_PYTHON loaded from environment variable to be a list.')
PATHS_FROM_ECLIPSE_TO_PYTHON = []
else:
# Converting json lists to tuple
PATHS_FROM_ECLIPSE_TO_PYTHON = [tuple(x) for x in PATHS_FROM_ECLIPSE_TO_PYTHON]
# example:
# PATHS_FROM_ECLIPSE_TO_PYTHON = [
# (r'd:\temp\temp_workspace_2\test_python\src\yyy\yyy',
# r'd:\temp\temp_workspace_2\test_python\src\hhh\xxx')
# ]
convert_to_long_pathname = lambda filename:filename
convert_to_short_pathname = lambda filename:filename
get_path_with_real_case = lambda filename:filename
if sys.platform == 'win32':
try:
import ctypes
from ctypes.wintypes import MAX_PATH, LPCWSTR, LPWSTR, DWORD
GetLongPathName = ctypes.windll.kernel32.GetLongPathNameW # noqa
GetLongPathName.argtypes = [LPCWSTR, LPWSTR, DWORD]
GetLongPathName.restype = DWORD
GetShortPathName = ctypes.windll.kernel32.GetShortPathNameW # noqa
GetShortPathName.argtypes = [LPCWSTR, LPWSTR, DWORD]
GetShortPathName.restype = DWORD
def _convert_to_long_pathname(filename):
buf = ctypes.create_unicode_buffer(MAX_PATH)
if IS_PY2 and isinstance(filename, str):
filename = filename.decode(getfilesystemencoding())
rv = GetLongPathName(filename, buf, MAX_PATH)
if rv != 0 and rv <= MAX_PATH:
filename = buf.value
if IS_PY2:
filename = filename.encode(getfilesystemencoding())
return filename
def _convert_to_short_pathname(filename):
buf = ctypes.create_unicode_buffer(MAX_PATH)
if IS_PY2 and isinstance(filename, str):
filename = filename.decode(getfilesystemencoding())
rv = GetShortPathName(filename, buf, MAX_PATH)
if rv != 0 and rv <= MAX_PATH:
filename = buf.value
if IS_PY2:
filename = filename.encode(getfilesystemencoding())
return filename
# Note that we have a cache for previous list dirs... the only case where this may be an
# issue is if the user actually changes the case of an existing file on windows while
# the debugger is executing (as this seems very unlikely and the cache can save a
# reasonable time -- especially on mapped drives -- it seems nice to have it).
_listdir_cache = {}
def _resolve_listing(resolved, iter_parts, cache=_listdir_cache):
while True: # Note: while True to make iterative and not recursive
try:
resolve_lowercase = next(iter_parts) # must be lowercase already
except StopIteration:
return resolved
resolved_lower = resolved.lower()
resolved_joined = cache.get((resolved_lower, resolve_lowercase))
if resolved_joined is None:
dir_contents = cache.get(resolved_lower)
if dir_contents is None:
dir_contents = cache[resolved_lower] = os.listdir(resolved)
for filename in dir_contents:
if filename.lower() == resolve_lowercase:
resolved_joined = os.path.join(resolved, filename)
cache[(resolved_lower, resolve_lowercase)] = resolved_joined
break
else:
raise FileNotFoundError('Unable to find: %s in %s' % (
resolve_lowercase, resolved))
resolved = resolved_joined
def _get_path_with_real_case(filename):
# Note: this previously made:
# convert_to_long_pathname(convert_to_short_pathname(filename))
# but this is no longer done because we can't rely on getting the shortname
# consistently (there are settings to disable it on Windows).
# So, using approach which resolves by listing the dir.
if IS_PY2 and isinstance(filename, unicode): # noqa
filename = filename.encode(getfilesystemencoding())
if '~' in filename:
filename = convert_to_long_pathname(filename)
if filename.startswith('<') or not os_path_exists(filename):
return filename # Not much we can do.
drive, parts = os.path.splitdrive(os.path.normpath(filename))
drive = drive.upper()
while parts.startswith(os.path.sep):
parts = parts[1:]
drive += os.path.sep
parts = parts.lower().split(os.path.sep)
try:
return _resolve_listing(drive, iter(parts))
except FileNotFoundError:
_listdir_cache.clear()
# Retry once after clearing the cache we have.
try:
return _resolve_listing(drive, iter(parts))
except FileNotFoundError:
if os_path_exists(filename):
# This is really strange, ask the user to report as error.
pydev_log.critical(
'pydev debugger: critical: unable to get real case for file. Details:\n'
'filename: %s\ndrive: %s\nparts: %s\n'
'(please create a ticket in the tracker to address this).',
filename, drive, parts
)
pydev_log.exception()
# Don't fail, just return the original file passed.
return filename
# Check that it actually works
_get_path_with_real_case(__file__)
except:
# Something didn't quite work out, leave no-op conversions in place.
if DebugInfoHolder.DEBUG_TRACE_LEVEL > 2:
pydev_log.exception()
else:
convert_to_long_pathname = _convert_to_long_pathname
convert_to_short_pathname = _convert_to_short_pathname
get_path_with_real_case = _get_path_with_real_case
elif IS_JYTHON and IS_WINDOWS:
def get_path_with_real_case(filename):
from java.io import File # noqa
f = File(filename)
ret = f.getCanonicalPath()
if IS_PY2 and not isinstance(ret, str):
return ret.encode(getfilesystemencoding())
return ret
if IS_JYTHON:
def _normcase_windows(filename):
return filename.lower()
else:
def _normcase_windows(filename):
# `normcase` doesn't lower case on Python 2 for non-English locale, so we should do it manually.
if '~' in filename:
filename = convert_to_long_pathname(filename)
filename = _nt_os_normcase(filename)
return filename.lower()
def _normcase_linux(filename):
return filename # no-op
_filename_normalization = os.environ.get('PYDEVD_FILENAME_NORMALIZATION', '').lower()
if _filename_normalization == 'lower':
# Note: this is mostly for testing (forcing to always lower-case all contents
    # internally -- used to mimic Windows normalization on Linux).
def _normcase_lower(filename):
return filename.lower()
_default_normcase = _normcase_lower
elif _filename_normalization == 'none':
    # Disabling filename normalization may be an option on Windows if the
# user is having issues under some circumstances.
_default_normcase = _normcase_linux
elif IS_WINDOWS:
_default_normcase = _normcase_windows
else:
_default_normcase = _normcase_linux
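# Memoized wrapper: the mutable default dict is intentional and serves as a per-process cache.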
def normcase(s, NORMCASE_CACHE={}):
try:
return NORMCASE_CACHE[s]
except:
normalized = NORMCASE_CACHE[s] = _default_normcase(s)
return normalized
_ide_os = 'WINDOWS' if IS_WINDOWS else 'UNIX'
_normcase_from_client = normcase
DEBUG_CLIENT_SERVER_TRANSLATION = os.environ.get('DEBUG_PYDEVD_PATHS_TRANSLATION', 'False').lower() in ('1', 'true')
def set_ide_os(os):
'''
We need to set the IDE os because the host where the code is running may be
actually different from the client (and the point is that we want the proper
paths to translate from the client to the server).
:param os:
'UNIX' or 'WINDOWS'
'''
global _ide_os
global _normcase_from_client
prev = _ide_os
if os == 'WIN': # Apparently PyCharm uses 'WIN' (https://github.com/fabioz/PyDev.Debugger/issues/116)
os = 'WINDOWS'
assert os in ('WINDOWS', 'UNIX')
if DEBUG_CLIENT_SERVER_TRANSLATION:
print('pydev debugger: client OS: %s' % (os,))
_normcase_from_client = normcase
if os == 'WINDOWS':
# Client in Windows and server in Unix, we need to normalize the case.
if not IS_WINDOWS:
_normcase_from_client = _normcase_windows
else:
# Client in Unix and server in Windows, we can't normalize the case.
if IS_WINDOWS:
_normcase_from_client = _normcase_linux
if prev != os:
_ide_os = os
# We need to (re)setup how the client <-> server translation works to provide proper separators.
setup_client_server_paths(_last_client_server_paths_set)
# Caches filled as requested during the debug session.
NORM_PATHS_CONTAINER = {}
NORM_PATHS_AND_BASE_CONTAINER = {}
def canonical_normalized_path(filename):
'''
This returns a filename that is canonical and it's meant to be used internally
to store information on breakpoints and see if there's any hit on it.
Note that this version is only internal as it may not match the case and
may have symlinks resolved (and thus may not match what the user expects
in the editor).
'''
return get_abs_path_real_path_and_base_from_file(filename)[1]
def absolute_path(filename):
'''
Provides a version of the filename that's absolute (and NOT normalized).
'''
return get_abs_path_real_path_and_base_from_file(filename)[0]
def basename(filename):
'''
Provides the basename for a file.
'''
return get_abs_path_real_path_and_base_from_file(filename)[2]
# Returns tuple of absolute path and real path for given filename
def _abs_and_canonical_path(filename, NORM_PATHS_CONTAINER=NORM_PATHS_CONTAINER):
try:
return NORM_PATHS_CONTAINER[filename]
except:
if filename.__class__ != str:
raise AssertionError('Paths passed to _abs_and_canonical_path must be str. Found: %s (%s)' % (filename, type(filename)))
if os is None: # Interpreter shutdown
return filename, filename
os_path = os.path
if os_path is None: # Interpreter shutdown
return filename, filename
os_path_abspath = os_path.abspath
os_path_isabs = os_path.isabs
if os_path_abspath is None or os_path_isabs is None or os_path_real_path is None: # Interpreter shutdown
return filename, filename
isabs = os_path_isabs(filename)
normalize = False
abs_path = _apply_func_and_normalize_case(filename, os_path_abspath, isabs, normalize)
normalize = True
real_path = _apply_func_and_normalize_case(filename, os_path_real_path, isabs, normalize)
        # cache it for fast lookup later
NORM_PATHS_CONTAINER[filename] = abs_path, real_path
return abs_path, real_path
def _get_relative_filename_abs_path(filename, func, os_path_exists=os_path_exists):
# If we have a relative path and the file does not exist when made absolute, try to
# resolve it based on the sys.path entries.
for p in sys.path:
r = func(os.path.join(p, filename))
if os_path_exists(r):
return r
# We couldn't find the real file for the relative path. Resolve it as if it was in
# a library (so that it's considered a library file and not a project file).
r = func(os.path.join(_library_dir, filename))
return r
def _apply_func_and_normalize_case(filename, func, isabs, normalize_case, os_path_exists=os_path_exists, join=join):
if filename.startswith('<'):
# Not really a file, rather a synthetic name like <string> or <ipython-...>;
# shouldn't be normalized.
return filename
r = func(filename)
if not isabs:
if not os_path_exists(r):
r = _get_relative_filename_abs_path(filename, func)
ind = r.find('.zip')
if ind == -1:
ind = r.find('.egg')
if ind != -1:
ind += 4
zip_path = r[:ind]
inner_path = r[ind:]
if inner_path.startswith('!'):
# Note (fabioz): although I can replicate this by creating a file ending as
# .zip! or .egg!, I don't really know what's the real-world case for this
# (still kept as it was added by @jetbrains, but it should probably be reviewed
# later on).
# Note 2: it goes hand-in-hand with 'exists'.
inner_path = inner_path[1:]
zip_path = zip_path + '!'
if inner_path.startswith('/') or inner_path.startswith('\\'):
inner_path = inner_path[1:]
if inner_path:
if normalize_case:
r = join(normcase(zip_path), inner_path)
else:
r = join(zip_path, inner_path)
return r
if normalize_case:
r = normcase(r)
return r
_ZIP_SEARCH_CACHE = {}
_NOT_FOUND_SENTINEL = object()
def exists(filename):
if os_path_exists(filename):
return True
if not os.path.isabs(filename):
filename = _get_relative_filename_abs_path(filename, os.path.abspath)
if os_path_exists(filename):
return True
ind = filename.find('.zip')
if ind == -1:
ind = filename.find('.egg')
if ind != -1:
ind += 4
zip_path = filename[:ind]
inner_path = filename[ind:]
if inner_path.startswith("!"):
# Note (fabioz): although I can replicate this by creating a file ending as
# .zip! or .egg!, I don't really know what's the real-world case for this
# (still kept as it was added by @jetbrains, but it should probably be reviewed
# later on).
# Note 2: it goes hand-in-hand with '_apply_func_and_normalize_case'.
inner_path = inner_path[1:]
zip_path = zip_path + '!'
zip_file_obj = _ZIP_SEARCH_CACHE.get(zip_path, _NOT_FOUND_SENTINEL)
if zip_file_obj is None:
return False
elif zip_file_obj is _NOT_FOUND_SENTINEL:
try:
import zipfile
zip_file_obj = zipfile.ZipFile(zip_path, 'r')
_ZIP_SEARCH_CACHE[zip_path] = zip_file_obj
except:
_ZIP_SEARCH_CACHE[zip_path] = _NOT_FOUND_SENTINEL
return False
try:
if inner_path.startswith('/') or inner_path.startswith('\\'):
inner_path = inner_path[1:]
_info = zip_file_obj.getinfo(inner_path.replace('\\', '/'))
return join(zip_path, inner_path)
except KeyError:
return False
else:
pydev_log.debug('os.path.exists(%r) returned False.', filename)
return False
try:
try:
code = os_path_real_path.func_code
except AttributeError:
code = os_path_real_path.__code__
if not os.path.isabs(code.co_filename):
pydev_log.critical('This version of python seems to be incorrectly compiled')
pydev_log.critical('(internal generated filenames are not absolute).')
pydev_log.critical('This may make the debugger miss breakpoints.')
pydev_log.critical('Related bug: http://bugs.python.org/issue1666807')
elif not exists(code.co_filename): # Note: checks for files inside .zip containers.
pydev_log.critical('It seems the debugger cannot resolve %s', code.co_filename)
pydev_log.critical('This may make the debugger miss breakpoints in the standard library.')
pydev_log.critical('Related bug: https://bugs.python.org/issue1180193')
except:
# Don't fail if there's something not correct here -- but at least print it to the user so that we can correct that
pydev_log.exception()
# Note: as these functions may be rebound, users should always import
# pydevd_file_utils and then use:
#
# pydevd_file_utils.map_file_to_client
# pydevd_file_utils.map_file_to_server
#
# instead of importing any of those names to a given scope.
def _path_to_expected_str(filename):
if IS_PY2:
if not filesystem_encoding_is_utf8 and hasattr(filename, "decode"):
# filename_in_utf8 is a byte string encoded using the file system encoding
# convert it to utf8
filename = filename.decode(file_system_encoding)
if not isinstance(filename, bytes):
filename = filename.encode('utf-8')
else: # py3
if isinstance(filename, bytes):
filename = filename.decode(file_system_encoding)
return filename
def _original_file_to_client(filename, cache={}):
try:
return cache[filename]
except KeyError:
translated = _path_to_expected_str(get_path_with_real_case(absolute_path(filename)))
cache[filename] = (translated, False)
return cache[filename]
def _original_map_file_to_server(filename):
# By default just mapping to the server does nothing if there are no mappings (usually
# afterwards the debugger must do canonical_normalized_path to get a normalized version).
return filename
map_file_to_client = _original_file_to_client
map_file_to_server = _original_map_file_to_server
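# Normalize a mapping prefix: enforce the requested separator and trailing-separator convention.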
def _fix_path(path, sep, add_end_sep=False):
if add_end_sep:
if not path.endswith('/') and not path.endswith('\\'):
path += '/'
else:
if path.endswith('/') or path.endswith('\\'):
path = path[:-1]
if sep != '/':
path = path.replace('/', sep)
return path
_last_client_server_paths_set = []
_source_reference_to_frame_id = {}
_source_reference_to_server_filename = {}
_line_cache_source_reference_to_server_filename = {}
_client_filename_in_utf8_to_source_reference = {}
_next_source_reference = partial(next, itertools.count(1))
def get_client_filename_source_reference(client_filename):
return _client_filename_in_utf8_to_source_reference.get(client_filename, 0)
def get_server_filename_from_source_reference(source_reference):
return _source_reference_to_server_filename.get(source_reference, '')
def create_source_reference_for_linecache(server_filename):
source_reference = _next_source_reference()
pydev_log.debug('Created linecache id source reference: %s for server filename: %s', source_reference, server_filename)
_line_cache_source_reference_to_server_filename[source_reference] = server_filename
return source_reference
def get_source_reference_filename_from_linecache(source_reference):
return _line_cache_source_reference_to_server_filename.get(source_reference)
def create_source_reference_for_frame_id(frame_id, original_filename):
source_reference = _next_source_reference()
pydev_log.debug('Created frame id source reference: %s for frame id: %s (%s)', source_reference, frame_id, original_filename)
_source_reference_to_frame_id[source_reference] = frame_id
return source_reference
def get_frame_id_from_source_reference(source_reference):
return _source_reference_to_frame_id.get(source_reference)
def setup_client_server_paths(paths):
'''paths is the same format as PATHS_FROM_ECLIPSE_TO_PYTHON'''
global map_file_to_client
global map_file_to_server
global _last_client_server_paths_set
global _next_source_reference
_last_client_server_paths_set = paths[:]
_source_reference_to_server_filename.clear()
_client_filename_in_utf8_to_source_reference.clear()
_next_source_reference = partial(next, itertools.count(1))
# Work on the client and server slashes.
python_sep = '\\' if IS_WINDOWS else '/'
eclipse_sep = '\\' if _ide_os == 'WINDOWS' else '/'
norm_filename_to_server_container = {}
norm_filename_to_client_container = {}
initial_paths = []
initial_paths_with_end_sep = []
paths_from_eclipse_to_python = []
paths_from_eclipse_to_python_with_end_sep = []
# Apply normcase to the existing paths to follow the os preferences.
for i, (path0, path1) in enumerate(paths):
if IS_PY2:
if isinstance(path0, unicode): # noqa
path0 = path0.encode(sys.getfilesystemencoding())
if isinstance(path1, unicode): # noqa
path1 = path1.encode(sys.getfilesystemencoding())
force_only_slash = path0.endswith(('/', '\\')) and path1.endswith(('/', '\\'))
if not force_only_slash:
path0 = _fix_path(path0, eclipse_sep, False)
path1 = _fix_path(path1, python_sep, False)
initial_paths.append((path0, path1))
paths_from_eclipse_to_python.append((_normcase_from_client(path0), normcase(path1)))
# Now, make a version with a slash in the end.
path0 = _fix_path(path0, eclipse_sep, True)
path1 = _fix_path(path1, python_sep, True)
initial_paths_with_end_sep.append((path0, path1))
paths_from_eclipse_to_python_with_end_sep.append((_normcase_from_client(path0), normcase(path1)))
# Fix things so that we always match the versions with a slash in the end first.
initial_paths = initial_paths_with_end_sep + initial_paths
paths_from_eclipse_to_python = paths_from_eclipse_to_python_with_end_sep + paths_from_eclipse_to_python
if not paths_from_eclipse_to_python:
# no translation step needed (just inline the calls)
map_file_to_client = _original_file_to_client
map_file_to_server = _original_map_file_to_server
return
# only setup translation functions if absolutely needed!
def _map_file_to_server(filename, cache=norm_filename_to_server_container):
# Eclipse will send the passed filename to be translated to the python process
# So, this would be 'NormFileFromEclipseToPython'
try:
return cache[filename]
except KeyError:
if eclipse_sep != python_sep:
# Make sure that the separators are what we expect from the IDE.
filename = filename.replace(python_sep, eclipse_sep)
# used to translate a path from the client to the debug server
translated = filename
translated_normalized = _normcase_from_client(filename)
for eclipse_prefix, server_prefix in paths_from_eclipse_to_python:
if translated_normalized.startswith(eclipse_prefix):
found_translation = True
if DEBUG_CLIENT_SERVER_TRANSLATION:
pydev_log.critical('pydev debugger: replacing to server: %s', filename)
translated = server_prefix + filename[len(eclipse_prefix):]
if DEBUG_CLIENT_SERVER_TRANSLATION:
pydev_log.critical('pydev debugger: sent to server: %s - matched prefix: %s', translated, eclipse_prefix)
break
else:
found_translation = False
# Note that when going to the server, we do the replace first and only later do the norm file.
if eclipse_sep != python_sep:
translated = translated.replace(eclipse_sep, python_sep)
if found_translation:
# Note: we don't normalize it here, this must be done as a separate
# step by the caller.
translated = absolute_path(translated)
else:
if not os_path_exists(translated):
if not translated.startswith('<'):
# This is a configuration error, so, write it always so
# that the user can fix it.
error_once('pydev debugger: unable to find translation for: "%s" in [%s] (please revise your path mappings).\n',
filename, ', '.join(['"%s"' % (x[0],) for x in paths_from_eclipse_to_python]))
else:
# It's possible that we had some round trip (say, we sent /usr/lib and received
# it back, so, having no translation is ok too).
# Note: we don't normalize it here, this must be done as a separate
# step by the caller.
translated = absolute_path(translated)
cache[filename] = translated
return translated
def _map_file_to_client(filename, cache=norm_filename_to_client_container):
# The result of this method will be passed to eclipse
# So, this would be 'NormFileFromPythonToEclipse'
try:
return cache[filename]
except KeyError:
abs_path = absolute_path(filename)
translated_proper_case = get_path_with_real_case(abs_path)
translated_normalized = normcase(abs_path)
path_mapping_applied = False
if translated_normalized.lower() != translated_proper_case.lower():
if DEBUG_CLIENT_SERVER_TRANSLATION:
pydev_log.critical(
'pydev debugger: translated_normalized changed path (from: %s to %s)',
translated_proper_case, translated_normalized)
for i, (eclipse_prefix, python_prefix) in enumerate(paths_from_eclipse_to_python):
if translated_normalized.startswith(python_prefix):
if DEBUG_CLIENT_SERVER_TRANSLATION:
pydev_log.critical('pydev debugger: replacing to client: %s', translated_normalized)
# Note: use the non-normalized version.
eclipse_prefix = initial_paths[i][0]
translated = eclipse_prefix + translated_proper_case[len(python_prefix):]
if DEBUG_CLIENT_SERVER_TRANSLATION:
pydev_log.critical('pydev debugger: sent to client: %s - matched prefix: %s', translated, python_prefix)
path_mapping_applied = True
break
else:
if DEBUG_CLIENT_SERVER_TRANSLATION:
pydev_log.critical('pydev debugger: to client: unable to find matching prefix for: %s in %s',
translated_normalized, [x[1] for x in paths_from_eclipse_to_python])
translated = translated_proper_case
if eclipse_sep != python_sep:
translated = translated.replace(python_sep, eclipse_sep)
translated = _path_to_expected_str(translated)
# The resulting path is not in the python process, so, we cannot do a normalize the path here,
# only at the beginning of this method.
cache[filename] = (translated, path_mapping_applied)
if translated not in _client_filename_in_utf8_to_source_reference:
if path_mapping_applied:
source_reference = 0
else:
source_reference = _next_source_reference()
pydev_log.debug('Created source reference: %s for untranslated path: %s', source_reference, filename)
_client_filename_in_utf8_to_source_reference[translated] = source_reference
_source_reference_to_server_filename[source_reference] = filename
return (translated, path_mapping_applied)
map_file_to_server = _map_file_to_server
map_file_to_client = _map_file_to_client
setup_client_server_paths(PATHS_FROM_ECLIPSE_TO_PYTHON)
# For given file f returns tuple of its absolute path, real path and base name
def get_abs_path_real_path_and_base_from_file(
filename, NORM_PATHS_AND_BASE_CONTAINER=NORM_PATHS_AND_BASE_CONTAINER):
try:
return NORM_PATHS_AND_BASE_CONTAINER[filename]
except:
f = filename
if not f:
# i.e.: it's possible that the user compiled code with an empty string (consider
# it as <string> in this case).
f = '<string>'
if f.startswith('<'):
return f, normcase(f), f
if _abs_and_canonical_path is None: # Interpreter shutdown
i = max(f.rfind('/'), f.rfind('\\'))
return (f, f, f[i + 1:])
if f is not None:
if f.endswith('.pyc'):
f = f[:-1]
elif f.endswith('$py.class'):
f = f[:-len('$py.class')] + '.py'
abs_path, canonical_normalized_filename = _abs_and_canonical_path(f)
try:
base = os_path_basename(canonical_normalized_filename)
except AttributeError:
# Error during shutdown.
i = max(f.rfind('/'), f.rfind('\\'))
base = f[i + 1:]
ret = abs_path, canonical_normalized_filename, base
NORM_PATHS_AND_BASE_CONTAINER[filename] = ret
return ret
def get_abs_path_real_path_and_base_from_frame(frame, NORM_PATHS_AND_BASE_CONTAINER=NORM_PATHS_AND_BASE_CONTAINER):
try:
return NORM_PATHS_AND_BASE_CONTAINER[frame.f_code.co_filename]
except:
# This one is just internal (so, does not need any kind of client-server translation)
f = frame.f_code.co_filename
        if f is not None and f.startswith(('build/bdist.', 'build\\bdist.')):
# files from eggs in Python 2.7 have paths like build/bdist.linux-x86_64/egg/<path-inside-egg>
f = frame.f_globals['__file__']
if get_abs_path_real_path_and_base_from_file is None:
# Interpreter shutdown
if not f:
# i.e.: it's possible that the user compiled code with an empty string (consider
# it as <string> in this case).
f = '<string>'
i = max(f.rfind('/'), f.rfind('\\'))
return f, f, f[i + 1:]
ret = get_abs_path_real_path_and_base_from_file(f)
# Also cache based on the frame.f_code.co_filename (if we had it inside build/bdist it can make a difference).
NORM_PATHS_AND_BASE_CONTAINER[frame.f_code.co_filename] = ret
return ret
def get_fullname(mod_name):
if IS_PY3K:
import pkgutil
else:
from _pydev_imps import _pydev_pkgutil_old as pkgutil
try:
loader = pkgutil.get_loader(mod_name)
except:
return None
if loader is not None:
for attr in ("get_filename", "_get_filename"):
meth = getattr(loader, attr, None)
if meth is not None:
return meth(mod_name)
return None
def get_package_dir(mod_name):
for path in sys.path:
mod_path = join(path, mod_name.replace('.', '/'))
if os.path.isdir(mod_path):
return mod_path
return None
| 38.92552
| 136
| 0.659107
|
21875050d32f9dd018c443839e96c15189587144
| 7,494
|
py
|
Python
|
ros/src/twist_controller/twist_controller_mod.py
|
edufford/CarND-Capstone
|
dd466a3e9e1eeb3c3f4b37175b9694600d8cc8c8
|
[
"MIT"
] | 9
|
2018-03-07T01:38:31.000Z
|
2020-05-14T14:22:52.000Z
|
ros/src/twist_controller/twist_controller_mod.py
|
edufford/CarND-Capstone
|
dd466a3e9e1eeb3c3f4b37175b9694600d8cc8c8
|
[
"MIT"
] | 95
|
2018-02-27T11:37:30.000Z
|
2019-02-09T20:37:03.000Z
|
ros/src/twist_controller/twist_controller_mod.py
|
edufford/CarND-Capstone
|
dd466a3e9e1eeb3c3f4b37175b9694600d8cc8c8
|
[
"MIT"
] | 4
|
2018-02-26T21:20:17.000Z
|
2019-08-09T15:50:53.000Z
|
import time
import rospy
from lowpass import LowPassFilter
from pid import PID
from yaw_controller import YawController
GAS_DENSITY = 2.858
ONE_MPH = 0.44704
class Controller(object):
def __init__(self, default_update_interval, wheel_base, steer_ratio, min_speed, max_lat_accel, max_steer_angle, max_deceleration, max_throttle, fuel_capacity, vehicle_mass, wheel_radius, dyn_velo_proportional_control, dyn_velo_integral_control, dyn_braking_proportional_control, dyn_braking_integral_control):
self.current_timestep = None
self.previous_acceleration = 0.
self.max_throttle = max_throttle
self.default_update_interval = default_update_interval
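        # Tuning constants (rate limits, braking thresholds and low-pass filter time constants).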
self.velocity_increase_limit_constant = 0.25
self.velocity_decrease_limit_constant = 0.05
self.braking_to_throttle_threshold_ratio = 4. / 3.
self.manual_braking_upper_velocity_limit = 1.4
self.prev_manual_braking_torque = 0
self.manual_braking_torque_to_stop = 700
self.manual_braking_torque_up_rate = 300
self.lpf_tau_throttle = 0.3
self.lpf_tau_brake = 0.3
self.lpf_tau_steering = 0.4
self.manual_braking = False
self.max_braking_torque = (
vehicle_mass + fuel_capacity * GAS_DENSITY) * abs(max_deceleration) * wheel_radius
rospy.logwarn('max_braking_torque = {:.1f} N'.format(self.max_braking_torque))
self.yaw_controller = YawController(
wheel_base, steer_ratio, min_speed, max_lat_accel, max_steer_angle)
self.setup_pid_controllers(dyn_velo_proportional_control, dyn_velo_integral_control,
dyn_braking_proportional_control, dyn_braking_integral_control)
self.throttle_lpf = LowPassFilter(self.lpf_tau_throttle,
default_update_interval)
self.brake_lpf = LowPassFilter(self.lpf_tau_brake,
default_update_interval)
self.steering_lpf = LowPassFilter(
self.lpf_tau_steering, default_update_interval)
def setup_pid_controllers(self, velo_p, velo_i, braking_p, braking_i):
rospy.loginfo("Initializing PID controllers with velo_P: {}, velo_I: {}, braking_P: {}, braking_I: {}"
.format(velo_p, velo_i, braking_p, braking_i))
        # create velocity PID controller with output clamped to [0, 1]
        # (throttle fraction)
self.velocity_pid_controller = PID(
velo_p, velo_i, 0, 0, 1)
        # create braking PID controller with output clamped to [0, 10000]
        # (brake torque)
self.braking_pid_controller = PID(
braking_p, braking_i, 0.0, 0.0, 10000)
def control(self, target_linear_velocity, target_angular_velocity, current_linear_velocity, is_decelerating):
# compute timestep
timestep = self.compute_timestep()
velocity_error = target_linear_velocity - current_linear_velocity
if (target_linear_velocity == 0 and current_linear_velocity == 0):
# reset integrators if we're at a stop
self.reset()
limit_constant = self.velocity_increase_limit_constant if velocity_error > 0 else self.velocity_decrease_limit_constant
error_thresh = limit_constant * current_linear_velocity
throttle_command = 0
brake_command = 0
control_mode = "Coasting"
if is_decelerating and (target_linear_velocity < self.manual_braking_upper_velocity_limit and current_linear_velocity < self.manual_braking_upper_velocity_limit):
# vehicle is coming to a stop or is at a stop; apply fixed braking torque
# continuously, even if the vehicle is stopped
self.manual_braking = True
brake_command = self.prev_manual_braking_torque
# Ramp up manual braking torque
if brake_command < self.manual_braking_torque_to_stop:
brake_command += self.manual_braking_torque_up_rate
# Clip manual brake torque to braking_torque_to_full_stop
brake_command = min(brake_command, self.manual_braking_torque_to_stop)
self.velocity_pid_controller.reset()
control_mode = "Manual braking"
elif velocity_error < -1 * max(limit_constant * current_linear_velocity, 0.1):
# use brake if we want to slow down somewhat significantly
self.manual_braking = False
brake_command = self.braking_pid_controller.step(-velocity_error, timestep) if velocity_error < (-1 * limit_constant *
self.braking_to_throttle_threshold_ratio * current_linear_velocity) or (velocity_error < 0 and current_linear_velocity < 2.5) else 0
self.velocity_pid_controller.reset()
control_mode = "PID braking"
elif not is_decelerating or (current_linear_velocity > 5 and velocity_error > -1 * limit_constant * current_linear_velocity) or (current_linear_velocity < 5 and velocity_error > limit_constant * current_linear_velocity):
# use throttle if we want to speed up or if we want to slow down
# just slightly
# reset brake lpf to release manually held brake quickly
if self.manual_braking:
self.brake_lpf.reset()
self.manual_braking = False
throttle_command = self.velocity_pid_controller.step(
velocity_error, timestep)
self.braking_pid_controller.reset()
control_mode = "PID throttle"
# apply low pass filter and maximum limit on brake command
filtered_brake = min(
self.max_braking_torque, self.brake_lpf.filt(brake_command))
# do not apply throttle if any brake is applied
if filtered_brake < 50:
# brake is released, ok to apply throttle
filtered_brake = 0
else:
# brake is still applied, don't apply throttle
throttle_command = 0
self.velocity_pid_controller.reset()
# apply low pass filter and maximum limit on throttle command
filtered_throttle = min(
self.max_throttle, self.throttle_lpf.filt(throttle_command))
# Store final brake torque command for next manual braking torque calc
self.prev_manual_braking_torque = filtered_brake
rospy.loginfo('%s: current linear velocity %.2f, target linear velocity %.2f, is_decelerating %s, throttle_command %.2f, brake_command %.2f, error %.2f, thresh %.2f',
control_mode, current_linear_velocity, target_linear_velocity, is_decelerating, filtered_throttle, filtered_brake, velocity_error, error_thresh)
# Return throttle, brake, steer
return (filtered_throttle,
filtered_brake,
self.steering_lpf.filt(self.yaw_controller.get_steering(target_linear_velocity, target_angular_velocity, current_linear_velocity)))
def reset(self):
        self.current_timestep = None
self.velocity_pid_controller.reset()
self.braking_pid_controller.reset()
def compute_timestep(self):
last_timestep = self.current_timestep
self.current_timestep = time.time()
        if last_timestep is None:
last_timestep = self.current_timestep - self.default_update_interval
return self.current_timestep - last_timestep
| 47.732484
| 313
| 0.682146
|
e2c35e3a01fa119523e3f9b1e4c578643515aa92
| 7,718
|
py
|
Python
|
contrib/go/src/python/pants/contrib/go/subsystems/go_distribution.py
|
lahosken/pants
|
1b0340987c9b2eab9411416803c75b80736716e4
|
[
"Apache-2.0"
] | null | null | null |
contrib/go/src/python/pants/contrib/go/subsystems/go_distribution.py
|
lahosken/pants
|
1b0340987c9b2eab9411416803c75b80736716e4
|
[
"Apache-2.0"
] | null | null | null |
contrib/go/src/python/pants/contrib/go/subsystems/go_distribution.py
|
lahosken/pants
|
1b0340987c9b2eab9411416803c75b80736716e4
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
import subprocess
from collections import OrderedDict, namedtuple
from pants.base.workunit import WorkUnit, WorkUnitLabel
from pants.binaries.binary_util import BinaryUtil
from pants.fs.archive import TGZ
from pants.subsystem.subsystem import Subsystem
from pants.util.contextutil import temporary_dir
from pants.util.memo import memoized_property
class GoDistribution(object):
"""Represents a self-bootstrapping Go distribution."""
class Factory(Subsystem):
options_scope = 'go-distribution'
@classmethod
def subsystem_dependencies(cls):
return (BinaryUtil.Factory,)
@classmethod
def register_options(cls, register):
register('--supportdir', advanced=True, default='bin/go',
help='Find the go distributions under this dir. Used as part of the path to lookup '
'the distribution with --binary-util-baseurls and --pants-bootstrapdir')
register('--version', advanced=True, default='1.8',
help='Go distribution version. Used as part of the path to lookup the distribution '
'with --binary-util-baseurls and --pants-bootstrapdir')
def create(self):
# NB: create is an instance method to allow the user to choose global or scoped.
# It's not unreasonable to imagine multiple go versions in play; for example: when
# transitioning from the 1.x series to the 2.x series.
binary_util = BinaryUtil.Factory.create()
options = self.get_options()
return GoDistribution(binary_util, options.supportdir, options.version)
def __init__(self, binary_util, relpath, version):
self._binary_util = binary_util
self._relpath = relpath
self._version = version
@property
def version(self):
"""Returns the version of the Go distribution.
:returns: The Go distribution version number string.
:rtype: string
"""
return self._version
@memoized_property
def goroot(self):
"""Returns the $GOROOT for this go distribution.
:returns: The Go distribution $GOROOT.
:rtype: string
"""
go_distribution = self._binary_util.select_binary(self._relpath, self.version, 'go.tar.gz')
distribution_workdir = os.path.dirname(go_distribution)
outdir = os.path.join(distribution_workdir, 'unpacked')
if not os.path.exists(outdir):
with temporary_dir(root_dir=distribution_workdir) as tmp_dist:
TGZ.extract(go_distribution, tmp_dist)
os.rename(tmp_dist, outdir)
return os.path.join(outdir, 'go')
def go_env(self, gopath=None):
"""Return an env dict that represents a proper Go environment mapping for this distribution."""
# Forcibly nullify the GOPATH if the command does not need one - this can prevent bad user
# GOPATHs from erroring out commands; see: https://github.com/pantsbuild/pants/issues/2321.
# NB: As of go 1.8, when GOPATH is unset (set to ''), it defaults to ~/go (assuming HOME is
# set - and we can't unset that since it might legitimately be used by the subcommand); so we
# set the GOPATH here to a valid value that nonetheless will fail to work if GOPATH is
# actually used by the subcommand.
no_gopath = os.devnull
return OrderedDict(GOROOT=self.goroot, GOPATH=gopath or no_gopath)
class GoCommand(namedtuple('GoCommand', ['cmdline', 'env'])):
"""Encapsulates a go command that can be executed."""
@classmethod
def _create(cls, goroot, cmd, go_env, args=None):
return cls([os.path.join(goroot, 'bin', 'go'), cmd] + (args or []), env=go_env)
def spawn(self, env=None, **kwargs):
"""
:param dict env: A custom environment to launch the Go command in. If `None` the current
environment is used.
:param **kwargs: Keyword arguments to pass through to `subprocess.Popen`.
:returns: A handle to the spawned go command subprocess.
:rtype: :class:`subprocess.Popen`
"""
env = (env or os.environ).copy()
env.update(self.env)
return subprocess.Popen(self.cmdline, env=env, **kwargs)
def check_output(self, env=None, **kwargs):
"""Returns the output of the executed Go command.
:param dict env: A custom environment to launch the Go command in. If `None` the current
environment is used.
:param **kwargs: Keyword arguments to pass through to `subprocess.check_output`.
:return str: Output of Go command.
:raises subprocess.CalledProcessError: Raises if Go command fails.
"""
env = (env or os.environ).copy()
env.update(self.env)
return subprocess.check_output(self.cmdline, env=env, **kwargs)
def __str__(self):
return (' '.join('{}={}'.format(k, v) for k, v in self.env.items()) +
' ' +
' '.join(self.cmdline))
def create_go_cmd(self, cmd, gopath=None, args=None):
"""Creates a Go command that is optionally targeted to a Go workspace.
:param string cmd: Go command to execute, e.g. 'test' for `go test`
:param string gopath: An optional $GOPATH which points to a valid Go workspace from which to run
the command.
:param list args: A list of arguments and flags to pass to the Go command.
:returns: A go command that can be executed later.
:rtype: :class:`GoDistribution.GoCommand`
"""
return self.GoCommand._create(self.goroot, cmd, go_env=self.go_env(gopath=gopath), args=args)
def execute_go_cmd(self, cmd, gopath=None, args=None, env=None,
workunit_factory=None, workunit_name=None, workunit_labels=None, **kwargs):
"""Runs a Go command that is optionally targeted to a Go workspace.
If a `workunit_factory` is supplied the command will run in a work unit context.
:param string cmd: Go command to execute, e.g. 'test' for `go test`
:param string gopath: An optional $GOPATH which points to a valid Go workspace from which to run
the command.
:param list args: An optional list of arguments and flags to pass to the Go command.
:param dict env: A custom environment to launch the Go command in. If `None` the current
environment is used.
:param workunit_factory: An optional callable that can produce a `WorkUnit` context
:param string workunit_name: An optional name for the work unit; defaults to the `cmd`
:param list workunit_labels: An optional sequence of labels for the work unit.
:param **kwargs: Keyword arguments to pass through to `subprocess.Popen`.
:returns: A tuple of the exit code and the go command that was run.
:rtype: (int, :class:`GoDistribution.GoCommand`)
"""
go_cmd = self.GoCommand._create(self.goroot, cmd, go_env=self.go_env(gopath=gopath), args=args)
if workunit_factory is None:
return go_cmd.spawn(**kwargs).wait()
else:
name = workunit_name or cmd
labels = [WorkUnitLabel.TOOL] + (workunit_labels or [])
with workunit_factory(name=name, labels=labels, cmd=str(go_cmd)) as workunit:
process = go_cmd.spawn(env=env,
stdout=workunit.output('stdout'),
stderr=workunit.output('stderr'),
**kwargs)
returncode = process.wait()
workunit.set_outcome(WorkUnit.SUCCESS if returncode == 0 else WorkUnit.FAILURE)
return returncode, go_cmd
| 45.4
| 100
| 0.680228
|
d906806bd6c9b301631f68150d3e607d3168058d
| 21,974
|
py
|
Python
|
jstc/test_compiler.py
|
canaryhealth/jstc
|
d4be1f213e041b80708e8a7e40edfe2ae308b637
|
[
"MIT"
] | null | null | null |
jstc/test_compiler.py
|
canaryhealth/jstc
|
d4be1f213e041b80708e8a7e40edfe2ae308b637
|
[
"MIT"
] | null | null | null |
jstc/test_compiler.py
|
canaryhealth/jstc
|
d4be1f213e041b80708e8a7e40edfe2ae308b637
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
#------------------------------------------------------------------------------
# file: $Id$
# auth: Philip J Grabner <phil@canary.md>
# date: 2016/09/15
# copy: (C) Copyright 2016-EOT Canary Health, Inc., All Rights Reserved.
#------------------------------------------------------------------------------
import unittest
import os
import textwrap
from aadict import aadict
import fso
#------------------------------------------------------------------------------
class TestCompiler(unittest.TestCase):
maxDiff = None
#----------------------------------------------------------------------------
def test_fragments(self):
import jstc.compiler
compiler = jstc.compiler.Compiler()
hooks = aadict(name_transform=compiler._name_transform)
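    # fragments() yields (content, attributes) pairs; '__here__' names the enclosing file itself.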
self.assertEqual(
list(compiler.fragments('foo/bar.jst', '', 'i am a template.', hooks)),
[('i am a template.', aadict(name='foo/bar', type='.jst'))])
self.assertEqual(
list(compiler.fragments('foo/bar.jst', '', '''\
##! zig
i am the zig template.
##! __here__
i am the root template.
''', hooks)),
[
(' i am the zig template.\n', aadict(name='foo/bar/zig', type='.jst')),
(' i am the root template.\n', aadict(name='foo/bar', type='.jst')),
])
#----------------------------------------------------------------------------
def test_attributes(self):
import jstc.compiler
compiler = jstc.compiler.Compiler()
hooks = aadict(name_transform=compiler._name_transform)
self.assertEqual(
list(compiler.fragments('foo/bar.jst', '', '''\
##! zig; channels: "public,protected"
i am the zig template.
##! __here__; public; protected
i am the root template.
##! zag; type: text/jst; !public; !protected
i am the zag template.
''', hooks)),
[
(' i am the zig template.\n', aadict(name='foo/bar/zig', type='.jst', channels='public,protected')),
(' i am the root template.\n', aadict(name='foo/bar', type='.jst', public=True, protected=True)),
(' i am the zag template.\n', aadict(name='foo/bar/zag', type='text/jst', public=False, protected=False)),
])
#----------------------------------------------------------------------------
def writecontent(self, files, dedent=True):
for name, content in files.items():
path = os.path.join(os.path.dirname(__file__), name)
pdir = os.path.dirname(path)
if not os.path.isdir(pdir):
os.makedirs(pdir)
with open(path, 'wb') as fp:
fp.write(textwrap.dedent(content))
#----------------------------------------------------------------------------
def test_render_simple(self):
import jstc.compiler
compiler = jstc.compiler.Compiler(
overrides=dict(inline=True, precompile=False))
with fso.push() as overlay:
self.writecontent({
'test/common/hello.hbs':
'''\
##! __here__
Hello, world!
##! name
Hello, {{name}}!
'''
})
self.assertEqual(
compiler.render_assets('jstc:test/common/hello.hbs', 'test'),
'''\
<script type="text/x-handlebars" data-template-name="common/hello">Hello, world!</script>\
<script type="text/x-handlebars" data-template-name="common/hello/name">Hello, {{name}}!</script>\
''')
#----------------------------------------------------------------------------
def test_render_trim_deprecated(self):
import jstc.compiler
compiler = jstc.compiler.Compiler(
overrides=dict(inline=True, precompile=False))
with fso.push() as overlay:
self.writecontent({
'test.hbs':
'''\
##! 0-default
<span>
text
</span>
##! 1-trim; trim
<span>
text
</span>
##! 2-notrim; !trim
<span>
text
</span>
'''
})
self.assertEqual(
compiler.render_assets('jstc:test.hbs'),
'''\
<script type="text/x-handlebars" data-template-name="test/0-default"><span>
text
</span></script>\
<script type="text/x-handlebars" data-template-name="test/1-trim"><span>
text
</span></script>\
<script type="text/x-handlebars" data-template-name="test/2-notrim"> <span>
text
</span>
</script>\
''')
#----------------------------------------------------------------------------
def test_render_space_default(self):
import jstc.compiler
compiler = jstc.compiler.Compiler(
overrides=dict(inline=True, precompile=False))
with fso.push() as overlay:
self.writecontent({
'test.hbs':
'''\
##! default
{{#if value}}
<span>
{{value}}
</span>
{{else}}
<span>default</span>
{{/if}}
'''
})
self.assertEqual(
compiler.render_assets('jstc:test.hbs'),
'''\
<script type="text/x-handlebars" data-template-name="test/default">{{#if value}}
<span>
{{value}}
</span>
{{else}}
<span>default</span>
{{/if}}</script>\
''')
#----------------------------------------------------------------------------
def test_render_space_preserve(self):
import jstc.compiler
compiler = jstc.compiler.Compiler(
overrides=dict(inline=True, precompile=False))
with fso.push() as overlay:
self.writecontent({
'test.hbs':
'''\
##! preserve; space: preserve
{{#if value}}
<span>
{{value}}
</span>
{{else}}
<span>default</span>
{{/if}}
'''
})
self.assertEqual(
compiler.render_assets('jstc:test.hbs'),
'''\
<script type="text/x-handlebars" data-template-name="test/preserve"> {{#if value}}
<span>
{{value}}
</span>
{{else}}
<span>default</span>
{{/if}}
</script>\
''')
#----------------------------------------------------------------------------
def test_render_space_trim(self):
import jstc.compiler
compiler = jstc.compiler.Compiler(
overrides=dict(inline=True, precompile=False))
with fso.push() as overlay:
self.writecontent({
'test.hbs':
'''\
##! trim; space: trim
{{#if value}}
<span>
{{value}}
</span>
{{else}}
<span>default</span>
{{/if}}
'''
})
self.assertEqual(
compiler.render_assets('jstc:test.hbs'),
'''\
<script type="text/x-handlebars" data-template-name="test/trim">{{#if value}}
<span>
{{value}}
</span>
{{else}}
<span>default</span>
{{/if}}</script>\
''')
#----------------------------------------------------------------------------
def test_render_space_dedent(self):
import jstc.compiler
compiler = jstc.compiler.Compiler(
overrides=dict(inline=True, precompile=False))
with fso.push() as overlay:
self.writecontent({
'test.hbs':
'''\
##! dedent; space: dedent
{{#if value}}
<span>
{{value}}
</span>
{{else}}
<span>default</span>
{{/if}}
'''
})
self.assertEqual(
compiler.render_assets('jstc:test.hbs'),
'''\
<script type="text/x-handlebars" data-template-name="test/dedent">{{#if value}}
<span>
{{value}}
</span>
{{else}}
<span>default</span>
{{/if}}</script>\
''')
#----------------------------------------------------------------------------
def test_render_space_collapse_complete(self):
import jstc.compiler
compiler = jstc.compiler.Compiler(
overrides=dict(inline=True, precompile=False))
with fso.push() as overlay:
self.writecontent({
'test.hbs':
'''\
##! collapse/complete; space: collapse
{{#if value}}
<span>
{{value}}
</span>
{{else}}
<span>default</span>
{{/if}}
'''
})
self.assertEqual(
compiler.render_assets('jstc:test.hbs'),
'''\
<script type="text/x-handlebars" data-template-name="test/collapse/complete">{{#if value}}<span>{{value}}</span>{{else}}<span>default</span>{{/if}}</script>\
''')
#----------------------------------------------------------------------------
def test_render_space_collapse_htmlSpace(self):
import jstc.compiler
compiler = jstc.compiler.Compiler(
overrides=dict(inline=True, precompile=False))
with fso.push() as overlay:
self.writecontent({
'test.hbs':
'''\
##! collapse/htmlspace; space: collapse
{{#if value}}
<span >
{{value}}
</span >
{{else}}
<span>default</span >
{{/if}}
'''
})
self.assertEqual(
compiler.render_assets('jstc:test.hbs'),
'''\
<script type="text/x-handlebars" data-template-name="test/collapse/htmlspace">{{#if value}}<span> {{value}}</span> {{else}}<span>default</span> {{/if}}</script>\
''')
#----------------------------------------------------------------------------
def test_render_space_collapse_hbsSpace(self):
import jstc.compiler
compiler = jstc.compiler.Compiler(
overrides=dict(inline=True, precompile=False))
with fso.push() as overlay:
self.writecontent({
'test.hbs':
'''\
##! collapse/hbsspace; space: collapse
{{#if value }}
<span>
{{value }}
</span>
{{else }}
<span>default</span>
{{/if }}
'''
})
self.assertEqual(
compiler.render_assets('jstc:test.hbs'),
'''\
<script type="text/x-handlebars" data-template-name="test/collapse/hbsspace">{{#if value}} <span>{{value}} </span>{{else}} <span>default</span>{{/if}} </script>\
''')
#----------------------------------------------------------------------------
def test_comments(self):
import jstc.compiler
compiler = jstc.compiler.Compiler(
overrides=dict(inline=True, precompile=False))
with fso.push() as overlay:
self.writecontent({
'test/application.hbs':
'''\
<div>
## TODO: super-secret comment!
Nothing to see here.
</div>
'''
})
self.assertEqual(
compiler.render_assets('jstc:test/application.hbs', 'test'),
'''\
<script type="text/x-handlebars" data-template-name="application"><div>
Nothing to see here.
</div>\
</script>\
''')
#----------------------------------------------------------------------------
def test_root(self):
import jstc.compiler
compiler = jstc.compiler.Compiler(
overrides=dict(inline=True, precompile=False))
with fso.push() as overlay:
self.writecontent({
'test/one/template.hbs': 'template "one".',
'test/two/template.hbs': 'template "two".',
})
self.assertEqual(
compiler.render_assets('jstc:test/one/template.hbs', 'test/one'),
'''\
<script type="text/x-handlebars" data-template-name="template">template "one".</script>\
''')
self.assertEqual(
compiler.render_assets('jstc:test/two/template.hbs', 'test/two'),
'''\
<script type="text/x-handlebars" data-template-name="template">template "two".</script>\
''')
self.assertEqual(
compiler.render_assets(
['jstc:test/one/template.hbs', 'jstc:test/two/template.hbs'], 'test'),
'''\
<script type="text/x-handlebars" data-template-name="one/template">template "one".</script>\
<script type="text/x-handlebars" data-template-name="two/template">template "two".</script>\
''')
#----------------------------------------------------------------------------
def test_collision_error(self):
import jstc.compiler
compiler = jstc.compiler.Compiler(
overrides=dict(inline=True, precompile=False))
with fso.push() as overlay:
self.writecontent({
'test/one/template.hbs': 'template "one".',
'test/two/template.hbs': 'template "two".',
})
with self.assertRaises(jstc.TemplateCollision) as cm:
compiler.render_assets(
['jstc:test/one/template.hbs', 'jstc:test/two/template.hbs'],
['test/one', 'test/two'])
self.assertEqual(
str(cm.exception),
''''text/x-handlebars' template 'template' is already defined''')
#----------------------------------------------------------------------------
def test_collision_ignore(self):
import jstc.compiler
compiler = jstc.compiler.Compiler(
defaults=dict(collision='ignore'),
overrides=dict(inline=True, precompile=False),
)
with fso.push() as overlay:
self.writecontent({
'test/one/template.hbs': 'template "one".',
'test/two/template.hbs': 'template "two".',
})
self.assertEqual(
compiler.render_assets(
['jstc:test/one/template.hbs', 'jstc:test/two/template.hbs'],
['test/one', 'test/two']),
'''\
<script type="text/x-handlebars" data-template-name="template">template "one".</script>\
''')
#----------------------------------------------------------------------------
def test_collision_override(self):
import jstc.compiler
compiler = jstc.compiler.Compiler(
defaults=dict(collision='override'),
overrides=dict(inline=True, precompile=False),
)
with fso.push() as overlay:
self.writecontent({
'test/one/template.hbs': 'template "one".',
'test/two/template.hbs': 'template "two".',
})
self.assertEqual(
compiler.render_assets(
['jstc:test/one/template.hbs', 'jstc:test/two/template.hbs'],
['test/one', 'test/two']),
'''\
<script type="text/x-handlebars" data-template-name="template">template "two".</script>\
''')
#----------------------------------------------------------------------------
def test_collision_pertemplate(self):
import jstc.compiler
compiler = jstc.compiler.Compiler(
defaults=dict(collision='ignore'),
overrides=dict(inline=True, precompile=False),
)
with fso.push() as overlay:
self.writecontent({
'test/one/template.hbs':
'''\
##! a
template "one/a".
##! b
template "one/b".
''',
'test/two/template.hbs':
'''\
##! a; collision: ignore
template "two/a".
##! b; collision: override
template "two/b".
''',
})
self.assertEqual(
compiler.render_assets(
['jstc:test/one/template.hbs', 'jstc:test/two/template.hbs'],
['test/one', 'test/two']),
'''\
<script type="text/x-handlebars" data-template-name="template/a">template "one/a".</script>\
<script type="text/x-handlebars" data-template-name="template/b">template "two/b".</script>\
''')
#----------------------------------------------------------------------------
def test_precompile(self):
import jstc
with fso.push() as overlay:
self.writecontent({
'test/hello.hbs': 'hello, world!',
'test/hello/name.hbs': 'hello, {{name}}!',
})
compiled = jstc.render_assets('jstc:test/**.hbs', force_inline=True, force_precompile=True)
if 'text/x-handlebars' in compiled:
raise unittest.SkipTest(
'handlebars executable not available (use "npm install handlebars")')
self.assertMultiLineEqual(
compiled,
'''\
<script type="text/javascript" >(function(){var t=Handlebars.template,ts=Handlebars.templates=Handlebars.templates||{};ts["hello"]=t({"compiler":[7,">= 4.0.0"],"main":function(container,depth0,helpers,partials,data) {
return "hello, world!";
},"useData":true});ts["hello/name"]=t({"compiler":[7,">= 4.0.0"],"main":function(container,depth0,helpers,partials,data) {
var helper;
return "hello, "
+ container.escapeExpression(((helper = (helper = helpers.name || (depth0 != null ? depth0.name : depth0)) != null ? helper : helpers.helperMissing),(typeof helper === "function" ? helper.call(depth0 != null ? depth0 : {},{"name":"name","hash":{},"data":data}) : helper)))
+ "!";
},"useData":true});})();</script>''')
#----------------------------------------------------------------------------
def test_asset_filter(self):
import jstc
with fso.push() as overlay:
self.writecontent({
'test/hello.hbs': 'hello!',
'test/goodbye.hbs': 'so long!',
})
self.assertEqual(
jstc.render_assets('jstc:test/**.hbs', force_inline=True, force_precompile=False),
'''\
<script type="text/x-handlebars" data-template-name="goodbye">so long!</script>\
<script type="text/x-handlebars" data-template-name="hello">hello!</script>\
''')
self.assertEqual(
jstc.render_assets(
'jstc:test/**.hbs', force_inline=True, force_precompile=False,
asset_filter=lambda name: name == 'test/hello.hbs'),
'''\
<script type="text/x-handlebars" data-template-name="hello">hello!</script>\
''')
self.assertEqual(
jstc.render_assets('jstc:test/**.hbs', force_inline=True, force_precompile=False,
asset_filter=lambda name: name != 'test/hello.hbs'),
'''\
<script type="text/x-handlebars" data-template-name="goodbye">so long!</script>\
''')
#----------------------------------------------------------------------------
def test_name_transform(self):
import jstc
with fso.push() as overlay:
self.writecontent({
'test/hello.hbs': 'hello!',
'test/goodbye.hbs': 'so long!',
})
def mynt(name, root):
return (name[2:].replace('d', 'd-').split('.')[0], 'text/x-mustache')
self.assertEqual(
jstc.render_assets('jstc:test/**.hbs', force_inline=True, force_precompile=False,
name_transform=mynt),
'''\
<script type="text/x-mustache" data-template-name="st/good-bye">so long!</script>\
<script type="text/x-mustache" data-template-name="st/hello">hello!</script>\
''')
#----------------------------------------------------------------------------
def test_template_transform(self):
import jstc
with fso.push() as overlay:
self.writecontent({
'test/hello.hbs': 'hello!',
'test/goodbye.hbs': 'so long!',
})
def mytt(text, attrs):
if attrs.name == 'hello':
text = 'hello, world!'
attrs.id = 'HW'
else:
attrs.type = 'template/jst'
return (text, attrs)
self.assertEqual(
jstc.render_assets('jstc:test/**.hbs', force_inline=True, force_precompile=False,
template_transform=mytt),
'''\
<script type="template/jst" data-template-name="goodbye">so long!</script>\
<script type="text/x-handlebars" data-template-name="hello" id="HW">hello, world!</script>\
''')
#----------------------------------------------------------------------------
def test_template_filter(self):
import jstc
with fso.push() as overlay:
self.writecontent({
'test/hello.hbs': 'hello!',
'test/goodbye.hbs': '''\
##! __here__
so long!
##! friend
ciao!
'''
})
self.assertEqual(
jstc.render_assets('jstc:test/**.hbs', force_inline=True, force_precompile=False),
'''\
<script type="text/x-handlebars" data-template-name="goodbye">so long!</script>\
<script type="text/x-handlebars" data-template-name="goodbye/friend">ciao!</script>\
<script type="text/x-handlebars" data-template-name="hello">hello!</script>\
''')
self.assertEqual(
jstc.render_assets('jstc:test/**.hbs', force_inline=True, force_precompile=False,
template_filter=lambda text, attrs: 'ciao' not in text),
'''\
<script type="text/x-handlebars" data-template-name="goodbye">so long!</script>\
<script type="text/x-handlebars" data-template-name="hello">hello!</script>\
''')
#----------------------------------------------------------------------------
def test_script_wrapper(self):
import jstc
with fso.push() as overlay:
self.writecontent({
'test/hello.hbs': 'hello, world!',
'test/hello/name.hbs': 'hello, {{name}}!',
})
compiled = jstc.render_assets(
'jstc:test/**.hbs', force_inline=True, force_precompile=True,
script_wrapper = lambda script, *args, **kw: '<SCRIPT>' + script + '</SCRIPT>')
if 'text/x-handlebars' in compiled:
raise unittest.SkipTest(
'handlebars executable not available (use "npm install handlebars")')
self.assertMultiLineEqual(
compiled,
'''\
<SCRIPT>(function(){var t=Handlebars.template,ts=Handlebars.templates=Handlebars.templates||{};ts["hello"]=t({"compiler":[7,">= 4.0.0"],"main":function(container,depth0,helpers,partials,data) {
return "hello, world!";
},"useData":true});ts["hello/name"]=t({"compiler":[7,">= 4.0.0"],"main":function(container,depth0,helpers,partials,data) {
var helper;
return "hello, "
+ container.escapeExpression(((helper = (helper = helpers.name || (depth0 != null ? depth0.name : depth0)) != null ? helper : helpers.helperMissing),(typeof helper === "function" ? helper.call(depth0 != null ? depth0 : {},{"name":"name","hash":{},"data":data}) : helper)))
+ "!";
},"useData":true});})();</SCRIPT>''')
#------------------------------------------------------------------------------
# end of $Id$
# $ChangeLog$
#------------------------------------------------------------------------------
| 34.990446
| 276
| 0.51388
|
789364317fca14b8a5c905f2ae517530c7309670
| 27,267
|
py
|
Python
|
bert4keras/snippets.py
|
Atakey/bert4keras
|
af6811b182abdd32b2a143a8f303a7a7cbaf76b0
|
[
"Apache-2.0"
] | 1
|
2021-08-29T16:21:41.000Z
|
2021-08-29T16:21:41.000Z
|
bert4keras/snippets.py
|
Atakey/bert4keras
|
af6811b182abdd32b2a143a8f303a7a7cbaf76b0
|
[
"Apache-2.0"
] | null | null | null |
bert4keras/snippets.py
|
Atakey/bert4keras
|
af6811b182abdd32b2a143a8f303a7a7cbaf76b0
|
[
"Apache-2.0"
] | null | null | null |
#! -*- coding: utf-8 -*-
# collection of code utilities
import os, sys, six, re, json
import logging
import numpy as np
from collections import defaultdict
from bert4keras.backend import K, keras, tf
_open_ = open
is_py2 = six.PY2
if not is_py2:
    basestring = str
    unichr = chr  # Python 3 has no unichr; alias it so strQ2B works on both versions
def to_array(*args):
"""批量转numpy的array
"""
results = [np.array(a) for a in args]
if len(args) == 1:
return results[0]
else:
return results
def is_string(s):
"""判断是否是字符串
"""
return isinstance(s, basestring)
def strQ2B(ustring):
"""全角符号转对应的半角符号
"""
rstring = ''
for uchar in ustring:
inside_code = ord(uchar)
        # full-width space converts directly
if inside_code == 12288:
inside_code = 32
        # other full-width characters (except space) convert by a fixed offset
elif (inside_code >= 65281 and inside_code <= 65374):
inside_code -= 65248
rstring += unichr(inside_code)
return rstring
def string_matching(s, keywords):
"""判断s是否至少包含keywords中的至少一个字符串
"""
for k in keywords:
if re.search(k, s):
return True
return False
def convert_to_unicode(text, encoding='utf-8', errors='ignore'):
"""字符串转换为unicode格式(假设输入为utf-8格式)
"""
if is_py2:
if isinstance(text, str):
text = text.decode(encoding, errors=errors)
else:
if isinstance(text, bytes):
text = text.decode(encoding, errors=errors)
return text
def convert_to_str(text, encoding='utf-8', errors='ignore'):
"""字符串转换为str格式(假设输入为utf-8格式)
"""
if is_py2:
if isinstance(text, unicode):
text = text.encode(encoding, errors=errors)
else:
if isinstance(text, bytes):
text = text.decode(encoding, errors=errors)
return text
class open:
"""模仿python自带的open函数
作用:1.主要是为了同时兼容py2和py3;2.增加了索引功能,方便读取大文件。
"""
def __init__(
self, name, mode='r', encoding=None, errors='strict', indexable=False
):
self.name = name
if is_py2:
self.file = _open_(name, mode)
else:
self.file = _open_(name, mode, encoding=encoding, errors=errors)
self.encoding = encoding
self.errors = errors
self.iterator = None
if indexable:
if is_string(indexable) and os.path.exists(indexable):
self.offsets = json.load(_open_(indexable))
else:
self.create_indexes()
if is_string(indexable):
json.dump(self.offsets, _open_(indexable, 'w'))
def create_indexes(self):
print('creating indexes ...')
self.offsets, offset = [], 0
pbar = keras.utils.Progbar(os.path.getsize(self.name))
while self.readline():
self.offsets.append(offset)
offset = self.tell()
pbar.update(offset)
self.seek(0)
print('indexes created.')
def __getitem__(self, key):
self.seek(self.offsets[key])
l = self.readline()
if self.encoding:
l = convert_to_unicode(l, self.encoding, self.errors)
return l
def __len__(self):
return len(self.offsets)
def __iter__(self):
if hasattr(self, 'offsets'):
for i in range(len(self)):
yield self[i]
else:
for l in self.file:
if self.encoding:
l = convert_to_unicode(l, self.encoding, self.errors)
yield l
def next(self):
if self.iterator is None:
self.iterator = self.__iter__()
return next(self.iterator)
def __next__(self):
return self.next()
def read(self):
text = self.file.read()
if self.encoding:
text = convert_to_unicode(text, self.encoding, self.errors)
return text
def readline(self):
text = self.file.readline()
if self.encoding:
text = convert_to_unicode(text, self.encoding, self.errors)
return text
def readlines(self):
if self.encoding:
return [
convert_to_unicode(text, self.encoding, self.errors)
for text in self.file.readlines()
]
else:
return self.file.readlines()
def write(self, text):
if self.encoding:
text = convert_to_str(text, self.encoding, self.errors)
self.file.write(text)
def flush(self):
self.file.flush()
def close(self):
self.file.close()
def tell(self):
return self.file.tell()
def seek(self, offset=0):
return self.file.seek(offset)
def __enter__(self):
return self
def __exit__(self, type, value, tb):
self.close()
def parallel_apply(
func,
iterable,
workers,
max_queue_size,
callback=None,
dummy=False,
random_seeds=True
):
"""多进程或多线程地将func应用到iterable的每个元素中。
注意这个apply是异步且无序的,也就是说依次输入a,b,c,但是
输出可能是func(c), func(a), func(b)。
参数:
callback: 处理单个输出的回调函数;
dummy: False是多进程/线性,True则是多线程/线性;
random_seeds: 每个进程的随机种子。
"""
if dummy:
from multiprocessing.dummy import Pool, Queue
else:
from multiprocessing import Pool, Queue
in_queue, out_queue, seed_queue = Queue(max_queue_size), Queue(), Queue()
if random_seeds is True:
random_seeds = [None] * workers
elif random_seeds is None or random_seeds is False:
random_seeds = []
for seed in random_seeds:
seed_queue.put(seed)
def worker_step(in_queue, out_queue):
"""单步函数包装成循环执行
"""
if not seed_queue.empty():
np.random.seed(seed_queue.get())
while True:
i, d = in_queue.get()
r = func(d)
out_queue.put((i, r))
    # start the processes/threads
pool = Pool(workers, worker_step, (in_queue, out_queue))
if callback is None:
results = []
    # post-processing function
def process_out_queue():
out_count = 0
for _ in range(out_queue.qsize()):
i, d = out_queue.get()
out_count += 1
if callback is None:
results.append((i, d))
else:
callback(d)
return out_count
    # feed in data, collect results
in_count, out_count = 0, 0
for i, d in enumerate(iterable):
in_count += 1
while True:
try:
in_queue.put((i, d), block=False)
break
except six.moves.queue.Full:
out_count += process_out_queue()
if in_count % max_queue_size == 0:
out_count += process_out_queue()
while out_count != in_count:
out_count += process_out_queue()
pool.terminate()
if callback is None:
results = sorted(results, key=lambda r: r[0])
return [r[1] for r in results]
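# A minimal usage sketch for parallel_apply (illustrative only; the helper name
# below is not part of the library). It assumes the semantics documented above:
# results come back unordered, and with callback=None they are re-sorted by index.
def _example_parallel_apply():
    squares = parallel_apply(
        func=lambda x: x * x,
        iterable=range(10),
        workers=2,
        max_queue_size=4,
        dummy=True  # threads, so the sketch also works where fork is unavailable
    )
    return squares  # [0, 1, 4, ..., 81], back in input order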
def sequence_padding(inputs, length=None, value=0, seq_dims=1, mode='post'):
"""Numpy函数,将序列padding到同一长度
"""
if length is None:
length = np.max([np.shape(x)[:seq_dims] for x in inputs], axis=0)
elif not hasattr(length, '__getitem__'):
length = [length]
slices = [np.s_[:length[i]] for i in range(seq_dims)]
slices = tuple(slices) if len(slices) > 1 else slices[0]
pad_width = [(0, 0) for _ in np.shape(inputs[0])]
outputs = []
for x in inputs:
x = x[slices]
for i in range(seq_dims):
if mode == 'post':
pad_width[i] = (0, length[i] - np.shape(x)[i])
elif mode == 'pre':
pad_width[i] = (length[i] - np.shape(x)[i], 0)
else:
raise ValueError('"mode" argument must be "post" or "pre".')
x = np.pad(x, pad_width, 'constant', constant_values=value)
outputs.append(x)
return np.array(outputs)
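# A small illustrative helper (not part of the original module) showing what
# sequence_padding does: pad ragged sequences with `value` up to a common length.
def _example_sequence_padding():
    padded = sequence_padding([[1, 2, 3], [4, 5], [6]])
    # padded is a (3, 3) array:
    # [[1, 2, 3],
    #  [4, 5, 0],
    #  [6, 0, 0]]
    return padded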
def truncate_sequences(maxlen, indices, *sequences):
"""截断总长度至不超过maxlen
"""
sequences = [s for s in sequences if s]
if not isinstance(indices, (list, tuple)):
indices = [indices] * len(sequences)
while True:
lengths = [len(s) for s in sequences]
if sum(lengths) > maxlen:
i = np.argmax(lengths)
sequences[i].pop(indices[i])
else:
return sequences
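# Illustrative only (not part of the original module): truncate_sequences pops
# items in place (at position `indices`, e.g. -1 for the tail) from whichever
# sequence is currently longest, until the combined length fits within maxlen.
def _example_truncate_sequences():
    a, b = [1, 2, 3, 4, 5], [6, 7, 8]
    truncate_sequences(6, -1, a, b)
    return a, b  # ([1, 2, 3], [6, 7, 8]) -- total length is now 6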
def text_segmentate(text, maxlen, seps='\n', strips=None):
"""将文本按照标点符号划分为若干个短句
"""
text = text.strip().strip(strips)
if seps and len(text) > maxlen:
pieces = text.split(seps[0])
text, texts = '', []
for i, p in enumerate(pieces):
if text and p and len(text) + len(p) > maxlen - 1:
texts.extend(text_segmentate(text, maxlen, seps[1:], strips))
text = ''
if i + 1 == len(pieces):
text = text + p
else:
text = text + p + seps[0]
if text:
texts.extend(text_segmentate(text, maxlen, seps[1:], strips))
return texts
else:
return [text]
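# Illustrative only (not part of the original module): text_segmentate splits the
# text on the separators in `seps` (tried in order) until every piece fits maxlen.
def _example_text_segmentate():
    return text_segmentate('aaa. bbb, ccc. ddd', maxlen=8, seps='.,')
    # -> ['aaa.', 'bbb,', 'ccc.', 'ddd']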
def is_one_of(x, ys):
"""判断x是否在ys之中
等价于x in ys,但有些情况下x in ys会报错
"""
for y in ys:
if x is y:
return True
return False
class DataGenerator(object):
"""数据生成器模版
"""
def __init__(self, data, batch_size=32, buffer_size=None):
self.data = data
self.batch_size = batch_size
if hasattr(self.data, '__len__'):
self.steps = len(self.data) // self.batch_size
if len(self.data) % self.batch_size != 0:
self.steps += 1
else:
self.steps = None
self.buffer_size = buffer_size or batch_size * 1000
def __len__(self):
return self.steps
def sample(self, random=False):
"""采样函数,每个样本同时返回一个is_end标记
"""
if random:
if self.steps is None:
def generator():
caches, isfull = [], False
for d in self.data:
caches.append(d)
if isfull:
i = np.random.randint(len(caches))
yield caches.pop(i)
elif len(caches) == self.buffer_size:
isfull = True
while caches:
i = np.random.randint(len(caches))
yield caches.pop(i)
else:
def generator():
for i in np.random.permutation(len(self.data)):
yield self.data[i]
data = generator()
else:
data = iter(self.data)
d_current = next(data)
for d_next in data:
yield False, d_current
d_current = d_next
yield True, d_current
def __iter__(self, random=False):
raise NotImplementedError
def forfit(self, random=True):
while True:
for d in self.__iter__(random):
yield d
def to_dataset(self, types, shapes, names=None, padded_batch=False):
"""转为tf.data.Dataset格式
如果传入names的话,自动把数据包装成dict形式。
"""
if names is None:
generator = self.forfit
else:
if is_string(names):
warps = lambda k, v: {k: v}
elif is_string(names[0]):
warps = lambda k, v: dict(zip(k, v))
else:
warps = lambda k, v: tuple(
dict(zip(i, j)) for i, j in zip(k, v)
)
def generator():
for d in self.forfit():
yield warps(names, d)
types = warps(names, types)
shapes = warps(names, shapes)
if padded_batch:
dataset = tf.data.Dataset.from_generator(
generator, output_types=types
)
dataset = dataset.padded_batch(self.batch_size, shapes)
else:
dataset = tf.data.Dataset.from_generator(
generator, output_types=types, output_shapes=shapes
)
dataset = dataset.batch(self.batch_size)
return dataset
class ViterbiDecoder(object):
"""Viterbi解码算法基类
"""
def __init__(self, trans, starts=None, ends=None):
self.trans = trans
self.num_labels = len(trans)
self.non_starts = []
self.non_ends = []
if starts is not None:
for i in range(self.num_labels):
if i not in starts:
self.non_starts.append(i)
if ends is not None:
for i in range(self.num_labels):
if i not in ends:
self.non_ends.append(i)
def decode(self, nodes):
"""nodes.shape=[seq_len, num_labels]
"""
        # preprocessing
nodes[0, self.non_starts] -= np.inf
nodes[-1, self.non_ends] -= np.inf
        # dynamic programming
labels = np.arange(self.num_labels).reshape((1, -1))
scores = nodes[0].reshape((-1, 1))
paths = labels
for l in range(1, len(nodes)):
M = scores + self.trans + nodes[l].reshape((1, -1))
idxs = M.argmax(0)
scores = M.max(0).reshape((-1, 1))
paths = np.concatenate([paths[:, idxs], labels], 0)
        # optimal path
return paths[:, scores[:, 0].argmax()]
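# Illustrative only (not part of the original module): ViterbiDecoder picks the
# label path maximising emission scores (nodes) plus transition scores (trans).
# The numbers below are made up purely for demonstration.
def _example_viterbi_decode():
    trans = np.array([[0.5, -0.5],
                      [-0.5, 0.5]])  # staying in the same label is rewarded
    nodes = np.array([[1.0, 0.0],
                      [0.2, 0.1],
                      [0.0, 2.0]])   # per-step label scores
    return ViterbiDecoder(trans).decode(nodes)  # -> array([0, 0, 1])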
def softmax(x, axis=-1):
"""numpy版softmax
"""
x = x - x.max(axis=axis, keepdims=True)
x = np.exp(x)
return x / x.sum(axis=axis, keepdims=True)
class AutoRegressiveDecoder(object):
"""通用自回归生成模型解码基类
包含beam search和random sample两种策略
"""
def __init__(self, start_id, end_id, maxlen, minlen=1):
self.start_id = start_id
self.end_id = end_id
self.maxlen = maxlen
self.minlen = minlen
self.models = {}
if start_id is None:
self.first_output_ids = np.empty((1, 0), dtype=int)
else:
self.first_output_ids = np.array([[self.start_id]])
@staticmethod
def wraps(default_rtype='probas', use_states=False):
"""用来进一步完善predict函数
目前包含:1. 设置rtype参数,并做相应处理;
2. 确定states的使用,并做相应处理;
3. 设置温度参数,并做相应处理。
"""
def actual_decorator(predict):
def new_predict(
self,
inputs,
output_ids,
states,
temperature=1,
rtype=default_rtype
):
assert rtype in ['probas', 'logits']
prediction = predict(self, inputs, output_ids, states)
if not use_states:
prediction = (prediction, None)
if default_rtype == 'logits':
prediction = (
softmax(prediction[0] / temperature), prediction[1]
)
elif temperature != 1:
probas = np.power(prediction[0], 1.0 / temperature)
probas = probas / probas.sum(axis=-1, keepdims=True)
prediction = (probas, prediction[1])
if rtype == 'probas':
return prediction
else:
return np.log(prediction[0] + 1e-12), prediction[1]
return new_predict
return actual_decorator
def last_token(self, model):
"""创建一个只返回最后一个token输出的新Model
"""
if model not in self.models:
outputs = [
keras.layers.Lambda(lambda x: x[:, -1])(output)
for output in model.outputs
]
self.models[model] = keras.models.Model(model.inputs, outputs)
return self.models[model]
def predict(self, inputs, output_ids, states=None):
"""用户需自定义递归预测函数
说明:定义的时候,需要用wraps方法进行装饰,传入default_rtype和use_states,
其中default_rtype为字符串logits或probas,probas时返回归一化的概率,
rtype=logits时则返回softmax前的结果或者概率对数。
返回:二元组 (得分或概率, states)
"""
raise NotImplementedError
def beam_search(self, inputs, topk, states=None, temperature=1, min_ends=1):
"""beam search解码
说明:这里的topk即beam size;
返回:最优解码序列。
"""
        inputs = [np.array([i]) for i in inputs]
        output_ids, output_scores = self.first_output_ids, np.zeros(1)
        for step in range(self.maxlen):
            scores, states = self.predict(
                inputs, output_ids, states, temperature, 'logits'
            )  # compute the scores for the current step
            if step == 0:  # after the first prediction, repeat the inputs topk times
                inputs = [np.repeat(i, topk, axis=0) for i in inputs]
            scores = output_scores.reshape((-1, 1)) + scores  # combine accumulated scores
            indices = scores.argpartition(-topk, axis=None)[-topk:]  # keep only topk
            indices_1 = indices // scores.shape[1]  # row indices
            indices_2 = (indices % scores.shape[1]).reshape((-1, 1))  # column indices
            output_ids = np.concatenate([output_ids[indices_1], indices_2],
                                        1)  # update the outputs
            output_scores = np.take_along_axis(
                scores, indices, axis=None
            )  # update the scores
            is_end = output_ids[:, -1] == self.end_id  # mark sequences that end with end_id
            end_counts = (output_ids == self.end_id).sum(1)  # count the end markers
            if output_ids.shape[1] >= self.minlen:  # minimum-length check
                best = output_scores.argmax()  # the highest-scoring candidate
                if is_end[best] and end_counts[best] >= min_ends:  # already terminated
                    return output_ids[best]  # output it directly
                else:  # otherwise keep only the unfinished part
                    flag = ~is_end | (end_counts < min_ends)  # mark unfinished sequences
                    if not flag.all():  # if some have finished
                        inputs = [i[flag] for i in inputs]  # drop finished sequences
                        output_ids = output_ids[flag]  # drop finished sequences
                        output_scores = output_scores[flag]  # drop finished sequences
                        end_counts = end_counts[flag]  # drop finished end counts
                        topk = flag.sum()  # adjust topk accordingly
        # max length reached: output directly
        return output_ids[output_scores.argmax()]
def random_sample(
self,
inputs,
n,
topk=None,
topp=None,
states=None,
temperature=1,
min_ends=1
):
"""随机采样n个结果
说明:非None的topk表示每一步只从概率最高的topk个中采样;而非None的topp
表示每一步只从概率最高的且概率之和刚好达到topp的若干个token中采样。
返回:n个解码序列组成的list。
"""
        inputs = [np.array([i]) for i in inputs]
        output_ids = self.first_output_ids
        results = []
        for step in range(self.maxlen):
            probas, states = self.predict(
                inputs, output_ids, states, temperature, 'probas'
            )  # compute the current probabilities
            probas /= probas.sum(axis=1, keepdims=True)  # make sure they are normalized
            if step == 0:  # after the first prediction, repeat the results n times
                probas = np.repeat(probas, n, axis=0)
                inputs = [np.repeat(i, n, axis=0) for i in inputs]
                output_ids = np.repeat(output_ids, n, axis=0)
            if topk is not None:
                k_indices = probas.argpartition(-topk,
                                                axis=1)[:, -topk:]  # keep only topk
                probas = np.take_along_axis(probas, k_indices, axis=1)  # topk probabilities
                probas /= probas.sum(axis=1, keepdims=True)  # renormalize
            if topp is not None:
                p_indices = probas.argsort(axis=1)[:, ::-1]  # sort from high to low
                probas = np.take_along_axis(probas, p_indices, axis=1)  # sorted probabilities
                cumsum_probas = np.cumsum(probas, axis=1)  # cumulative probabilities
                flag = np.roll(cumsum_probas >= topp, 1, axis=1)  # mark the part exceeding topp
                flag[:, 0] = False  # combined with np.roll above, this shifts the mask by one
                probas[flag] = 0  # zero out everything after the cutoff
                probas /= probas.sum(axis=1, keepdims=True)  # renormalize
            sample_func = lambda p: np.random.choice(len(p), p=p)  # sample according to the probabilities
            sample_ids = np.apply_along_axis(sample_func, 1, probas)  # perform the sampling
            sample_ids = sample_ids.reshape((-1, 1))  # align the shape
            if topp is not None:
                sample_ids = np.take_along_axis(
                    p_indices, sample_ids, axis=1
                )  # map back to the original ids
            if topk is not None:
                sample_ids = np.take_along_axis(
                    k_indices, sample_ids, axis=1
                )  # map back to the original ids
            output_ids = np.concatenate([output_ids, sample_ids], 1)  # update the outputs
            is_end = output_ids[:, -1] == self.end_id  # mark sequences that end with end_id
            end_counts = (output_ids == self.end_id).sum(1)  # count the end markers
            if output_ids.shape[1] >= self.minlen:  # minimum-length check
                flag = is_end & (end_counts >= min_ends)  # mark finished sequences
                if flag.any():  # if some have finished
                    for ids in output_ids[flag]:  # store the finished sequences
                        results.append(ids)
                    flag = (flag == False)  # mark the unfinished sequences
                    inputs = [i[flag] for i in inputs]  # keep only unfinished inputs
                    output_ids = output_ids[flag]  # keep only unfinished candidates
                    end_counts = end_counts[flag]  # keep only unfinished end counts
                    if len(output_ids) == 0:
                        break
        # if there are still unfinished sequences, add them to the results directly
        for ids in output_ids:
            results.append(ids)
        # return the results
        return results
def insert_arguments(**arguments):
"""装饰器,为类方法增加参数
(主要用于类的__init__方法)
"""
def actual_decorator(func):
def new_func(self, *args, **kwargs):
for k, v in arguments.items():
if k in kwargs:
v = kwargs.pop(k)
setattr(self, k, v)
return func(self, *args, **kwargs)
return new_func
return actual_decorator
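# Illustrative only (not part of the original module): insert_arguments attaches
# the given defaults to the instance as attributes, overridable via **kwargs.
class _ExampleInsertArguments(object):
    @insert_arguments(margin=0.1, scale=30)
    def __init__(self, name):
        self.name = name
# _ExampleInsertArguments('m').margin == 0.1
# _ExampleInsertArguments('m', scale=64).scale == 64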
def delete_arguments(*arguments):
"""装饰器,为类方法删除参数
(主要用于类的__init__方法)
"""
def actual_decorator(func):
def new_func(self, *args, **kwargs):
for k in arguments:
if k in kwargs:
raise TypeError(
'%s got an unexpected keyword argument \'%s\'' %
(self.__class__.__name__, k)
)
return func(self, *args, **kwargs)
return new_func
return actual_decorator
def longest_common_substring(source, target):
"""最长公共子串(source和target的最长公共切片区间)
返回:子串长度, 所在区间(四元组)
注意:最长公共子串可能不止一个,所返回的区间只代表其中一个。
"""
c, l, span = defaultdict(int), 0, (0, 0, 0, 0)
for i, si in enumerate(source, 1):
for j, tj in enumerate(target, 1):
if si == tj:
c[i, j] = c[i - 1, j - 1] + 1
if c[i, j] > l:
l = c[i, j]
span = (i - l, i, j - l, j)
return l, span
def longest_common_subsequence(source, target):
"""最长公共子序列(source和target的最长非连续子序列)
返回:子序列长度, 映射关系(映射对组成的list)
注意:最长公共子序列可能不止一个,所返回的映射只代表其中一个。
"""
c = defaultdict(int)
for i, si in enumerate(source, 1):
for j, tj in enumerate(target, 1):
if si == tj:
c[i, j] = c[i - 1, j - 1] + 1
elif c[i, j - 1] > c[i - 1, j]:
c[i, j] = c[i, j - 1]
else:
c[i, j] = c[i - 1, j]
l, mapping = c[len(source), len(target)], []
i, j = len(source) - 1, len(target) - 1
while len(mapping) < l:
if source[i] == target[j]:
mapping.append((i, j))
i, j = i - 1, j - 1
elif c[i + 1, j] > c[i, j + 1]:
j = j - 1
else:
i = i - 1
return l, mapping[::-1]
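# Illustrative only (not part of the original module): the mapping pairs up the
# indices (index in source, index in target) of one longest common subsequence.
def _example_longest_common_subsequence():
    return longest_common_subsequence(u'abcde', u'ace')
    # -> (3, [(0, 0), (2, 1), (4, 2)])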
def orthogonally_resize(a, new_shape, window=2):
"""简单的正交化缩放矩阵
"""
assert a.ndim == len(new_shape)
slices, a_norm, w = [], np.linalg.norm(a), window
for i, (d1, d2) in enumerate(zip(a.shape, new_shape)):
if d1 != d2:
k = d2 // d1 + int(d2 % d1 != 0)
if k > 1:
assert d1 % w == 0
a = a.reshape(a.shape[:i] + (d1 // w, w) + a.shape[i + 1:])
a = np.repeat(a, k, axis=i)
a = a.reshape(a.shape[:i] + (d1 * k,) + a.shape[i + 2:])
slices.append(np.s_[:d2])
a = a[tuple(slices)]
return a / np.linalg.norm(a) * a_norm
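# Illustrative only (not part of the original module): orthogonally_resize tiles
# and/or truncates an array to a new shape, then rescales it back to the original norm.
def _example_orthogonally_resize():
    a = np.eye(4)
    b = orthogonally_resize(a, (4, 8))
    return b.shape, np.allclose(np.linalg.norm(b), np.linalg.norm(a))  # ((4, 8), True)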
class WebServing(object):
"""简单的Web接口
用法:
arguments = {'text': (None, True), 'n': (int, False)}
web = WebServing(port=8864)
web.route('/gen_synonyms', gen_synonyms, arguments)
web.start()
# 然后访问 http://127.0.0.1:8864/gen_synonyms?text=你好
说明:
基于bottlepy简单封装,仅作为临时测试使用,不保证性能。
目前仅保证支持 Tensorflow 1.x + Keras <= 2.3.1。
欢迎有经验的开发者帮忙改进。
依赖:
pip install bottle
pip install paste
(如果不用 server='paste' 的话,可以不装paste库)
"""
def __init__(self, host='0.0.0.0', port=8000, server='paste'):
import bottle
self.host = host
self.port = port
self.server = server
self.graph = tf.get_default_graph()
self.sess = K.get_session()
self.set_session = K.set_session
self.bottle = bottle
def wraps(self, func, arguments, method='GET'):
"""封装为接口函数
参数:
func:要转换为接口的函数,需要保证输出可以json化,即需要
保证 json.dumps(func(inputs)) 能被执行成功;
arguments:声明func所需参数,其中key为参数名,value[0]为
对应的转换函数(接口获取到的参数值都是字符串
型),value[1]为该参数是否必须;
method:GET或者POST。
"""
def new_func():
outputs = {'code': 0, 'desc': u'succeeded', 'data': {}}
kwargs = {}
for key, value in arguments.items():
if method == 'GET':
result = self.bottle.request.GET.getunicode(key)
else:
result = self.bottle.request.POST.getunicode(key)
if result is None:
if value[1]:
outputs['code'] = 1
outputs['desc'] = 'lack of "%s" argument' % key
return json.dumps(outputs, ensure_ascii=False)
else:
if value[0] is not None:
result = value[0](result)
kwargs[key] = result
try:
with self.graph.as_default():
self.set_session(self.sess)
outputs['data'] = func(**kwargs)
except Exception as e:
outputs['code'] = 2
outputs['desc'] = str(e)
return json.dumps(outputs, ensure_ascii=False)
return new_func
def route(self, path, func, arguments, method='GET'):
"""添加接口
"""
func = self.wraps(func, arguments, method)
self.bottle.route(path, method=method)(func)
def start(self):
"""启动服务
"""
self.bottle.run(host=self.host, port=self.port, server=self.server)
class Hook:
"""注入uniout模块,实现import时才触发
"""
def __init__(self, module):
self.module = module
def __getattr__(self, attr):
"""使得 from bert4keras.backend import uniout
等效于 import uniout (自动识别Python版本,Python3
下则无操作。)
"""
if attr == 'uniout':
if is_py2:
import uniout
else:
return getattr(self.module, attr)
Hook.__name__ = __name__
sys.modules[__name__] = Hook(sys.modules[__name__])
del Hook
| 31.09122
| 80
| 0.523233
|
e21b4ed8dd7e78acf1562b401de08ecabc573262
| 3,097
|
py
|
Python
|
Connect-four/main.py
|
MehnoushFaghani/Ripples--PCP
|
e5e14f1c333f7b4b4935d71caf6c48089870b0f7
|
[
"MIT"
] | null | null | null |
Connect-four/main.py
|
MehnoushFaghani/Ripples--PCP
|
e5e14f1c333f7b4b4935d71caf6c48089870b0f7
|
[
"MIT"
] | null | null | null |
Connect-four/main.py
|
MehnoushFaghani/Ripples--PCP
|
e5e14f1c333f7b4b4935d71caf6c48089870b0f7
|
[
"MIT"
] | null | null | null |
# "By getting help of Konstantine Tsafatinos"
import numpy as np
from typing import Optional, Callable
from agents.common import PlayerAction, BoardPiece, SavedState, GenMove
import timeit
# from agents.agent_minimax.minimax import generate_move_minimax
# from agents.agent_random.random import generate_move_random
from agents.agent_MCTS.MCTS import generate_move_MCTS
def user_move(board: np.ndarray, _player: BoardPiece, saved_state: Optional[SavedState]):
action = PlayerAction(-1)
while not 0 <= action < board.shape[1]:
try:
action = PlayerAction(input("Column? "))
except:
pass
return action, saved_state
def human_vs_agent(
generate_move_1: GenMove,
generate_move_2: GenMove = user_move,
player_1: str = "Player 1",
player_2: str = "Player 2",
args_1: tuple = (),
args_2: tuple = (),
init_1: Callable = lambda board, player: None,
init_2: Callable = lambda board, player: None,
):
import time
from agents.common import PLAYER1, PLAYER2, GameState
from agents.common import initialize_game_state, pretty_print_board, apply_player_action, check_end_state
players = (PLAYER1, PLAYER2)
for play_first in (1, -1):
for init, player in zip((init_1, init_2)[::play_first], players):
init(initialize_game_state(), player)
saved_state = {PLAYER1: None, PLAYER2: None}
board = initialize_game_state()
gen_moves = (generate_move_1, generate_move_2)[::play_first]
player_names = (player_1, player_2)[::play_first]
gen_args = (args_1, args_2)[::play_first]
playing = True
while playing:
for player, player_name, gen_move, args in zip(
players, player_names, gen_moves, gen_args,
):
t0 = time.time()
print(pretty_print_board(board))
print(
f'{player_name} you are playing with {"X" if player == PLAYER1 else "O"}'
)
action, saved_state[player] = gen_move(
board.copy(), player, saved_state[player], *args
)
print(f"Move time: {time.time() - t0:.3f}s")
board, r_board = apply_player_action(board, action,
player, True)
end_state = check_end_state(board, player)
if end_state != GameState.STILL_PLAYING:
print(pretty_print_board(board))
if end_state == GameState.IS_DRAW:
print("Game ended in draw")
else:
print(
f'{player_name} won playing {"X" if player == PLAYER1 else "O"}'
)
playing = False
break
if __name__ == "__main__":
# human_vs_agent(user_move)
# human_vs_agent(generate_move_random)
# human_vs_agent(generate_move_minimax)
human_vs_agent(generate_move_MCTS)
| 38.234568
| 109
| 0.589603
|
2d323c9357993ceb4e5ff8af8789220beff166c7
| 3,327
|
py
|
Python
|
command_history_wordcloud/command_history_wordcloud.py
|
hatappo/command_history_wordcloud
|
b888aa990ecf66e44260efb622ba6b4e67b2879f
|
[
"MIT"
] | 3
|
2017-03-30T08:46:03.000Z
|
2020-06-09T15:34:52.000Z
|
command_history_wordcloud/command_history_wordcloud.py
|
hatappo/command_history_wordcloud
|
b888aa990ecf66e44260efb622ba6b4e67b2879f
|
[
"MIT"
] | 5
|
2019-06-04T07:59:51.000Z
|
2022-03-11T23:18:28.000Z
|
command_history_wordcloud/command_history_wordcloud.py
|
hatappo/command_history_wordcloud
|
b888aa990ecf66e44260efb622ba6b4e67b2879f
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import re, codecs, argparse
from pprint import pformat
from os import path
from subprocess import Popen, PIPE, STDOUT
from logging import getLogger, StreamHandler, Formatter, DEBUG
from wordcloud import WordCloud
####################
# Initialization
####################
logger = getLogger(__name__)
handler = StreamHandler()
handler.setLevel(DEBUG)
formatter = Formatter('%(asctime)s %(levelname)7s [%(name)s] %(message)s')
handler.setFormatter(formatter)
logger.setLevel(DEBUG)
logger.addHandler(handler)
####################
# Constants
####################
command_pattern = re.compile("[\s;=<].+$")
####################
# Functions
####################
def create_history_frequencies():
home = path.expanduser('~')
logger.debug("user home path = '%s'" % home)
shell_byte = Popen("echo $SHELL", shell=True, stdin=PIPE, stdout=PIPE, stderr=STDOUT).communicate()[0]
shell_path = shell_byte.decode("utf-8").strip()
shell_name = shell_path.rsplit("/", 1)[-1]
logger.debug("shell path = '%s'" % shell_path)
logger.debug("shell name = '%s'" % shell_name)
words = {}
if shell_name in ["bash", "sh", "ksh"]:
if shell_name in ["ksh"]: filepath = home + "/.sh_history"
elif shell_name in ["bash", "sh"]: filepath = home + "/.bash_history"
else: raise Exception()
with codecs.open(filepath, "r", encoding='utf-8', errors='ignore') as f:
for line in f:
word = command_pattern.sub("", line).strip()
words[word] = words.get(word, 0) + 1
elif shell_name in ["zsh"]:
with codecs.open(home + "/.zsh_history", "r", encoding='utf-8', errors='ignore') as f:
for line in f:
parts = line.split(";", 1)
if len(parts) < 2: continue
word = command_pattern.sub("", parts[1]).strip()
words[word] = words.get(word, 0) + 1
elif shell_name in ["csh"]:
logger.warning("Not implemented!") # TODO:
else:
raise Exception("Unknown shell : '%1'" % shell)
return tuple(words.items())
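# Illustrative only (not part of the original script): command_pattern keeps just
# the leading command word by stripping everything from the first separator
# (whitespace, ';', '=', '<') onwards.
def _example_command_pattern():
	return (command_pattern.sub("", "git commit -m msg").strip(),
		command_pattern.sub("", "FOO=bar make test").strip())  # -> ("git", "FOO")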
def load_stop_words(path):
if path == None: return set()
with codecs.open(path, "r", encoding='utf-8', errors='ignore') as f:
return set(f.read().split())
def create_wordcloud(frequencies, stop_words):
	if len(frequencies) == 0: raise Exception("No history is found.")
logger.debug("word frequencies count = %s" % len(frequencies))
logger.debug("stop words = %s" % pformat(stop_words))
wordcloud = WordCloud(background_color="black", width=900, height=600, stopwords=stop_words).generate_from_frequencies(frequencies)
image = wordcloud.to_image()
image.show()
def main():
# Command line args
parser = argparse.ArgumentParser(description="Parse and count your command line history, and generates a word-cloud image from that.")
parser.add_argument("-s,", "--stop_words", nargs="?", help="File path that has words you don't want to use. The words must be separated by space or LineFeed.")
args = parser.parse_args()
stop_words_file = args.stop_words
logger.debug("stop words file = '%s'" % stop_words_file)
# main
logger.info("Started %s." % path.basename(__file__))
words = create_history_frequencies()
stop_words = load_stop_words(stop_words_file)
create_wordcloud(words, stop_words)
logger.info("Finished %s." % path.basename(__file__))
####################
# Main
####################
if __name__ == '__main__':
main()
| 32.300971
| 160
| 0.662158
|
34db83f3867a1a73267441fe711d649a4fdb8259
| 2,239
|
py
|
Python
|
tools/giws/datatypes/IntBufferDataGiws.py
|
sguazt/dcsxx-testbed
|
e7210f0c7f54256d5bf0c90297e0c4f9eaf82da0
|
[
"Apache-2.0"
] | null | null | null |
tools/giws/datatypes/IntBufferDataGiws.py
|
sguazt/dcsxx-testbed
|
e7210f0c7f54256d5bf0c90297e0c4f9eaf82da0
|
[
"Apache-2.0"
] | null | null | null |
tools/giws/datatypes/IntBufferDataGiws.py
|
sguazt/dcsxx-testbed
|
e7210f0c7f54256d5bf0c90297e0c4f9eaf82da0
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python -u
# Copyright or Copr. INRIA/Scilab - Sylvestre LEDRU
#
# Sylvestre LEDRU - <sylvestre.ledru@scilab.org> <sylvestre@ledru.info>
#
# This software is a computer program whose purpose is to generate C++ wrapper
# for Java objects/methods.
#
# This software is governed by the CeCILL license under French law and
# abiding by the rules of distribution of free software. You can use,
# modify and/ or redistribute the software under the terms of the CeCILL
# license as circulated by CEA, CNRS and INRIA at the following URL
# "http://www.cecill.info".
#
# As a counterpart to the access to the source code and rights to copy,
# modify and redistribute granted by the license, users are provided only
# with a limited warranty and the software's author, the holder of the
# economic rights, and the successive licensors have only limited
# liability.
#
# In this respect, the user's attention is drawn to the risks associated
# with loading, using, modifying and/or developing or reproducing the
# software by the user in light of its specific status of free software,
# that may mean that it is complicated to manipulate, and that also
# therefore means that it is reserved for developers and experienced
# professionals having in-depth computer knowledge. Users are therefore
# encouraged to load and test the software's suitability as regards their
# requirements in conditions enabling the security of their systems and/or
# data to be ensured and, more generally, to use and operate it in the
# same conditions as regards security.
#
# The fact that you are presently reading this means that you have had
# knowledge of the CeCILL license and that you accept its terms.
#
# For more information, see the file COPYING
from dataBufferGiws import dataBufferGiws
from configGiws import configGiws
from JNIFrameWork import JNIFrameWork
class IntBufferDataGiws(dataBufferGiws):
__isArray=True
nativeType="int"
def getTypeSignature(self):
return "Ljava/nio/IntBuffer;"
def getJavaTypeSyntax(self):
return "jobject"
def getRealJavaType(self):
return "java.lang.IntBuffer"
def getDescription(self):
return "Java IntBuffer"
def getJavaBufferType(self):
return "IntBuffer"
| 37.316667
| 79
| 0.769093
|
7d5f8a23d8cd9daffc8406367dbf8e0e85e4aca9
| 3,380
|
py
|
Python
|
test_block.py
|
mjoniak/adder
|
c84cb4927b91e6cd7801677ed683795a2026b2a6
|
[
"MIT"
] | null | null | null |
test_block.py
|
mjoniak/adder
|
c84cb4927b91e6cd7801677ed683795a2026b2a6
|
[
"MIT"
] | 5
|
2018-02-07T10:24:51.000Z
|
2018-02-07T10:25:54.000Z
|
test_block.py
|
mjoniak/adder
|
c84cb4927b91e6cd7801677ed683795a2026b2a6
|
[
"MIT"
] | null | null | null |
from collections import namedtuple
from unittest import TestCase
from block import Block, source, negation, alternative, conjunction, nand, nor, xor
Point = namedtuple('Point', ['x', 'y'])
class ConnectorTest(TestCase):
def test_inside_connector_no_connectors(self):
block = Block(0, 0, input_connectors_n=0, output_connectors_n=0)
conn = block.inside_connector(Point(0, 0))
self.assertIsNone(conn)
def test_inside_connector_input(self):
block = Block(0, 0, input_connectors_n=1, output_connectors_n=0)
left_middle = Point(0, Block.HEIGHT / 2)
conn = block.inside_connector(left_middle)
self.assertTrue(conn.contains(left_middle))
def test_inside_connector_output(self):
block = source()
right_middle = Point(Block.WIDTH, Block.HEIGHT / 2)
conn = block.inside_connector(right_middle)
self.assertTrue(conn.contains(right_middle))
def test_source_block_pushes_signal(self):
source_block = source()
source_block.push()
self.assertEqual(1, source_block.outputs[0].value)
def test_connector_pushes_to_next(self):
source_block = source()
second_block = Block(0, 0, input_connectors_n=1, output_connectors_n=0)
source_block.outputs[0].connect_with(second_block.inputs[0])
source_block.push()
for c2 in second_block.inputs:
self.assertEqual(1, c2.value)
def test_negation(self):
source_block = source(0)
not_block = negation()
source_block.outputs[0].connect_with(not_block.inputs[0])
source_block.push()
not_block.push()
self.assertEqual(1, not_block.outputs[0].value)
def test_alternative(self):
self.assertEqual(0, test_block(0, 0, alternative))
self.assertEqual(1, test_block(1, 0, alternative))
self.assertEqual(1, test_block(0, 1, alternative))
self.assertEqual(1, test_block(1, 1, alternative))
def test_conjunction(self):
self.assertEqual(0, test_block(0, 0, conjunction))
self.assertEqual(0, test_block(1, 0, conjunction))
self.assertEqual(0, test_block(0, 1, conjunction))
self.assertEqual(1, test_block(1, 1, conjunction))
def test_nand(self):
self.assertEqual(1, test_block(0, 0, nand))
self.assertEqual(1, test_block(1, 0, nand))
self.assertEqual(1, test_block(0, 1, nand))
self.assertEqual(0, test_block(1, 1, nand))
def test_nor(self):
self.assertEqual(1, test_block(0, 0, nor))
self.assertEqual(0, test_block(1, 0, nor))
self.assertEqual(0, test_block(0, 1, nor))
self.assertEqual(0, test_block(1, 1, nor))
def test_xor(self):
self.assertEqual(0, test_block(0, 0, xor))
self.assertEqual(1, test_block(1, 0, xor))
self.assertEqual(1, test_block(0, 1, xor))
self.assertEqual(0, test_block(1, 1, xor))
def test_block(first_input, second_input, block_factory) -> int:
first_source_block = source(value=first_input)
second_source_block = source(value=second_input)
block = block_factory()
first_source_block.outputs[0].connect_with(block.inputs[0])
second_source_block.outputs[0].connect_with(block.inputs[1])
first_source_block.push()
second_source_block.push()
block.push()
return block.outputs[0].value
| 36.73913
| 83
| 0.676036
|
53c6f49e002caa6233ee8f981d8c685cdef9cc6c
| 14,189
|
py
|
Python
|
lib/membase/api/esrest_client.py
|
ramalingam-cb/testrunner
|
81cea7a5a493cf0c67fca7f97c667cd3c6ad2142
|
[
"Apache-2.0"
] | null | null | null |
lib/membase/api/esrest_client.py
|
ramalingam-cb/testrunner
|
81cea7a5a493cf0c67fca7f97c667cd3c6ad2142
|
[
"Apache-2.0"
] | null | null | null |
lib/membase/api/esrest_client.py
|
ramalingam-cb/testrunner
|
81cea7a5a493cf0c67fca7f97c667cd3c6ad2142
|
[
"Apache-2.0"
] | null | null | null |
from membase.api.rest_client import RestConnection, Bucket, BucketStats, OtpNode, Node
from remote.remote_util import RemoteMachineShellConnection
from TestInput import TestInputSingleton
from pyes import ES, managers, query
import logger
import time
import requests
log = logger.Logger.get_logger()
# EsRestConnection: subclasses RestConnection for use against elastic-search nodes.
# Instance created by membase.api.rest_client.RestConnection
# when elastic-search endpoint is detected so it is not necessary to
# directly import this module into tests
class EsRestConnection(RestConnection):
def __init__(self, serverInfo, proto = "http"):
#serverInfo can be a json object
#only connect pyes to master es node
#in the case that other nodes are taken down
#because http requests will fail
# TODO: dynamic master node detection
if isinstance(serverInfo, dict):
self.ip = serverInfo["ip"]
self.rest_username = serverInfo["username"]
self.rest_password = serverInfo["password"]
self.username = serverInfo["es_username"]
self.password = serverInfo["es_password"]
self.port = 9091 #serverInfo["port"]
else:
self.ip = serverInfo.ip
self.rest_username = serverInfo.rest_username
self.rest_password = serverInfo.rest_password
self.username = serverInfo.es_username
self.password = serverInfo.es_password
self.port = 9091 # serverInfo.port
self.baseUrl = "http://{0}:{1}/".format(self.ip, self.port)
self.capiBaseUrl = self.baseUrl
self.esHttpUrl = "http://{0}:9200".format(self.ip)
self.http_port = str(int(self.port) + 109)
self.proto = proto
self.conn = ES(server=self.esHttpUrl)
self.manager = managers.Cluster(self.conn)
self.test_params = TestInputSingleton.input
self.docs = None
def get_index_stats(self):
return ES.index_stats()
def get_indices(self):
schema = self.conn.indices.get_mapping()
indices_full_list = schema.get_all_indices()
just_indices = [index for index in indices_full_list if not index.startswith(".")]
return just_indices
def get_indices_as_buckets(self, doc_type='couchbaseDocument'):
buckets = []
indices = self.get_indices()
for index in indices:
bucket = Bucket()
q = query.MatchAllQuery()
docs = self.conn.search(q,index,doc_type)
bucket.name = index
bucket.type = "es"
bucket.port = self.port
bucket.authType = None
bucket.saslPassword = self.password
bucket.nodes = list()
#vBucketServerMap
bucketStats = BucketStats()
bucketStats.itemCount = docs.count()
bucket.stats = bucketStats
buckets.append(bucket)
bucket.master_id = "es@"+self.ip
return buckets
def get_bucket(self, bucket_name, doc_type='couchbaseDocument'):
for bucket in self.get_indices_as_buckets(doc_type):
if bucket.name == bucket_name:
return bucket
return
def get_buckets(self):
return self.get_indices_as_buckets()
def delete_index(self, name):
self.conn.indices.delete_index(name)
return self.conn.indices.exists_index(name)
def create_index(self, name):
if self.conn.indices.exists_index(name):
self.delete_index(name)
self.conn.indices.create_index(name)
return self.conn.indices.exists_index(name)
def delete_bucket(self, name):
return self.delete_index(name)
def create_bucket(self, *args, **kwargs):
name = 'default'
if len(args) > 0:
name = args[0]
else:
name = kwargs['bucket']
return self.create_index(name)
def is_ns_server_running(self, timeout_in_seconds=360):
return True
def node_statuses(self, timeout=120):
otp_nodes = []
for node in self.get_nodes():
#get otp,get status
otp_node = OtpNode(id=node.id,
status=node.status)
otp_node.ip = node.ip
otp_node.port = node.port
otp_node.replication = None
otp_nodes.append(node)
return otp_nodes
def get_nodes_self(self, timeout=120):
for node in self.get_nodes():
# force to return master node
if node.port == 9091:
return node
return
def get_nodes(self):
es_nodes = []
nodes = self.manager.state()['nodes']
status = self.manager.health()['status']
if status == "green":
status = "healthy"
for node_key in nodes:
nodeInfo = nodes[node_key]
ex_params = self.get_node_params(nodeInfo)
nodeInfo.update({'ssh_password' : ex_params.ssh_password,
'ssh_username' : ex_params.ssh_username})
nodeInfo['key'] = node_key
node = ESNode(nodeInfo)
node.status = status
es_nodes.append(node)
return es_nodes
def get_node_params(self, info):
ip, port = parse_addr(info["transport_address"])
clusters = self.test_params.clusters
master_node = None
for _id in clusters:
for node in clusters[_id]:
if node.ip == ip and int(node.port) == port:
return node
if int(node.port) == 9091:
master_node = node
# use params from master node
return master_node
def search_term(self, key, indices=["default"]):
result = None
params = {"term":{"_id":key}}
query = ES.Search(params)
row = self.conn.search(query, indices = indices)
if row.total > 0:
result = row[0]
return result
def term_exists(self, key, indices=["default"]):
return self.search_term(key, indices = indices) is not None
def all_docs(self, keys_only = False, indices=["default"],size=10000):
q = query.MatchAllQuery()
docs = self.conn.search(q,indices=indices,doc_types='couchbaseDocument')
res_docs = []
for row in docs:
if keys_only:
row = row['meta']['id']
res_docs.append(row)
return res_docs
# check if a key exists by checking all known nodes
# See - CBES-17
# for use when it seems nodes are out of sync
def search_all_nodes(self, key, indices=["default"]):
doc = None
for index in indices:
for _node in self.get_nodes():
ip, port = (_node.ip, _node.ht_port)
r = requests.get('http://%s:%s/%s/couchbaseDocument/%s?preference=_only_node:%s' %\
(ip, port, index, key, _node.key))
if r.status_code == 200 :
if r.json()['_id'] == key:
doc = r.json()
break
return doc
def fetch_bucket_stats(self, bucket_name='default'):
bucket = self.get_bucket(bucket_name=bucket_name)
return bucket.stats
def start_replication(self, *args, **kwargs):
return "es",self.ip
def _rebalance_progress(self, *args, **kwargs):
return 100
def _rebalance_progress_status(self, *args, **kwargs):
return 'not running'
def get_vbuckets(self, *args, **kwargs):
return ()
def replace_template(self, node, file):
f = open(file, 'r')
template = f.read().replace('\n', ' ')
api = "http://{0}:9200/_template/couchbase".format(node.ip)
status, content, header = self._http_request(api, 'PUT', template)
if status:
log.info('uploaded couchbase template: '+file)
else:
log.error('template upload failed: {0}'.format(content))
def add_node(self, user='', password='', remoteIp='', port='8091',zone_name='', services=None):
pass
def update_configuration(self, node, commands):
rmc = RemoteMachineShellConnection(node)
shell = rmc._ssh_client.invoke_shell()
for command in commands:
log.info('Adding elastic search config {0} on node {1}'.format(command, self.ip))
shell.send('echo "{0}" >> ~/elasticsearch/config/elasticsearch.yml \n'.format(command))
while not shell.recv_ready():
time.sleep(2)
rc = shell.recv(1024)
log.info(rc)
def reset_configuration(self, node, count=1):
rmc = RemoteMachineShellConnection(node)
shell = rmc._ssh_client.invoke_shell()
log.info('Removing last {0} lines from elastic search config on node {1}'.format(count, self.ip))
shell.send('head -n -{0} ~/elasticsearch/config/elasticsearch.yml > temp ; mv temp ~/elasticsearch/config/elasticsearch.yml \n'.format(count))
while not shell.recv_ready():
time.sleep(2)
rc = shell.recv(1024)
log.info(rc)
def start_es_node(self, node):
rmc = RemoteMachineShellConnection(node)
shell=rmc._ssh_client.invoke_shell()
es_kill = "pkill -f elasticsearch;"
shell.send(es_kill+' \n')
while not shell.recv_ready():
time.sleep(2)
rc = shell.recv(1024)
log.info(rc)
log.info("Sleep for 30 seconds")
time.sleep(30)
# define es exec path if not in $PATH environment
es_bin = "~/elasticsearch/bin/elasticsearch -Dtransport.couchbase=TRACE -Dcom.couchbase=TRACE > /var/log/es.log 2>&1 &"
if 'es_bin' in TestInputSingleton.input.test_params:
es_bin = TestInputSingleton.input.test_params['es_bin']
# connect to remote node
log.info('Starting node: %s:%s' % (node.ip, node.port))
# start es service
shell.send(es_bin+' \n')
while not shell.recv_ready():
time.sleep(2)
rc = shell.recv(1024)
log.info(rc)
log.info("Sleep for 5 seconds before the node can appear")
time.sleep(5)
# wait for new node
tries = 0
        while tries < 10:
            for cluster_node in self.get_nodes():
                if cluster_node.ip == node.ip and cluster_node.port == int(node.port):
                    return
            log.info('Waiting for new node to appear')
            time.sleep(5)
            tries = tries + 1
raise Exception("failed to add node to cluster: %s:%s" % (node.ip,node.port))
def log_client_error(self, post):
# cannot post req errors to 9091
pass
def vbucket_map_ready(self, *args, **kwargs):
return True
def init_cluster(self, *args, **kwargs):
pass
def init_cluster_memoryQuota(self, *args, **kwargs):
pass
def set_reb_cons_view(self, *args, **kwargs):
pass
def set_reb_index_waiting(self, *args, **kwargs):
pass
def set_rebalance_index_pausing(self, *args, **kwargs):
pass
def set_max_parallel_indexers(self, *args, **kwargs):
pass
def set_max_parallel_replica_indexers(self, *args, **kwargs):
pass
def rebalance(self, otpNodes, ejectedNodes):
# shutdown ejected nodes
# wait for shards to be rebalanced
nodesToShutdown = \
[node for node in self.get_nodes() if node.id in ejectedNodes]
for node in nodesToShutdown:
self.eject_node(node)
def eject_node(self, node):
api = "http://%s:9200/_cluster/nodes/local/_shutdown?delay=0s" % (node.ip)
status, content, header = self._http_request(api, 'POST', '')
if status:
log.info('ejected node: '+node.ip)
else:
log.error('rebalance operation failed: {0}'.format(content))
def monitorRebalance(self, stop_if_loop=False):
# since removed nodes are shutdown use master node for monitoring
return self.get_nodes_self()
def get_pools_info(self):
return {'pools' : []}
def add_remote_cluster(self, *args, **kwargs):
# detect 2:1 mapping and do spectial cluster add
# otherwise run super method
pass
def remove_all_remote_clusters(self):
pass
def remove_all_replications(self):
pass
def is_cluster_mixed(self):
return False
def set_internalSetting(self, param, value):
return {'ok' : True}
def parse_addr(addr):
ip = addr[addr.rfind('/')+1:addr.rfind(':')]
port = addr[addr.rfind(':')+1:-1]
return str(ip), int(port)
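# A quick illustration of parse_addr (a hedged sketch, not part of the original
# client): it assumes the Elasticsearch-style address format
# "inet[/<ip>:<port>]", where the trailing "]" is dropped by the [:-1] slice.
def _example_parse_addr():
    ip, port = parse_addr("inet[/10.1.2.3:9300]")  # made-up address
    assert (ip, port) == ("10.1.2.3", 9300)
    return ip, port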
class ESNode(Node):
def __init__(self, info):
super(ESNode, self).__init__()
self.key = str(info['key'])
self.ip, self.port = parse_addr(info["transport_address"])
self.tr_ip, self.tr_port = parse_addr(info["transport_address"])
self.port = 9091
if 'http_address' in info:
self.ht_ip, self.ht_port = parse_addr(info["http_address"])
# truncate after space, or comma
name = str(info['name'][:info['name'].find(' ')])
name = name[:name.find(',')]
self.id = "es_%s@%s" % (name, self.ip)
self.ssh_username = info['ssh_username']
self.ssh_password = info['ssh_password']
self.ssh_key = ''
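# A hedged construction sketch (not part of the original module): the "info"
# dict mirrors what the ES nodes API returns, plus the ssh credentials this
# helper expects; every value below is made up.
_EXAMPLE_ES_NODE_INFO = {
    'key': 'node-key-1',
    'name': 'node-1 extra',
    'transport_address': 'inet[/10.1.2.3:9300]',
    'http_address': 'inet[/10.1.2.3:9200]',
    'ssh_username': 'root',
    'ssh_password': 'password',
}
# ESNode(_EXAMPLE_ES_NODE_INFO) parses both addresses, keeps 9200 as ht_port and
# forces self.port to 9091, the pseudo-REST port used throughout this helper.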
| 32.10181
| 152
| 0.597717
|
688d84a74f969b1c00ebf03eca6f3c6c8a4d6970
| 8,785
|
py
|
Python
|
localstack/utils/aws/aws_models.py
|
OwnZones/localstack
|
490408953e2a32d8402a997ea7eb36568c983b7a
|
[
"Apache-2.0"
] | null | null | null |
localstack/utils/aws/aws_models.py
|
OwnZones/localstack
|
490408953e2a32d8402a997ea7eb36568c983b7a
|
[
"Apache-2.0"
] | 1
|
2019-07-01T12:08:26.000Z
|
2019-07-01T12:08:26.000Z
|
localstack/utils/aws/aws_models.py
|
OwnZones/localstack
|
490408953e2a32d8402a997ea7eb36568c983b7a
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import print_function
import time
import json
import six
if six.PY3:
long = int
class Component(object):
def __init__(self, id, env=None):
self.id = id
self.env = env
self.created_at = None
def name(self):
return self.id
def __repr__(self):
return self.__str__()
def __str__(self):
return '<%s:%s>' % (self.__class__.__name__, self.id)
class KinesisStream(Component):
def __init__(self, id, params=None, num_shards=1, connection=None):
super(KinesisStream, self).__init__(id)
params = params or {}
self.shards = []
self.stream_name = params.get('name', self.name())
self.num_shards = params.get('shards', num_shards)
self.conn = connection
self.stream_info = params
def name(self):
return self.id.split(':stream/')[-1]
def connect(self, connection):
self.conn = connection
def describe(self):
r = self.conn.describe_stream(StreamName=self.stream_name)
return r.get('StreamDescription')
def create(self, raise_on_error=False):
try:
self.conn.create_stream(StreamName=self.stream_name, ShardCount=self.num_shards)
except Exception as e:
# TODO catch stream already exists exception, otherwise rethrow
if raise_on_error:
raise e
def get_status(self):
description = self.describe()
return description.get('StreamStatus')
def put(self, data, key):
if not isinstance(data, str):
data = json.dumps(data)
return self.conn.put_record(StreamName=self.stream_name, Data=data, PartitionKey=key)
def read(self, amount=-1, shard='shardId-000000000001'):
if not self.conn:
raise Exception('Please create the Kinesis connection first.')
s_iterator = self.conn.get_shard_iterator(self.stream_name, shard, 'TRIM_HORIZON')
record = self.conn.get_records(s_iterator['ShardIterator'])
while True:
try:
if record['NextShardIterator'] is None:
break
else:
next_entry = self.conn.get_records(record['NextShardIterator'])
if len(next_entry['Records']):
print(next_entry['Records'][0]['Data'])
record = next_entry
except Exception as e:
                print('Error reading from Kinesis stream "%s": %s' % (self.stream_name, e))
def wait_for(self):
GET_STATUS_SLEEP_SECS = 5
GET_STATUS_RETRIES = 50
for i in range(0, GET_STATUS_RETRIES):
try:
status = self.get_status()
if status == 'ACTIVE':
return
except Exception:
# swallowing this exception should be ok, as we are in a retry loop
pass
time.sleep(GET_STATUS_SLEEP_SECS)
raise Exception('Failed to get active status for stream "%s", giving up' % self.stream_name)
def destroy(self):
self.conn.delete_stream(StreamName=self.stream_name)
time.sleep(2)
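# Minimal usage sketch for KinesisStream (hedged, not part of the original
# module): it assumes a boto3 "kinesis" client, e.g. one pointed at LocalStack,
# and the stream ARN below is made up.
def _example_kinesis_stream(kinesis_client):
    stream = KinesisStream('arn:aws:kinesis:us-east-1:000000000000:stream/example',
                           params={'name': 'example', 'shards': 1},
                           connection=kinesis_client)
    stream.create()    # CreateStream through the injected client
    stream.wait_for()  # poll DescribeStream until the stream is ACTIVE
    stream.put({'event': 'ping'}, key='partition-1')
    return stream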
class KinesisShard(Component):
MAX_KEY = '340282366920938463463374607431768211455'
def __init__(self, id):
super(KinesisShard, self).__init__(id)
self.stream = None
self.start_key = '0'
self.end_key = KinesisShard.MAX_KEY # 128 times '1' binary as decimal
self.child_shards = []
def print_tree(self, indent=''):
print('%s%s' % (indent, self))
for c in self.child_shards:
c.print_tree(indent=indent + ' ')
def length(self):
return long(self.end_key) - long(self.start_key)
def percent(self):
return 100.0 * self.length() / float(KinesisShard.MAX_KEY)
def __str__(self):
return ('Shard(%s, length=%s, percent=%s, start=%s, end=%s)' %
(self.id, self.length(), self.percent(), self.start_key,
self.end_key))
    @staticmethod
    def sort(shards):
        # sort by numeric start key; the Python 2-only "cmp" argument to
        # sorted() is not available on Python 3
        return sorted(shards, key=lambda shard: long(shard.start_key))
@staticmethod
def max(shards):
max_shard = None
max_length = long(0)
for s in shards:
if s.length() > max_length:
max_shard = s
max_length = s.length()
return max_shard
class FirehoseStream(KinesisStream):
def __init__(self, id):
super(FirehoseStream, self).__init__(id)
self.destinations = []
def name(self):
return self.id.split(':deliverystream/')[-1]
class LambdaFunction(Component):
def __init__(self, arn):
super(LambdaFunction, self).__init__(arn)
self.event_sources = []
self.targets = []
self.versions = {}
self.aliases = {}
self.envvars = {}
self.runtime = None
self.handler = None
self.cwd = None
def get_version(self, version):
return self.versions.get(version)
def name(self):
return self.id.split(':function:')[-1]
def function(self, qualifier=None):
if not qualifier:
qualifier = '$LATEST'
version = qualifier if qualifier in self.versions else \
self.aliases.get(qualifier).get('FunctionVersion')
return self.versions.get(version).get('Function')
def qualifier_exists(self, qualifier):
return qualifier in self.aliases or qualifier in self.versions
def __str__(self):
return '<%s:%s>' % (self.__class__.__name__, self.name())
class DynamoDB(Component):
def __init__(self, id, env=None):
super(DynamoDB, self).__init__(id, env=env)
self.count = -1
self.bytes = -1
def name(self):
return self.id.split(':table/')[-1]
class DynamoDBStream(Component):
def __init__(self, id):
super(DynamoDBStream, self).__init__(id)
self.table = None
class DynamoDBItem(Component):
def __init__(self, id, table=None, keys=None):
super(DynamoDBItem, self).__init__(id)
self.table = table
self.keys = keys
def __eq__(self, other):
if not isinstance(other, DynamoDBItem):
return False
return (other.table == self.table and
other.id == self.id and
other.keys == self.keys)
def __hash__(self):
return hash(self.table) + hash(self.id) + hash(self.keys)
class ElasticSearch(Component):
def __init__(self, id):
super(ElasticSearch, self).__init__(id)
self.indexes = []
self.endpoint = None
def name(self):
return self.id.split(':domain/')[-1]
class SqsQueue(Component):
def __init__(self, id):
super(SqsQueue, self).__init__(id)
def name(self):
return self.id.split(':')[-1]
class S3Bucket(Component):
def __init__(self, id):
super(S3Bucket, self).__init__(id)
self.notifications = []
def name(self):
return self.id.split('arn:aws:s3:::')[-1]
class S3Notification(Component):
def __init__(self, id):
super(S3Notification, self).__init__(id)
self.target = None
self.trigger = None
class EventSource(Component):
def __init__(self, id):
super(EventSource, self).__init__(id)
@staticmethod
def get(obj, pool=None, type=None):
pool = pool or {}
if not obj:
return None
if isinstance(obj, Component):
obj = obj.id
if obj in pool:
return pool[obj]
inst = None
if obj.startswith('arn:aws:kinesis:'):
inst = KinesisStream(obj)
if obj.startswith('arn:aws:lambda:'):
inst = LambdaFunction(obj)
elif obj.startswith('arn:aws:dynamodb:'):
if '/stream/' in obj:
table_id = obj.split('/stream/')[0]
table = DynamoDB(table_id)
inst = DynamoDBStream(obj)
inst.table = table
else:
inst = DynamoDB(obj)
elif type:
for o in EventSource.filter_type(pool, type):
if o.name() == obj:
return o
if type == ElasticSearch:
if o.endpoint == obj:
return o
else:
print("Unexpected object name: '%s'" % obj)
return inst
@staticmethod
def filter_type(pool, type):
return [obj for obj in six.itervalues(pool) if isinstance(obj, type)]
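# A hedged dispatch sketch (not part of the original module): EventSource.get
# infers the component type from the ARN prefix; the ARNs below are made up.
def _example_event_source_dispatch():
    stream = EventSource.get('arn:aws:kinesis:us-east-1:000000000000:stream/s1')
    func = EventSource.get('arn:aws:lambda:us-east-1:000000000000:function:f1')
    table = EventSource.get('arn:aws:dynamodb:us-east-1:000000000000:table/t1')
    return (isinstance(stream, KinesisStream) and
            isinstance(func, LambdaFunction) and
            isinstance(table, DynamoDB))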
| 29.479866
| 100
| 0.576551
|
da65495e06fb50dfa719b3a8af0478428d5e9dc6
| 11,583
|
py
|
Python
|
tools/attack/util/dist_utils.py
|
rajk97/Adversial-Attacks-on-densely-fused-point-clouds-for-6D-Pose-Estimation
|
e296e70c3a1286b7491995215369d9f39cfcba17
|
[
"MIT"
] | 36
|
2020-10-07T05:52:15.000Z
|
2022-03-11T03:05:32.000Z
|
tools/attack/util/dist_utils.py
|
rajk97/Adversial-Attacks-on-densely-fused-point-clouds-for-6D-Pose-Estimation
|
e296e70c3a1286b7491995215369d9f39cfcba17
|
[
"MIT"
] | 9
|
2021-01-04T02:11:36.000Z
|
2021-11-23T16:21:59.000Z
|
tools/attack/util/dist_utils.py
|
rajk97/Adversial-Attacks-on-densely-fused-point-clouds-for-6D-Pose-Estimation
|
e296e70c3a1286b7491995215369d9f39cfcba17
|
[
"MIT"
] | 6
|
2020-11-29T02:13:55.000Z
|
2021-12-06T08:19:16.000Z
|
import numpy as np
import torch
import torch.nn as nn
from util.set_distance import chamfer, hausdorff
class L2Dist(nn.Module):
def __init__(self):
"""Compute global L2 distance between two point clouds.
"""
super(L2Dist, self).__init__()
def forward(self, adv_pc, ori_pc,
weights=None, batch_avg=True):
"""Compute L2 distance between two point clouds.
Apply different weights for batch input for CW attack.
Args:
adv_pc (torch.FloatTensor): [B, K, 3] or [B, 3, K]
ori_pc (torch.FloatTensor): [B, K, 3] or [B, 3, k]
weights (torch.FloatTensor, optional): [B], if None, just use avg
batch_avg: (bool, optional): whether to avg over batch dim
"""
B = adv_pc.shape[0]
if weights is None:
weights = torch.ones((B,))
weights = weights.float().cuda()
dist = torch.sqrt(torch.sum(
(adv_pc - ori_pc) ** 2, dim=[1, 2])) # [B]
dist = dist * weights
if batch_avg:
return dist.mean()
return dist
class ChamferDist(nn.Module):
def __init__(self, method='adv2ori'):
"""Compute chamfer distance between two point clouds.
Args:
method (str, optional): type of chamfer. Defaults to 'adv2ori'.
"""
super(ChamferDist, self).__init__()
self.method = method
def forward(self, adv_pc, ori_pc,
weights=None, batch_avg=True):
"""Compute chamfer distance between two point clouds.
Args:
adv_pc (torch.FloatTensor): [B, K, 3]
ori_pc (torch.FloatTensor): [B, K, 3]
weights (torch.FloatTensor, optional): [B], if None, just use avg
batch_avg: (bool, optional): whether to avg over batch dim
"""
B = adv_pc.shape[0]
if weights is None:
weights = torch.ones((B,))
loss1, loss2 = chamfer(adv_pc, ori_pc) # [B], adv2ori, ori2adv
if self.method == 'adv2ori':
loss = loss1
elif self.method == 'ori2adv':
loss = loss2
else:
loss = (loss1 + loss2) / 2.
weights = weights.float().cuda()
loss = loss * weights
if batch_avg:
return loss.mean()
return loss
class HausdorffDist(nn.Module):
def __init__(self, method='adv2ori'):
"""Compute hausdorff distance between two point clouds.
Args:
method (str, optional): type of hausdorff. Defaults to 'adv2ori'.
"""
super(HausdorffDist, self).__init__()
self.method = method
def forward(self, adv_pc, ori_pc,
weights=None, batch_avg=True):
"""Compute hausdorff distance between two point clouds.
Args:
adv_pc (torch.FloatTensor): [B, K, 3]
ori_pc (torch.FloatTensor): [B, K, 3]
weights (torch.FloatTensor, optional): [B], if None, just use avg
batch_avg: (bool, optional): whether to avg over batch dim
"""
B = adv_pc.shape[0]
if weights is None:
weights = torch.ones((B,))
loss1, loss2 = hausdorff(adv_pc, ori_pc) # [B], adv2ori, ori2adv
if self.method == 'adv2ori':
loss = loss1
elif self.method == 'ori2adv':
loss = loss2
else:
loss = (loss1 + loss2) / 2.
weights = weights.float().cuda()
loss = loss * weights
if batch_avg:
return loss.mean()
return loss
class KNNDist(nn.Module):
def __init__(self, k=5, alpha=1.05):
"""Compute kNN distance punishment within a point cloud.
Args:
k (int, optional): kNN neighbor num. Defaults to 5.
alpha (float, optional): threshold = mean + alpha * std. Defaults to 1.05.
"""
super(KNNDist, self).__init__()
self.k = k
self.alpha = alpha
def forward(self, pc, weights=None, batch_avg=True):
"""KNN distance loss described in AAAI'20 paper.
Args:
adv_pc (torch.FloatTensor): [B, K, 3]
weights (torch.FloatTensor, optional): [B]. Defaults to None.
batch_avg: (bool, optional): whether to avg over batch dim
"""
# build kNN graph
B, K = pc.shape[:2]
pc = pc.transpose(2, 1) # [B, 3, K]
inner = -2. * torch.matmul(pc.transpose(2, 1), pc) # [B, K, K]
xx = torch.sum(pc ** 2, dim=1, keepdim=True) # [B, 1, K]
dist = xx + inner + xx.transpose(2, 1) # [B, K, K], l2^2
assert dist.min().item() >= -1e-6
# the min is self so we take top (k + 1)
neg_value, _ = (-dist).topk(k=self.k + 1, dim=-1)
# [B, K, k + 1]
value = -(neg_value[..., 1:]) # [B, K, k]
value = torch.mean(value, dim=-1) # d_p, [B, K]
with torch.no_grad():
mean = torch.mean(value, dim=-1) # [B]
std = torch.std(value, dim=-1) # [B]
# [B], penalty threshold for batch
threshold = mean + self.alpha * std
weight_mask = (value > threshold[:, None]).\
float().detach() # [B, K]
loss = torch.mean(value * weight_mask, dim=1) # [B]
# accumulate loss
if weights is None:
weights = torch.ones((B,))
weights = weights.float().cuda()
loss = loss * weights
if batch_avg:
return loss.mean()
return loss
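# A small CPU-only sketch of the statistic used by KNNDist (hedged; the module
# above moves its weights to CUDA): each point's distance to its k nearest
# neighbours is averaged, and points whose average exceeds mean + alpha * std
# are flagged as outliers to be penalised.
def _knn_outlier_mask(pc, k=5, alpha=1.05):
    # pc: [K, 3] single point cloud
    dist = torch.cdist(pc, pc)                       # [K, K] pairwise distances
    knn = dist.topk(k + 1, largest=False)[0][:, 1:]  # drop the zero self-distance
    d_p = knn.mean(dim=-1)                           # [K] mean kNN distance
    threshold = d_p.mean() + alpha * d_p.std()
    return d_p > threshold                           # boolean outlier mask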
class ChamferkNNDist(nn.Module):
def __init__(self, chamfer_method='adv2ori',
knn_k=5, knn_alpha=1.05,
chamfer_weight=5., knn_weight=3.):
"""Geometry-aware distance function of AAAI'20 paper.
Args:
chamfer_method (str, optional): chamfer. Defaults to 'adv2ori'.
knn_k (int, optional): k in kNN. Defaults to 5.
            knn_alpha (float, optional): alpha in kNN. Defaults to 1.05.
chamfer_weight (float, optional): weight factor. Defaults to 5..
knn_weight (float, optional): weight factor. Defaults to 3..
"""
super(ChamferkNNDist, self).__init__()
self.chamfer_dist = ChamferDist(method=chamfer_method)
self.knn_dist = KNNDist(k=knn_k, alpha=knn_alpha)
self.w1 = chamfer_weight
self.w2 = knn_weight
def forward(self, adv_pc, ori_pc,
weights=None, batch_avg=True):
"""Adversarial constraint function of AAAI'20 paper.
Args:
adv_pc (torch.FloatTensor): [B, K, 3]
ori_pc (torch.FloatTensor): [B, K, 3]
weights (np.array): weight factors
batch_avg: (bool, optional): whether to avg over batch dim
"""
chamfer_loss = self.chamfer_dist(
adv_pc, ori_pc, weights=weights, batch_avg=batch_avg)
knn_loss = self.knn_dist(
adv_pc, weights=weights, batch_avg=batch_avg)
loss = chamfer_loss * self.w1 + knn_loss * self.w2
return loss
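# Hedged usage sketch (not part of the original module): the combined loss is
# built once and called on CUDA point clouds of shape [B, K, 3], since the
# sub-losses move their weights to the GPU internally.
def _example_geometry_aware_loss(adv_pc, ori_pc):
    dist_fn = ChamferkNNDist(chamfer_method='adv2ori', knn_k=5, knn_alpha=1.05,
                             chamfer_weight=5., knn_weight=3.)
    return dist_fn(adv_pc, ori_pc)  # scalar tensor when batch_avg=True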
class FarthestDist(nn.Module):
def __init__(self):
"""Used in adding cluster attack.
"""
super(FarthestDist, self).__init__()
def forward(self, adv_pc, weights=None, batch_avg=True):
"""Compute the farthest pairwise point dist in each added cluster.
Args:
adv_pc (torch.FloatTensor): [B, num_add, cl_num_p, 3]
weights (np.array): weight factors
batch_avg: (bool, optional): whether to avg over batch dim
"""
B = adv_pc.shape[0]
if weights is None:
weights = torch.ones((B,))
delta_matrix = adv_pc[:, :, None, :, :] - adv_pc[:, :, :, None, :] + 1e-7
# [B, num_add, num_p, num_p, 3]
norm_matrix = torch.norm(delta_matrix, p=2, dim=-1) # [B, na, np, np]
max_matrix = torch.max(norm_matrix, dim=2)[0] # take the values of max
far_dist = torch.max(max_matrix, dim=2)[0] # [B, num_add]
far_dist = torch.sum(far_dist, dim=1) # [B]
weights = weights.float().cuda()
loss = far_dist * weights
if batch_avg:
return loss.mean()
return loss
class FarChamferDist(nn.Module):
def __init__(self, num_add, chamfer_method='adv2ori',
chamfer_weight=0.1):
"""Distance function used in generating adv clusters.
Consisting of a Farthest dist and a chamfer dist.
Args:
num_add (int): number of added clusters.
chamfer_method (str, optional): chamfer. Defaults to 'adv2ori'.
chamfer_weight (float, optional): weight factor. Defaults to 0.1.
"""
super(FarChamferDist, self).__init__()
self.num_add = num_add
self.far_dist = FarthestDist()
self.chamfer_dist = ChamferDist(method=chamfer_method)
self.cd_w = chamfer_weight
def forward(self, adv_pc, ori_pc,
weights=None, batch_avg=True):
"""Adversarial constraint function of CVPR'19 paper for adv clusters.
Args:
adv_pc (torch.FloatTensor): [B, num_add * cl_num_p, 3],
the added clusters
ori_pc (torch.FloatTensor): [B, K, 3]
weights (np.array): weight factors
batch_avg: (bool, optional): whether to avg over batch dim
"""
B = adv_pc.shape[0]
chamfer_loss = self.chamfer_dist(
adv_pc, ori_pc, weights=weights, batch_avg=batch_avg)
adv_clusters = adv_pc.view(B, self.num_add, -1, 3)
far_loss = self.far_dist(
adv_clusters, weights=weights, batch_avg=batch_avg)
loss = far_loss + chamfer_loss * self.cd_w
return loss
class L2ChamferDist(nn.Module):
def __init__(self, num_add, chamfer_method='adv2ori',
chamfer_weight=0.2):
"""Distance function used in generating adv objects.
Consisting of a L2 dist and a chamfer dist.
Args:
num_add (int): number of added objects.
chamfer_method (str, optional): chamfer. Defaults to 'adv2ori'.
chamfer_weight (float, optional): weight factor. Defaults to 0.2.
"""
super(L2ChamferDist, self).__init__()
self.num_add = num_add
self.chamfer_dist = ChamferDist(method=chamfer_method)
self.cd_w = chamfer_weight
self.l2_dist = L2Dist()
def forward(self, adv_pc, ori_pc, adv_obj, ori_obj,
weights=None, batch_avg=True):
"""Adversarial constraint function of CVPR'19 paper for adv objects.
Args:
adv_pc (torch.FloatTensor): [B, num_add * obj_num_p, 3],
the added objects after rot and shift
ori_pc (torch.FloatTensor): [B, K, 3]
adv_obj (torch.FloatTensor): [B, num_add, obj_num_p, 3],
the added objects after pert
            ori_obj (torch.FloatTensor): [B, num_add, obj_num_p, 3],
                the clean added objects
weights (np.array): weight factors
batch_avg: (bool, optional): whether to avg over batch dim
"""
B = adv_pc.shape[0]
chamfer_loss = self.chamfer_dist(
adv_pc, ori_pc, weights=weights, batch_avg=batch_avg)
l2_loss = self.l2_dist(
adv_obj.view(B, -1, 3), ori_obj.view(B, -1, 3),
weights=weights, batch_avg=batch_avg)
loss = l2_loss + self.cd_w * chamfer_loss
return loss
| 36.084112
| 86
| 0.563498
|
e1905291393fcadda73f9d66339e62379ac46428
| 363
|
py
|
Python
|
scraper/scraper/urls.py
|
ikeikeikeike/panglao-scraper
|
e7a4919c40c457ea73e89782e8b2dd3213129bcf
|
[
"MIT"
] | 1
|
2017-03-04T06:41:03.000Z
|
2017-03-04T06:41:03.000Z
|
scraper/scraper/urls.py
|
ikeikeikeike/panglao-scraper
|
e7a4919c40c457ea73e89782e8b2dd3213129bcf
|
[
"MIT"
] | null | null | null |
scraper/scraper/urls.py
|
ikeikeikeike/panglao-scraper
|
e7a4919c40c457ea73e89782e8b2dd3213129bcf
|
[
"MIT"
] | null | null | null |
from django.http import HttpResponse
from django.conf.urls import url, include
# from django.contrib import admin
urlpatterns = [
# url(r'^admin/', admin.site.urls),
url(r'^api/', include('api.urls', namespace='api')),
url(r'^lifecycle/', include('lifecycle.urls', namespace='lifecycle')),
url(r'^ping', lambda request: HttpResponse("ok")),
]
| 30.25
| 74
| 0.677686
|
f3a829884bc26558acf612399ec0cd8b634837d5
| 15,611
|
py
|
Python
|
tasks/test.py
|
karim7262/datadog-agent
|
a48b9b58fb9f13e53782a936783da2cc4b7b0c7c
|
[
"Apache-2.0"
] | 1
|
2021-09-13T03:01:39.000Z
|
2021-09-13T03:01:39.000Z
|
tasks/test.py
|
karim7262/datadog-agent
|
a48b9b58fb9f13e53782a936783da2cc4b7b0c7c
|
[
"Apache-2.0"
] | null | null | null |
tasks/test.py
|
karim7262/datadog-agent
|
a48b9b58fb9f13e53782a936783da2cc4b7b0c7c
|
[
"Apache-2.0"
] | null | null | null |
"""
High level testing tasks
"""
from __future__ import print_function
import os
import fnmatch
import re
import operator
import sys
import yaml
import invoke
from invoke import task
from invoke.exceptions import Exit
from .utils import get_build_flags, get_version
from .go import fmt, lint, vet, misspell, ineffassign, lint_licenses, golangci_lint, generate
from .build_tags import get_default_build_tags, get_build_tags
from .agent import integration_tests as agent_integration_tests
from .dogstatsd import integration_tests as dsd_integration_tests
from .trace_agent import integration_tests as trace_integration_tests
from .cluster_agent import integration_tests as dca_integration_tests
# We use `basestring` in the code for compat with python2 unicode strings.
# This makes the same code work in python3 as well.
try:
basestring
except NameError:
basestring = str
PROFILE_COV = "profile.cov"
DEFAULT_TOOL_TARGETS = [
"./pkg",
"./cmd",
]
DEFAULT_TEST_TARGETS = [
"./pkg",
"./cmd",
]
@task()
def test(ctx, targets=None, coverage=False, build_include=None, build_exclude=None,
verbose=False, race=False, profile=False, fail_on_fmt=False,
rtloader_root=None, python_home_2=None, python_home_3=None, cpus=0, major_version='7',
python_runtimes='3', timeout=120, arch="x64", cache=True, skip_linters=False,
go_mod="vendor"):
"""
Run all the tools and tests on the given targets. If targets are not specified,
the value from `invoke.yaml` will be used.
    Example invocation:
inv test --targets=./pkg/collector/check,./pkg/aggregator --race
"""
if isinstance(targets, basestring):
# when this function is called from the command line, targets are passed
# as comma separated tokens in a string
tool_targets = test_targets = targets.split(',')
elif targets is None:
tool_targets = DEFAULT_TOOL_TARGETS
test_targets = DEFAULT_TEST_TARGETS
else:
tool_targets = test_targets = targets
build_include = get_default_build_tags(process=True, arch=arch) if build_include is None else build_include.split(",")
build_exclude = [] if build_exclude is None else build_exclude.split(",")
build_tags = get_build_tags(build_include, build_exclude)
timeout = int(timeout)
# explicitly run these tasks instead of using pre-tasks so we can
# pass the `target` param (pre-tasks are invoked without parameters)
print("--- go generating:")
generate(ctx)
if skip_linters:
print("--- [skipping linters]")
else:
print("--- Linting filenames:")
lint_filenames(ctx)
print("--- Linting licenses:")
lint_licenses(ctx)
# Until all packages whitelisted in .golangci.yml are fixed and removed
# from the 'skip-dirs' list we need to keep using the old functions that
# lint without build flags (linting some file is better than no linting).
print("--- Vetting and linting (legacy):")
vet(ctx, targets=tool_targets, rtloader_root=rtloader_root, build_tags=build_tags, arch=arch)
fmt(ctx, targets=tool_targets, fail_on_fmt=fail_on_fmt)
lint(ctx, targets=tool_targets)
misspell(ctx, targets=tool_targets)
ineffassign(ctx, targets=tool_targets)
# for now we only run golangci_lint on Unix as the Windows env need more work
if sys.platform != 'win32':
print("--- golangci_lint:")
golangci_lint(ctx, targets=tool_targets, rtloader_root=rtloader_root, build_tags=build_tags)
with open(PROFILE_COV, "w") as f_cov:
f_cov.write("mode: count")
ldflags, gcflags, env = get_build_flags(ctx, rtloader_root=rtloader_root,
python_home_2=python_home_2, python_home_3=python_home_3, major_version=major_version,
python_runtimes='3', arch=arch)
if sys.platform == 'win32':
env['CGO_LDFLAGS'] += ' -Wl,--allow-multiple-definition'
if profile:
test_profiler = TestProfiler()
else:
test_profiler = None # Use stdout
race_opt = ""
covermode_opt = ""
build_cpus_opt = ""
if cpus:
build_cpus_opt = "-p {}".format(cpus)
if race:
# race doesn't appear to be supported on non-x64 platforms
if arch == "x86":
print("\n -- Warning... disabling race test, not supported on this platform --\n")
else:
race_opt = "-race"
if coverage:
if race:
# atomic is quite expensive but it's the only way to run
# both the coverage and the race detector at the same time
# without getting false positives from the cover counter
covermode_opt = "-covermode=atomic"
else:
covermode_opt = "-covermode=count"
matches = ["{}/...".format(t) for t in test_targets]
print("\n--- Running unit tests:")
coverprofile = ""
if coverage:
coverprofile = "-coverprofile={}".format(PROFILE_COV)
nocache = '-count=1' if not cache else ''
build_tags.append("test")
cmd = 'go test {verbose} -mod={go_mod} -vet=off -timeout {timeout}s -tags "{go_build_tags}" -gcflags="{gcflags}" '
cmd += '-ldflags="{ldflags}" {build_cpus} {race_opt} -short {covermode_opt} {coverprofile} {nocache} {pkg_folder}'
args = {
"go_mod": go_mod,
"go_build_tags": " ".join(build_tags),
"gcflags": gcflags,
"ldflags": ldflags,
"race_opt": race_opt,
"build_cpus": build_cpus_opt,
"covermode_opt": covermode_opt,
"coverprofile": coverprofile,
"pkg_folder": ' '.join(matches),
"timeout": timeout,
"verbose": '-v' if verbose else '',
"nocache": nocache,
}
ctx.run(cmd.format(**args), env=env, out_stream=test_profiler)
if coverage:
print("\n--- Test coverage:")
ctx.run("go tool cover -func {}".format(PROFILE_COV))
if profile:
        print("\n--- Top 15 packages sorted by run time:")
test_profiler.print_sorted(15)
@task
def lint_teamassignment(ctx):
"""
Make sure PRs are assigned a team label
"""
pr_url = os.environ.get("CIRCLE_PULL_REQUEST")
if pr_url:
import requests
pr_id = pr_url.rsplit('/')[-1]
res = requests.get("https://api.github.com/repos/DataDog/datadog-agent/issues/{}".format(pr_id))
issue = res.json()
        team_labels = [l['name'] for l in issue.get('labels', [])
                       if re.match('team/', l['name'])]
        if team_labels:
            print("Team Assignment: %s" % team_labels[0])
            return
print("PR %s requires team assignment" % pr_url)
raise Exit(code=1)
# The PR has not been created yet
else:
print("PR not yet created, skipping check for team assignment")
@task
def lint_milestone(ctx):
"""
Make sure PRs are assigned a milestone
"""
pr_url = os.environ.get("CIRCLE_PULL_REQUEST")
if pr_url:
import requests
pr_id = pr_url.rsplit('/')[-1]
res = requests.get("https://api.github.com/repos/DataDog/datadog-agent/issues/{}".format(pr_id))
pr = res.json()
if pr.get("milestone"):
print("Milestone: %s" % pr["milestone"].get("title", "NO_TITLE"))
return
print("PR %s requires a milestone" % pr_url)
raise Exit(code=1)
# The PR has not been created yet
else:
print("PR not yet created, skipping check for milestone")
@task
def lint_releasenote(ctx):
"""
Lint release notes with Reno
"""
# checking if a releasenote has been added/changed
pr_url = os.environ.get("CIRCLE_PULL_REQUEST")
if pr_url:
import requests
pr_id = pr_url.rsplit('/')[-1]
# first check 'changelog/no-changelog' label
res = requests.get("https://api.github.com/repos/DataDog/datadog-agent/issues/{}".format(pr_id))
issue = res.json()
if any([l['name'] == 'changelog/no-changelog' for l in issue.get('labels', {})]):
print("'changelog/no-changelog' label found on the PR: skipping linting")
return
# Then check that at least one note was touched by the PR
url = "https://api.github.com/repos/DataDog/datadog-agent/pulls/{}/files".format(pr_id)
# traverse paginated github response
while True:
res = requests.get(url)
files = res.json()
if any([f['filename'].startswith("releasenotes/notes/") or \
f['filename'].startswith("releasenotes-dca/notes/") for f in files]):
break
if 'next' in res.links:
url = res.links['next']['url']
else:
print("Error: No releasenote was found for this PR. Please add one using 'reno'"\
", or apply the label 'changelog/no-changelog' to the PR.")
raise Exit(code=1)
# The PR has not been created yet, let's compare with master (the usual base branch of the future PR)
else:
branch = os.environ.get("CIRCLE_BRANCH")
if branch is None:
print("No branch found, skipping reno linting")
else:
if re.match(r".*/.*", branch) is None:
print("{} is not a feature branch, skipping reno linting".format(branch))
else:
import requests
# Then check that in the diff with master, at least one note was touched
url = "https://api.github.com/repos/DataDog/datadog-agent/compare/master...{}".format(branch)
# traverse paginated github response
while True:
res = requests.get(url)
files = res.json().get("files", {})
if any([f['filename'].startswith("releasenotes/notes/") or \
f['filename'].startswith("releasenotes-dca/notes/") for f in files]):
break
if 'next' in res.links:
url = res.links['next']['url']
else:
print("Error: No releasenote was found for this PR. Please add one using 'reno'"\
", or apply the label 'changelog/no-changelog' to the PR.")
raise Exit(code=1)
ctx.run("reno lint")
@task
def lint_filenames(ctx):
"""
Scan files to ensure there are no filenames too long or containing illegal characters
"""
files = ctx.run("git ls-files -z", hide=True).stdout.split("\0")
failure = False
if sys.platform == 'win32':
print("Running on windows, no need to check filenames for illegal characters")
else:
print("Checking filenames for illegal characters")
forbidden_chars = '<>:"\\|?*'
for file in files:
if any(char in file for char in forbidden_chars):
print("Error: Found illegal character in path {}".format(file))
failure = True
print("Checking filename length")
# Approximated length of the prefix of the repo during the windows release build
prefix_length = 160
# Maximum length supported by the win32 API
max_length = 255
for file in files:
if prefix_length + len(file) > max_length:
print("Error: path {} is too long ({} characters too many)".format(file, prefix_length + len(file) - max_length))
failure = True
if failure:
raise Exit(code=1)
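# A worked example of the length budget above (hedged, not part of the original
# tasks file): with the 160-character build prefix and the 255-character Windows
# limit, any tracked path longer than 95 characters fails the check.
def _example_path_budget(path):
    prefix_length, max_length = 160, 255
    overflow = prefix_length + len(path) - max_length
    return overflow  # positive values mean "this many characters too long"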
@task
def integration_tests(ctx, install_deps=False, race=False, remote_docker=False):
"""
Run all the available integration tests
"""
agent_integration_tests(ctx, install_deps, race, remote_docker)
dsd_integration_tests(ctx, install_deps, race, remote_docker)
dca_integration_tests(ctx, install_deps, race, remote_docker)
trace_integration_tests(ctx, install_deps, race, remote_docker)
@task
def e2e_tests(ctx, target="gitlab", image=""):
"""
Run e2e tests in several environments.
"""
choices = ["gitlab", "dev", "local"]
if target not in choices:
print('target %s not in %s' % (target, choices))
raise Exit(1)
if not os.getenv("DATADOG_AGENT_IMAGE"):
if not image:
print("define DATADOG_AGENT_IMAGE envvar or image flag")
raise Exit(1)
os.environ["DATADOG_AGENT_IMAGE"] = image
ctx.run("./test/e2e/scripts/setup-instance/00-entrypoint-%s.sh" % target)
class TestProfiler:
times = []
parser = re.compile("^ok\s+github.com\/DataDog\/datadog-agent\/(\S+)\s+([0-9\.]+)s", re.MULTILINE)
def write(self, txt):
# Output to stdout
sys.stdout.write(txt)
# Extract the run time
for result in self.parser.finditer(txt):
self.times.append((result.group(1), float(result.group(2))))
def flush(self):
sys.stdout.flush()
def reset(self):
self.out_buffer = ""
def print_sorted(self, limit=0):
if self.times:
sorted_times = sorted(self.times, key=operator.itemgetter(1), reverse=True)
if limit:
sorted_times = sorted_times[:limit]
for pkg, time in sorted_times:
print("{}s\t{}".format(time, pkg))
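# Hedged illustration of TestProfiler (not part of the original tasks file): it
# tees `go test` output to stdout while scraping per-package run times from the
# "ok <package> <seconds>s" lines; the sample lines below are made up.
def _example_profiler():
    profiler = TestProfiler()
    profiler.write("ok  \tgithub.com/DataDog/datadog-agent/pkg/collector\t12.3s\n")
    profiler.write("ok  \tgithub.com/DataDog/datadog-agent/pkg/util\t0.8s\n")
    profiler.print_sorted(limit=1)  # prints the slowest package first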
@task
def make_kitchen_gitlab_yml(ctx):
"""
Replaces .gitlab-ci.yml with one containing only the steps needed to run kitchen-tests
"""
with open('.gitlab-ci.yml') as f:
data = yaml.load(f, Loader=yaml.FullLoader)
data['stages'] = ['deps_build', 'binary_build', 'package_build', 'testkitchen_deploy', 'testkitchen_testing', 'testkitchen_cleanup']
    for k, v in list(data.items()):
if isinstance(v, dict) and v.get('stage', None) not in ([None] + data['stages']):
del data[k]
continue
if isinstance(v, dict) and v.get('stage', None) == 'binary_build' and k != 'build_system-probe-arm64' and k != 'build_system-probe-x64' and k != 'build_system-probe_with-bcc-arm64' and k != 'build_system-probe_with-bcc-x64':
del data[k]
continue
if 'except' in v:
del v['except']
if 'only' in v:
del v['only']
if len(v) == 0:
del data[k]
continue
    for k, v in list(data.items()):
if 'extends' in v:
extended = v['extends']
if extended not in data:
del data[k]
if 'needs' in v:
needed = v['needs']
new_needed = []
for n in needed:
if n in data:
new_needed.append(n)
v['needs'] = new_needed
with open('.gitlab-ci.yml', 'w') as f:
        yaml.dump(data, f, default_style='"')
@task
def check_gitlab_broken_dependencies(ctx):
"""
Checks that a gitlab job doesn't depend on (need) other jobs that will be excluded from the build,
since this would make gitlab fail when triggering a pipeline with those jobs excluded.
"""
with open('.gitlab-ci.yml') as f:
data = yaml.load(f, Loader=yaml.FullLoader)
def is_unwanted(job, version):
e = job.get('except',{})
return isinstance(e, dict) and '$RELEASE_VERSION_{} == ""'.format(version) in e.get('variables',{})
for version in [6,7]:
for k,v in data.items():
if isinstance(v, dict) and not is_unwanted(v, version) and "needs" in v:
needed = v['needs']
for need in needed:
if is_unwanted(data[need], version):
print("{} needs on {} but it won't be built for A{}".format(k, need, version))
| 35.887356
| 232
| 0.613542
|
81c5c0423742f7f9787eb2a396c275deb1a118ae
| 2,686
|
py
|
Python
|
airbyte-integrations/connectors/destination-scaffold-destination-python/destination_scaffold_destination_python/destination.py
|
onaio/airbyte
|
38302e82a25f1b66742c3febfbff0668556920f2
|
[
"MIT"
] | 22
|
2020-08-27T00:47:20.000Z
|
2020-09-17T15:39:39.000Z
|
airbyte-integrations/connectors/destination-scaffold-destination-python/destination_scaffold_destination_python/destination.py
|
onaio/airbyte
|
38302e82a25f1b66742c3febfbff0668556920f2
|
[
"MIT"
] | 116
|
2020-08-27T01:11:27.000Z
|
2020-09-19T02:47:52.000Z
|
airbyte-integrations/connectors/destination-scaffold-destination-python/destination_scaffold_destination_python/destination.py
|
onaio/airbyte
|
38302e82a25f1b66742c3febfbff0668556920f2
|
[
"MIT"
] | 1
|
2020-09-15T06:10:01.000Z
|
2020-09-15T06:10:01.000Z
|
#
# Copyright (c) 2022 Airbyte, Inc., all rights reserved.
#
from typing import Any, Iterable, Mapping
from airbyte_cdk import AirbyteLogger
from airbyte_cdk.destinations import Destination
from airbyte_cdk.models import AirbyteConnectionStatus, AirbyteMessage, ConfiguredAirbyteCatalog, Status
class DestinationScaffoldDestinationPython(Destination):
def write(
self, config: Mapping[str, Any], configured_catalog: ConfiguredAirbyteCatalog, input_messages: Iterable[AirbyteMessage]
) -> Iterable[AirbyteMessage]:
"""
TODO
Reads the input stream of messages, config, and catalog to write data to the destination.
This method returns an iterable (typically a generator of AirbyteMessages via yield) containing state messages received
in the input message stream. Outputting a state message means that every AirbyteRecordMessage which came before it has been
successfully persisted to the destination. This is used to ensure fault tolerance in the case that a sync fails before fully completing,
then the source is given the last state message output from this method as the starting point of the next sync.
:param config: dict of JSON configuration matching the configuration declared in spec.json
:param configured_catalog: The Configured Catalog describing the schema of the data being received and how it should be persisted in the
destination
:param input_messages: The stream of input messages received from the source
:return: Iterable of AirbyteStateMessages wrapped in AirbyteMessage structs
"""
pass
def check(self, logger: AirbyteLogger, config: Mapping[str, Any]) -> AirbyteConnectionStatus:
"""
Tests if the input configuration can be used to successfully connect to the destination with the needed permissions
e.g: if a provided API token or password can be used to connect and write to the destination.
:param logger: Logging object to display debug/info/error to the logs
(logs will not be accessible via airbyte UI if they are not passed to this logger)
:param config: Json object containing the configuration of this destination, content of this json is as specified in
the properties of the spec.json file
:return: AirbyteConnectionStatus indicating a Success or Failure
"""
try:
# TODO
return AirbyteConnectionStatus(status=Status.SUCCEEDED)
except Exception as e:
return AirbyteConnectionStatus(status=Status.FAILED, message=f"An exception occurred: {repr(e)}")
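# A minimal sketch of the write() contract documented above (hedged; this
# buffering logic is illustrative only, not part of the generated scaffold):
# records are persisted first, and the state message that follows them is then
# echoed back as a checkpoint.
from airbyte_cdk.models import Type
def _example_write(input_messages):
    buffered = []
    for message in input_messages:
        if message.type == Type.RECORD:
            buffered.append(message.record.data)  # persist the record here
        elif message.type == Type.STATE:
            buffered.clear()  # everything before this checkpoint is durable
            yield message     # acknowledge the checkpoint to the platform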
| 49.740741
| 144
| 0.726359
|
c6a7e5a0791c50c5fd0534cb5335a8e93aa00e5f
| 64,150
|
bzl
|
Python
|
tools/build_variables.bzl
|
gml16/pytorch
|
fad60484572c490384c107cf625d484e34dc9bcf
|
[
"Intel"
] | 2
|
2022-02-14T13:56:03.000Z
|
2022-02-14T13:56:05.000Z
|
tools/build_variables.bzl
|
gml16/pytorch
|
fad60484572c490384c107cf625d484e34dc9bcf
|
[
"Intel"
] | null | null | null |
tools/build_variables.bzl
|
gml16/pytorch
|
fad60484572c490384c107cf625d484e34dc9bcf
|
[
"Intel"
] | null | null | null |
# In both open-source and fbcode builds, these are generated into
# torch/csrc/{autograd,jit}/generated.i
GENERATED_CPP = [
"autograd/generated/Functions.cpp",
"autograd/generated/VariableType_0.cpp",
"autograd/generated/VariableType_1.cpp",
"autograd/generated/VariableType_2.cpp",
"autograd/generated/VariableType_3.cpp",
"autograd/generated/VariableType_4.cpp",
"autograd/generated/TraceType_0.cpp",
"autograd/generated/TraceType_1.cpp",
"autograd/generated/TraceType_2.cpp",
"autograd/generated/TraceType_3.cpp",
"autograd/generated/TraceType_4.cpp",
"autograd/generated/ADInplaceOrViewType_0.cpp",
"autograd/generated/ADInplaceOrViewType_1.cpp",
"autograd/generated/python_functions_0.cpp",
"autograd/generated/python_functions_1.cpp",
"autograd/generated/python_functions_2.cpp",
"autograd/generated/python_functions_3.cpp",
"autograd/generated/python_functions_4.cpp",
"autograd/generated/python_nn_functions.cpp",
"autograd/generated/python_fft_functions.cpp",
"autograd/generated/python_linalg_functions.cpp",
"autograd/generated/python_return_types.cpp",
"autograd/generated/python_sparse_functions.cpp",
"autograd/generated/python_special_functions.cpp",
"autograd/generated/python_torch_functions_0.cpp",
"autograd/generated/python_torch_functions_1.cpp",
"autograd/generated/python_torch_functions_2.cpp",
"autograd/generated/python_variable_methods.cpp",
]
# NVFuser runtime library
libtorch_nvfuser_runtime_sources = [
"torch/csrc/jit/codegen/cuda/runtime/bf16_support.cu",
"torch/csrc/jit/codegen/cuda/runtime/block_reduction.cu",
"torch/csrc/jit/codegen/cuda/runtime/block_sync_atomic.cu",
"torch/csrc/jit/codegen/cuda/runtime/block_sync_default.cu",
"torch/csrc/jit/codegen/cuda/runtime/broadcast.cu",
"torch/csrc/jit/codegen/cuda/runtime/fp16_support.cu",
"torch/csrc/jit/codegen/cuda/runtime/grid_broadcast.cu",
"torch/csrc/jit/codegen/cuda/runtime/grid_reduction.cu",
"torch/csrc/jit/codegen/cuda/runtime/grid_sync.cu",
"torch/csrc/jit/codegen/cuda/runtime/helpers.cu",
"torch/csrc/jit/codegen/cuda/runtime/index_utils.cu",
"torch/csrc/jit/codegen/cuda/runtime/random_numbers.cu",
"torch/csrc/jit/codegen/cuda/runtime/tensor.cu",
"torch/csrc/jit/codegen/cuda/runtime/welford.cu",
"torch/csrc/jit/codegen/cuda/runtime/warp.cu",
"aten/src/ATen/cuda/detail/PhiloxCudaStateRaw.cuh",
"aten/src/ATen/cuda/detail/UnpackRaw.cuh",
]
libtorch_nvfuser_generated_headers = ["{}.h".format(name.split("/")[-1].split(".")[0]) for name in libtorch_nvfuser_runtime_sources]
def libtorch_generated_sources(gencode_pattern):
return [gencode_pattern.format(name) for name in [
"autograd/generated/Functions.cpp",
"autograd/generated/VariableType_0.cpp",
"autograd/generated/VariableType_1.cpp",
"autograd/generated/VariableType_2.cpp",
"autograd/generated/VariableType_3.cpp",
"autograd/generated/VariableType_4.cpp",
"autograd/generated/TraceType_0.cpp",
"autograd/generated/TraceType_1.cpp",
"autograd/generated/TraceType_2.cpp",
"autograd/generated/TraceType_3.cpp",
"autograd/generated/TraceType_4.cpp",
"autograd/generated/ADInplaceOrViewType_0.cpp",
"autograd/generated/ADInplaceOrViewType_1.cpp",
]]
# copied from https://github.com/pytorch/pytorch/blob/f99a693cd9ff7a9b5fdc71357dac66b8192786d3/aten/src/ATen/core/CMakeLists.txt
jit_core_headers = [
"torch/csrc/utils/memory.h",
"torch/csrc/Export.h",
"torch/csrc/jit/frontend/source_range.h",
"torch/csrc/jit/serialization/callstack_debug_info_serialization.h",
"torch/csrc/jit/serialization/source_range_serialization.h",
"torch/csrc/jit/frontend/lexer.h",
"torch/csrc/jit/frontend/strtod.h",
"torch/csrc/jit/frontend/parser_constants.h",
"torch/csrc/jit/frontend/function_schema_parser.h",
"torch/csrc/jit/frontend/parse_string_literal.h",
"torch/csrc/jit/frontend/schema_type_parser.h",
"torch/csrc/jit/frontend/error_report.h",
"torch/csrc/jit/frontend/tree.h",
"torch/custom_class.h",
"torch/custom_class_detail.h",
"torch/library.h",
]
jit_core_sources = [
"torch/csrc/jit/frontend/error_report.cpp",
"torch/csrc/jit/frontend/function_schema_parser.cpp",
"torch/csrc/jit/frontend/lexer.cpp",
"torch/csrc/jit/frontend/schema_type_parser.cpp",
"torch/csrc/jit/frontend/strtod.cpp",
"torch/csrc/jit/frontend/source_range.cpp",
]
# copied from https://github.com/pytorch/pytorch/blob/0bde610c14b92d351b968a0228df29e92442b1cc/torch/CMakeLists.txt
# There are some common files used in both internal lite-interpreter and full-jit. Making a separate
# list for the shared files.
core_sources_common = [
"torch/csrc/autograd/autograd_meta.cpp",
"torch/csrc/autograd/forward_grad.cpp",
"torch/csrc/jit/frontend/edit_distance.cpp",
"torch/csrc/jit/mobile/compatibility/runtime_compatibility.cpp",
"torch/csrc/jit/mobile/type_parser.cpp",
"torch/csrc/jit/operator_upgraders/upgraders_guard.cpp",
"torch/csrc/jit/operator_upgraders/version_map.cpp",
"torch/csrc/jit/runtime/instruction.cpp",
"torch/csrc/jit/runtime/jit_exception.cpp",
"torch/csrc/jit/runtime/operator.cpp",
"torch/csrc/jit/mobile/register_ops_common_utils.cpp",
"torch/csrc/jit/runtime/print_handler.cpp",
"torch/csrc/jit/runtime/slice_indices_adjust.cpp",
"torch/csrc/jit/runtime/register_ops_utils.cpp",
"torch/csrc/jit/runtime/vararg_functions.cpp",
"torch/csrc/jit/mobile/promoted_prim_ops.cpp",
"torch/csrc/jit/mobile/prim_ops_registery.cpp",
"torch/csrc/profiler/util.cpp",
]
torch_unpickler_common = [
"torch/csrc/jit/serialization/import_read.cpp",
"torch/csrc/jit/serialization/unpickler.cpp",
]
libtorch_sources_common = sorted(core_sources_common + torch_unpickler_common)
# The profilers are not needed in the lite interpreter build.
libtorch_profiler_sources = [
"torch/csrc/autograd/profiler_legacy.cpp",
"torch/csrc/autograd/profiler_kineto.cpp",
"torch/csrc/profiler/api.cpp",
"torch/csrc/profiler/kineto_shim.cpp",
"torch/csrc/profiler/nvtx_observer.cpp",
"torch/csrc/monitor/counters.cpp",
"torch/csrc/monitor/events.cpp",
]
libtorch_edge_profiler_sources = libtorch_profiler_sources + [
"torch/csrc/jit/mobile/profiler_edge.cpp",
]
core_trainer_sources = [
"torch/csrc/autograd/anomaly_mode.cpp",
"torch/csrc/autograd/autograd.cpp",
"torch/csrc/autograd/autograd_not_implemented_fallback.cpp",
"torch/csrc/autograd/cpp_hook.cpp",
"torch/csrc/autograd/custom_function.cpp",
"torch/csrc/autograd/engine.cpp",
"torch/csrc/autograd/function.cpp",
"torch/csrc/autograd/function_hook.cpp",
"torch/csrc/autograd/functions/accumulate_grad.cpp",
"torch/csrc/autograd/functions/basic_ops.cpp",
"torch/csrc/autograd/functions/tensor.cpp",
"torch/csrc/autograd/functions/utils.cpp",
"torch/csrc/autograd/input_buffer.cpp",
"torch/csrc/autograd/record_function_ops.cpp",
"torch/csrc/autograd/saved_variable.cpp",
"torch/csrc/autograd/variable.cpp",
"torch/csrc/autograd/utils/warnings.cpp",
"torch/csrc/jit/frontend/name_mangler.cpp",
"torch/csrc/jit/ir/type_hashing.cpp",
"torch/csrc/jit/serialization/pickler.cpp",
"torch/csrc/jit/serialization/type_name_uniquer.cpp",
]
core_sources_full_mobile_no_backend_interface = [
"torch/csrc/jit/api/function_impl.cpp",
"torch/csrc/jit/api/module.cpp",
"torch/csrc/jit/api/object.cpp",
"torch/csrc/jit/backends/backend_debug_handler.cpp",
"torch/csrc/jit/backends/backend_detail.cpp",
"torch/csrc/jit/backends/backend_resolver.cpp",
"torch/csrc/jit/codegen/fuser/codegen.cpp",
"torch/csrc/jit/codegen/fuser/compiler.cpp",
"torch/csrc/jit/codegen/fuser/executor.cpp",
"torch/csrc/jit/codegen/fuser/fallback.cpp",
"torch/csrc/jit/codegen/fuser/interface.cpp",
"torch/csrc/jit/codegen/fuser/kernel_cache.cpp",
"torch/csrc/jit/frontend/builtin_functions.cpp",
"torch/csrc/jit/frontend/versioned_symbols.cpp",
"torch/csrc/jit/frontend/canonicalize_modified_loop.cpp",
"torch/csrc/jit/frontend/convert_to_ssa.cpp",
"torch/csrc/jit/frontend/exit_transforms.cpp",
"torch/csrc/jit/frontend/inline_loop_condition.cpp",
"torch/csrc/jit/frontend/ir_emitter.cpp",
"torch/csrc/jit/frontend/parser.cpp",
"torch/csrc/jit/frontend/schema_matching.cpp",
"torch/csrc/jit/frontend/script_type_parser.cpp",
"torch/csrc/jit/frontend/sugared_value.cpp",
"torch/csrc/jit/frontend/tracer.cpp",
"torch/csrc/jit/ir/alias_analysis.cpp",
"torch/csrc/jit/ir/attributes.cpp",
"torch/csrc/jit/ir/constants.cpp",
"torch/csrc/jit/ir/ir.cpp",
"torch/csrc/jit/ir/irparser.cpp",
"torch/csrc/jit/ir/node_hashing.cpp",
"torch/csrc/jit/ir/scope.cpp",
"torch/csrc/jit/ir/subgraph_matcher.cpp",
"torch/csrc/jit/jit_log.cpp",
"torch/csrc/jit/jit_opt_limit.cpp",
"torch/csrc/jit/mobile/nnc/aot_compiler.cpp",
"torch/csrc/jit/mobile/nnc/backend.cpp",
"torch/csrc/jit/mobile/nnc/context.cpp",
"torch/csrc/jit/mobile/nnc/registry.cpp",
"torch/csrc/jit/operator_upgraders/utils.cpp",
"torch/csrc/jit/operator_upgraders/upgraders.cpp",
"torch/csrc/jit/operator_upgraders/upgraders_entry.cpp",
"torch/csrc/jit/passes/annotate_warns.cpp",
"torch/csrc/jit/passes/bailout_graph.cpp",
"torch/csrc/jit/passes/batch_mm.cpp",
"torch/csrc/jit/passes/canonicalize.cpp",
"torch/csrc/jit/passes/canonicalize_graph_fuser_ops.cpp",
"torch/csrc/jit/passes/clear_profiling.cpp",
"torch/csrc/jit/passes/clear_undefinedness.cpp",
"torch/csrc/jit/passes/common_subexpression_elimination.cpp",
"torch/csrc/jit/passes/common_expression_hoisting.cpp",
"torch/csrc/jit/passes/concat_opt.cpp",
"torch/csrc/jit/passes/constant_pooling.cpp",
"torch/csrc/jit/passes/constant_propagation.cpp",
"torch/csrc/jit/passes/restore_mutation.cpp",
"torch/csrc/jit/passes/create_autodiff_subgraphs.cpp",
"torch/csrc/jit/passes/dead_code_elimination.cpp",
"torch/csrc/jit/passes/eliminate_no_ops.cpp",
"torch/csrc/jit/passes/remove_redundant_profiles.cpp",
"torch/csrc/jit/passes/remove_exceptions.cpp",
"torch/csrc/jit/passes/decompose_ops.cpp",
"torch/csrc/jit/passes/dtype_analysis.cpp",
"torch/csrc/jit/passes/device_type_analysis.cpp",
"torch/csrc/jit/passes/erase_number_types.cpp",
"torch/csrc/jit/passes/fixup_trace_scope_blocks.cpp",
"torch/csrc/jit/passes/freeze_module.cpp",
"torch/csrc/jit/passes/fuse_linear.cpp",
"torch/csrc/jit/passes/fuse_relu.cpp",
"torch/csrc/jit/passes/graph_fuser.cpp",
"torch/csrc/jit/passes/graph_rewrite_helper.cpp",
"torch/csrc/jit/passes/guard_elimination.cpp",
"torch/csrc/jit/passes/hoist_conv_packed_params.cpp",
"torch/csrc/jit/passes/inline_autodiff_subgraphs.cpp",
"torch/csrc/jit/passes/inline_forked_closures.cpp",
"torch/csrc/jit/passes/inline_fork_wait.cpp",
"torch/csrc/jit/passes/inliner.cpp",
"torch/csrc/jit/passes/inplace_check.cpp",
"torch/csrc/jit/passes/insert_guards.cpp",
"torch/csrc/jit/passes/lift_closures.cpp",
"torch/csrc/jit/passes/liveness.cpp",
"torch/csrc/jit/passes/loop_unrolling.cpp",
"torch/csrc/jit/passes/lower_grad_of.cpp",
"torch/csrc/jit/passes/lower_tuples.cpp",
"torch/csrc/jit/passes/normalize_ops.cpp",
"torch/csrc/jit/passes/peephole_dict_idioms.cpp",
"torch/csrc/jit/passes/peephole_list_idioms.cpp",
"torch/csrc/jit/passes/value_refinement_utils.cpp",
"torch/csrc/jit/passes/peephole_alias_sensitive.cpp",
"torch/csrc/jit/passes/pass_manager.cpp",
"torch/csrc/jit/passes/peephole.cpp",
"torch/csrc/jit/passes/peephole_non_tensor.cpp",
"torch/csrc/jit/passes/create_functional_graphs.cpp",
"torch/csrc/jit/passes/remove_mutation.cpp",
"torch/csrc/jit/passes/prepack_folding.cpp",
"torch/csrc/jit/passes/fold_conv_bn.cpp",
"torch/csrc/jit/passes/frozen_concat_linear.cpp",
"torch/csrc/jit/passes/frozen_conv_add_relu_fusion.cpp",
"torch/csrc/jit/passes/frozen_conv_folding.cpp",
"torch/csrc/jit/passes/frozen_linear_transpose.cpp",
"torch/csrc/jit/passes/frozen_ops_to_mkldnn.cpp",
"torch/csrc/jit/passes/frozen_graph_optimizations.cpp",
"torch/csrc/jit/passes/remove_expands.cpp",
"torch/csrc/jit/passes/remove_dropout.cpp",
"torch/csrc/jit/passes/requires_grad_analysis.cpp",
"torch/csrc/jit/passes/shape_analysis.cpp",
"torch/csrc/jit/passes/integer_value_refinement.cpp",
"torch/csrc/jit/passes/replacement_of_old_operators.cpp",
"torch/csrc/jit/passes/symbolic_shape_analysis.cpp",
"torch/csrc/jit/passes/symbolic_shape_runtime_fusion.cpp",
"torch/csrc/jit/passes/specialize_autogradzero.cpp",
"torch/csrc/jit/passes/update_differentiable_graph_requires_grad.cpp",
"torch/csrc/jit/passes/variadic_ops.cpp",
"torch/csrc/jit/passes/subgraph_rewrite.cpp",
"torch/csrc/jit/passes/tensorexpr_fuser.cpp",
"torch/csrc/jit/passes/utils/memory_dag.cpp",
"torch/csrc/jit/passes/utils/subgraph_utils.cpp",
"torch/csrc/jit/passes/utils/optimization_utils.cpp",
"torch/csrc/jit/passes/utils/op_registry.cpp",
"torch/csrc/jit/passes/xnnpack_rewrite.cpp",
"torch/csrc/jit/passes/vulkan_rewrite.cpp",
"torch/csrc/jit/passes/metal_rewrite.cpp",
"torch/csrc/jit/passes/quantization/helper.cpp",
"torch/csrc/jit/passes/quantization/quantization_type.cpp",
"torch/csrc/jit/passes/quantization/insert_observers.cpp",
"torch/csrc/jit/passes/quantization/insert_quant_dequant.cpp",
"torch/csrc/jit/passes/quantization/dedup_module_uses.cpp",
"torch/csrc/jit/passes/quantization/finalize.cpp",
"torch/csrc/jit/passes/quantization/fusion_passes.cpp",
"torch/csrc/jit/python/update_graph_executor_opt.cpp",
"torch/csrc/jit/runtime/argument_spec.cpp",
"torch/csrc/jit/runtime/autodiff.cpp",
"torch/csrc/jit/runtime/graph_executor.cpp",
"torch/csrc/jit/runtime/interpreter/frame.cpp",
"torch/csrc/jit/runtime/interpreter/preprocess_graph.cpp",
"torch/csrc/jit/runtime/interpreter.cpp",
"torch/csrc/jit/runtime/logging.cpp",
"torch/csrc/jit/runtime/profiling_graph_executor_impl.cpp",
"torch/csrc/jit/runtime/profiling_record.cpp",
"torch/csrc/jit/runtime/script_profile.cpp",
"torch/csrc/jit/runtime/symbolic_script.cpp",
"torch/csrc/jit/runtime/symbolic_shape_registry.cpp",
"torch/csrc/jit/runtime/symbolic_shape_registry_util.cpp",
"torch/csrc/jit/runtime/jit_trace.cpp",
"torch/csrc/jit/serialization/callstack_debug_info_serialization.cpp",
"torch/csrc/jit/serialization/import.cpp",
"torch/csrc/jit/serialization/import_export_helpers.cpp",
"torch/csrc/jit/serialization/import_source.cpp",
"torch/csrc/jit/serialization/pickle.cpp",
"torch/csrc/jit/serialization/python_print.cpp",
"torch/csrc/jit/serialization/source_range_serialization.cpp",
"torch/csrc/jit/tensorexpr/block_codegen.cpp",
"torch/csrc/jit/tensorexpr/bounds_inference.cpp",
"torch/csrc/jit/tensorexpr/bounds_overlap.cpp",
"torch/csrc/jit/tensorexpr/codegen.cpp",
"torch/csrc/jit/tensorexpr/cpp_codegen.cpp",
"torch/csrc/jit/tensorexpr/eval.cpp",
"torch/csrc/jit/tensorexpr/expr.cpp",
"torch/csrc/jit/tensorexpr/external_functions_registry.cpp",
"torch/csrc/jit/tensorexpr/graph_opt.cpp",
"torch/csrc/jit/tensorexpr/hash_provider.cpp",
"torch/csrc/jit/tensorexpr/intrinsic_symbols.cpp",
"torch/csrc/jit/tensorexpr/ir.cpp",
"torch/csrc/jit/tensorexpr/ir_cloner.cpp",
"torch/csrc/jit/tensorexpr/ir_mutator.cpp",
"torch/csrc/jit/tensorexpr/ir_printer.cpp",
"torch/csrc/jit/tensorexpr/ir_simplifier.cpp",
"torch/csrc/jit/tensorexpr/ir_verifier.cpp",
"torch/csrc/jit/tensorexpr/ir_visitor.cpp",
"torch/csrc/jit/tensorexpr/kernel.cpp",
"torch/csrc/jit/tensorexpr/llvm_codegen.cpp",
"torch/csrc/jit/tensorexpr/llvm_jit.cpp",
"torch/csrc/jit/tensorexpr/loopnest.cpp",
"torch/csrc/jit/tensorexpr/loopnest_randomization.cpp",
"torch/csrc/jit/tensorexpr/lowerings.cpp",
"torch/csrc/jit/tensorexpr/mem_dependency_checker.cpp",
"torch/csrc/jit/tensorexpr/operators/conv2d.cpp",
"torch/csrc/jit/tensorexpr/operators/matmul.cpp",
"torch/csrc/jit/tensorexpr/operators/misc.cpp",
"torch/csrc/jit/tensorexpr/operators/norm.cpp",
"torch/csrc/jit/tensorexpr/operators/pointwise.cpp",
"torch/csrc/jit/tensorexpr/operators/quantization.cpp",
"torch/csrc/jit/tensorexpr/operators/reduction.cpp",
"torch/csrc/jit/tensorexpr/operators/softmax.cpp",
"torch/csrc/jit/tensorexpr/reduction.cpp",
"torch/csrc/jit/tensorexpr/registerizer.cpp",
"torch/csrc/jit/tensorexpr/tensor.cpp",
"torch/csrc/jit/tensorexpr/types.cpp",
"torch/csrc/jit/tensorexpr/unique_name_manager.cpp",
"torch/csrc/jit/testing/file_check.cpp",
"torch/csrc/jit/testing/hooks_for_testing.cpp",
"torch/csrc/utils/tensor_flatten.cpp",
"torch/csrc/utils/variadic.cpp",
]
core_sources_full_mobile = core_sources_full_mobile_no_backend_interface + [
"torch/csrc/jit/backends/backend_debug_info.cpp",
"torch/csrc/jit/backends/backend_interface.cpp",
]
core_sources_full = core_sources_full_mobile + [
"torch/csrc/jit/runtime/static/fusion.cpp",
"torch/csrc/jit/runtime/static/impl.cpp",
"torch/csrc/jit/runtime/static/memory_planner.cpp",
"torch/csrc/jit/runtime/static/native_ops.cpp",
"torch/csrc/jit/runtime/static/ops.cpp",
"torch/csrc/jit/runtime/static/passes.cpp",
"torch/csrc/jit/runtime/static/te_wrapper.cpp",
"torch/csrc/jit/tensorexpr/external_functions.cpp",
"torch/csrc/jit/tensorexpr/external_functions_codegen.cpp",
]
lazy_tensor_core_sources = [
"torch/csrc/lazy/backend/backend_device.cpp",
"torch/csrc/lazy/backend/backend_interface.cpp",
"torch/csrc/lazy/backend/lowering_context.cpp",
"torch/csrc/lazy/core/config.cpp",
"torch/csrc/lazy/core/debug_util.cpp",
"torch/csrc/lazy/core/hash.cpp",
"torch/csrc/lazy/core/helpers.cpp",
"torch/csrc/lazy/core/ir.cpp",
"torch/csrc/lazy/core/ir_dump_util.cpp",
"torch/csrc/lazy/core/ir_metadata.cpp",
"torch/csrc/lazy/core/ir_util.cpp",
"torch/csrc/lazy/core/lazy_graph_executor.cpp",
"torch/csrc/lazy/core/lazy_view.cpp",
"torch/csrc/lazy/core/metrics.cpp",
"torch/csrc/lazy/core/multi_wait.cpp",
"torch/csrc/lazy/core/permutation_util.cpp",
"torch/csrc/lazy/core/shape.cpp",
"torch/csrc/lazy/core/tensor.cpp",
"torch/csrc/lazy/core/tensor_impl.cpp",
"torch/csrc/lazy/core/tensor_util.cpp",
"torch/csrc/lazy/core/thread_pool.cpp",
"torch/csrc/lazy/core/view_ops/as_strided.cpp",
"torch/csrc/lazy/core/view_ops/as_strided_view_update.cpp",
"torch/csrc/lazy/core/view_ops/diagonal.cpp",
"torch/csrc/lazy/core/view_ops/diagonal_view_update.cpp",
"torch/csrc/lazy/core/view_ops/narrow.cpp",
"torch/csrc/lazy/core/view_ops/narrow_view_update.cpp",
"torch/csrc/lazy/core/view_ops/permute.cpp",
"torch/csrc/lazy/core/view_ops/resize.cpp",
"torch/csrc/lazy/core/view_ops/select.cpp",
"torch/csrc/lazy/core/view_ops/squeeze.cpp",
"torch/csrc/lazy/core/view_ops/unsqueeze.cpp",
"torch/csrc/lazy/core/view_ops/select_view_update.cpp",
"torch/csrc/lazy/core/view_ops/view.cpp",
"torch/csrc/lazy/ts_backend/config.cpp",
"torch/csrc/lazy/ts_backend/ops/arithmetic_ir_ops.cpp",
"torch/csrc/lazy/ts_backend/ops/cast.cpp",
"torch/csrc/lazy/ts_backend/ops/device_data.cpp",
"torch/csrc/lazy/ts_backend/ops/expand.cpp",
"torch/csrc/lazy/ts_backend/ops/generic.cpp",
"torch/csrc/lazy/ts_backend/ops/scalar.cpp",
"torch/csrc/lazy/ts_backend/ts_node.cpp",
]
lazy_tensor_core_python_sources = [
"torch/csrc/lazy/python/init.cpp",
"torch/csrc/lazy/python/python_util.cpp",
]
libtorch_core_sources = sorted(
core_sources_common +
torch_unpickler_common +
core_sources_full +
core_trainer_sources +
libtorch_profiler_sources +
lazy_tensor_core_sources,
)
# These files are the only ones that are supported on Windows.
libtorch_distributed_base_sources = [
"torch/csrc/distributed/c10d/FileStore.cpp",
"torch/csrc/distributed/c10d/GlooDeviceFactory.cpp",
"torch/csrc/distributed/c10d/ParamCommsUtils.cpp",
"torch/csrc/distributed/c10d/PrefixStore.cpp",
"torch/csrc/distributed/c10d/ProcessGroup.cpp",
"torch/csrc/distributed/c10d/ProcessGroupGloo.cpp",
"torch/csrc/distributed/c10d/ProcessGroupMPI.cpp",
"torch/csrc/distributed/c10d/ProcessGroupWrapper.cpp",
"torch/csrc/distributed/c10d/Store.cpp",
"torch/csrc/distributed/c10d/TCPStore.cpp",
"torch/csrc/distributed/c10d/Utils.cpp",
"torch/csrc/distributed/c10d/comm.cpp",
"torch/csrc/distributed/c10d/default_comm_hooks.cpp",
"torch/csrc/distributed/c10d/exception.cpp",
"torch/csrc/distributed/c10d/logger.cpp",
"torch/csrc/distributed/c10d/reducer.cpp",
"torch/csrc/distributed/c10d/sequence_num.cpp",
"torch/csrc/distributed/c10d/socket.cpp",
]
# These files are only supported on Linux (and others) but not on Windows.
libtorch_distributed_extra_sources = [
"torch/csrc/distributed/autograd/autograd.cpp",
"torch/csrc/distributed/autograd/utils.cpp",
"torch/csrc/distributed/autograd/context/container.cpp",
"torch/csrc/distributed/autograd/context/context.cpp",
"torch/csrc/distributed/autograd/engine/dist_engine.cpp",
"torch/csrc/distributed/autograd/functions/recvrpc_backward.cpp",
"torch/csrc/distributed/autograd/functions/sendrpc_backward.cpp",
"torch/csrc/distributed/autograd/rpc_messages/autograd_metadata.cpp",
"torch/csrc/distributed/autograd/rpc_messages/propagate_gradients_req.cpp",
"torch/csrc/distributed/autograd/rpc_messages/propagate_gradients_resp.cpp",
"torch/csrc/distributed/autograd/rpc_messages/cleanup_autograd_context_req.cpp",
"torch/csrc/distributed/autograd/rpc_messages/cleanup_autograd_context_resp.cpp",
"torch/csrc/distributed/autograd/rpc_messages/rpc_with_autograd.cpp",
"torch/csrc/distributed/autograd/rpc_messages/rpc_with_profiling_req.cpp",
"torch/csrc/distributed/autograd/rpc_messages/rpc_with_profiling_resp.cpp",
"torch/csrc/distributed/autograd/rpc_messages/rref_backward_req.cpp",
"torch/csrc/distributed/autograd/rpc_messages/rref_backward_resp.cpp",
"torch/csrc/distributed/c10d/HashStore.cpp",
"torch/csrc/distributed/c10d/ProcessGroupRoundRobin.cpp",
"torch/csrc/distributed/rpc/agent_utils.cpp",
"torch/csrc/distributed/rpc/message.cpp",
"torch/csrc/distributed/rpc/profiler/remote_profiler_manager.cpp",
"torch/csrc/distributed/rpc/profiler/server_process_global_profiler.cpp",
"torch/csrc/distributed/rpc/python_call.cpp",
"torch/csrc/distributed/rpc/python_remote_call.cpp",
"torch/csrc/distributed/rpc/python_resp.cpp",
"torch/csrc/distributed/rpc/request_callback.cpp",
"torch/csrc/distributed/rpc/request_callback_no_python.cpp",
"torch/csrc/distributed/rpc/rpc_agent.cpp",
"torch/csrc/distributed/rpc/rref_context.cpp",
"torch/csrc/distributed/rpc/rref_impl.cpp",
"torch/csrc/distributed/rpc/rref_proto.cpp",
"torch/csrc/distributed/rpc/script_call.cpp",
"torch/csrc/distributed/rpc/script_remote_call.cpp",
"torch/csrc/distributed/rpc/script_resp.cpp",
"torch/csrc/distributed/rpc/tensorpipe_agent.cpp",
"torch/csrc/distributed/rpc/tensorpipe_utils.cpp",
"torch/csrc/distributed/rpc/testing/faulty_tensorpipe_agent.cpp",
"torch/csrc/distributed/rpc/torchscript_functions.cpp",
"torch/csrc/distributed/rpc/types.cpp",
"torch/csrc/distributed/rpc/utils.cpp",
]
libtorch_distributed_sources = libtorch_distributed_base_sources + libtorch_distributed_extra_sources
jit_sources_full = [
"torch/csrc/jit/codegen/cuda/interface.cpp",
"torch/csrc/jit/passes/lower_graph.cpp",
"torch/csrc/jit/runtime/register_c10_ops.cpp",
"torch/csrc/jit/runtime/register_prim_ops.cpp",
"torch/csrc/jit/runtime/register_prim_ops_fulljit.cpp",
"torch/csrc/jit/runtime/register_special_ops.cpp",
"torch/csrc/jit/passes/remove_inplace_ops.cpp",
"torch/csrc/jit/passes/utils/check_alias_annotation.cpp",
"torch/csrc/jit/passes/autocast.cpp",
]
libtorch_core_jit_sources = sorted(jit_sources_full)
torch_mobile_tracer_sources = [
"torch/csrc/jit/mobile/model_tracer/tracer.cpp",
"torch/csrc/jit/mobile/model_tracer/TensorUtils.cpp",
"torch/csrc/jit/mobile/model_tracer/TracerRunner.cpp",
"torch/csrc/jit/mobile/model_tracer/MobileModelRunner.cpp",
"torch/csrc/jit/mobile/model_tracer/OperatorCallTracer.cpp",
"torch/csrc/jit/mobile/model_tracer/KernelDTypeTracer.cpp",
]
torch_mobile_core = [
# backend_debug_info.cpp provides
# __torch__.torch.classes.backend.BackendDebugInfo class
# This should not be needed eventually.
# TODO: Remove this dependency
"torch/csrc/jit/backends/backend_debug_info.cpp",
"torch/csrc/jit/mobile/compatibility/model_compatibility.cpp",
"torch/csrc/jit/mobile/function.cpp",
"torch/csrc/jit/mobile/import.cpp",
"torch/csrc/jit/mobile/interpreter.cpp",
"torch/csrc/jit/mobile/module.cpp",
"torch/csrc/jit/mobile/observer.cpp",
"torch/csrc/jit/mobile/parse_bytecode.cpp",
"torch/csrc/jit/mobile/parse_operators.cpp",
"torch/csrc/jit/mobile/upgrader_mobile.cpp",
"torch/csrc/jit/runtime/register_prim_ops.cpp",
"torch/csrc/jit/runtime/register_special_ops.cpp",
]
libtorch_lite_eager_symbolication = [
"torch/csrc/jit/frontend/source_range.cpp",
"torch/csrc/jit/ir/scope.cpp",
"torch/csrc/jit/mobile/debug_info.cpp",
"torch/csrc/jit/serialization/callstack_debug_info_serialization.cpp",
"torch/csrc/jit/serialization/source_range_serialization.cpp",
# Later we can split serialization and deserialization logic
# to have better separation within build and only build relevant parts.
"torch/csrc/jit/serialization/pickle.cpp",
"torch/csrc/jit/serialization/pickler.cpp",
"torch/csrc/jit/serialization/unpickler.cpp",
]
# TODO: core_trainer_sources is not necessary for libtorch lite
libtorch_lite_cmake_sources = sorted(
core_trainer_sources +
core_sources_common +
torch_unpickler_common +
torch_mobile_core,
)
libtorch_cmake_sources = libtorch_core_sources + libtorch_core_jit_sources
libtorch_extra_sources = libtorch_core_jit_sources + [
"torch/csrc/autograd/TraceTypeManual.cpp",
"torch/csrc/autograd/VariableTypeManual.cpp",
"torch/csrc/autograd/FunctionsManual.cpp",
"torch/csrc/jit/api/module_save.cpp",
"torch/csrc/jit/codegen/fuser/cpu/fused_kernel.cpp",
"torch/csrc/jit/mobile/compatibility/backport.cpp",
"torch/csrc/jit/mobile/compatibility/backport_manager.cpp",
"torch/csrc/jit/mobile/compatibility/model_compatibility.cpp",
# To be included for eager symbolication in lite interpreter
# when it is built in libtorch
"torch/csrc/jit/mobile/debug_info.cpp",
"torch/csrc/jit/mobile/function.cpp",
"torch/csrc/jit/mobile/import.cpp",
"torch/csrc/jit/mobile/import_data.cpp",
"torch/csrc/jit/mobile/interpreter.cpp",
"torch/csrc/jit/mobile/module.cpp",
"torch/csrc/jit/mobile/observer.cpp",
"torch/csrc/jit/mobile/parse_bytecode.cpp",
"torch/csrc/jit/mobile/parse_operators.cpp",
"torch/csrc/jit/mobile/train/export_data.cpp",
"torch/csrc/jit/mobile/train/optim/sgd.cpp",
"torch/csrc/jit/mobile/train/random.cpp",
"torch/csrc/jit/mobile/train/sequential.cpp",
"torch/csrc/jit/mobile/upgrader_mobile.cpp",
"torch/csrc/jit/serialization/onnx.cpp",
"torch/csrc/jit/serialization/export.cpp",
"torch/csrc/jit/serialization/export_bytecode.cpp",
"torch/csrc/jit/serialization/export_module.cpp",
"torch/csrc/jit/serialization/import_legacy.cpp",
"torch/csrc/utils/byte_order.cpp",
"torch/csrc/utils/out_types.cpp",
]
def libtorch_sources(gencode_pattern = ":generate-code[{}]"):
    return libtorch_generated_sources(gencode_pattern) + libtorch_core_sources + libtorch_distributed_sources + libtorch_extra_sources
libtorch_cuda_core_sources = [
"torch/csrc/CudaIPCTypes.cpp",
"torch/csrc/cuda/comm.cpp",
"torch/csrc/jit/codegen/fuser/cuda/fused_kernel.cpp",
"torch/csrc/profiler/cuda.cpp",
"torch/csrc/autograd/functions/comm.cpp",
"torch/csrc/jit/codegen/cuda/arith.cpp",
"torch/csrc/jit/codegen/cuda/compute_at.cpp",
"torch/csrc/jit/codegen/cuda/compute_at_map.cpp",
"torch/csrc/jit/codegen/cuda/codegen.cpp",
"torch/csrc/jit/codegen/cuda/dispatch.cpp",
"torch/csrc/jit/codegen/cuda/expr_evaluator.cpp",
"torch/csrc/jit/codegen/cuda/executor.cpp",
"torch/csrc/jit/codegen/cuda/executor_kernel_arg.cpp",
"torch/csrc/jit/codegen/cuda/executor_launch_params.cpp",
"torch/csrc/jit/codegen/cuda/evaluator_common.cpp",
"torch/csrc/jit/codegen/cuda/executor_utils.cpp",
"torch/csrc/jit/codegen/cuda/fusion.cpp",
"torch/csrc/jit/codegen/cuda/graph_fuser.cpp",
"torch/csrc/jit/codegen/cuda/index_compute.cpp",
"torch/csrc/jit/codegen/cuda/index_reference_replay.cpp",
"torch/csrc/jit/codegen/cuda/instrumentation.cpp",
"torch/csrc/jit/codegen/cuda/ir_base_nodes.cpp",
"torch/csrc/jit/codegen/cuda/ir_builder.cpp",
"torch/csrc/jit/codegen/cuda/ir_cloner.cpp",
"torch/csrc/jit/codegen/cuda/ir_container.cpp",
"torch/csrc/jit/codegen/cuda/ir_graphviz.cpp",
"torch/csrc/jit/codegen/cuda/ir_nodes.cpp",
"torch/csrc/jit/codegen/cuda/ir_iostream.cpp",
"torch/csrc/jit/codegen/cuda/ir_utils.cpp",
"torch/csrc/jit/codegen/cuda/iter_visitor.cpp",
"torch/csrc/jit/codegen/cuda/kernel.cpp",
"torch/csrc/jit/codegen/cuda/kernel_cache.cpp",
"torch/csrc/jit/codegen/cuda/kernel_expr_evaluator.cpp",
"torch/csrc/jit/codegen/cuda/kernel_ir.cpp",
"torch/csrc/jit/codegen/cuda/kernel_ir_dispatch.cpp",
"torch/csrc/jit/codegen/cuda/lower_alias_memory.cpp",
"torch/csrc/jit/codegen/cuda/lower_allocation.cpp",
"torch/csrc/jit/codegen/cuda/lower_double_buffer.cpp",
"torch/csrc/jit/codegen/cuda/lower_expr_sort.cpp",
"torch/csrc/jit/codegen/cuda/lower_fusion_simplifier.cpp",
"torch/csrc/jit/codegen/cuda/lower_index.cpp",
"torch/csrc/jit/codegen/cuda/lower_insert_syncs.cpp",
"torch/csrc/jit/codegen/cuda/lower_loops.cpp",
"torch/csrc/jit/codegen/cuda/lower_magic_zero.cpp",
"torch/csrc/jit/codegen/cuda/lower_misaligned_vectorization.cpp",
"torch/csrc/jit/codegen/cuda/lower_predicate.cpp",
"torch/csrc/jit/codegen/cuda/lower_replace_size.cpp",
"torch/csrc/jit/codegen/cuda/lower_shift.cpp",
"torch/csrc/jit/codegen/cuda/lower_thread_predicate.cpp",
"torch/csrc/jit/codegen/cuda/lower_trivial_broadcast.cpp",
"torch/csrc/jit/codegen/cuda/lower_trivial_reductions.cpp",
"torch/csrc/jit/codegen/cuda/lower_unroll.cpp",
"torch/csrc/jit/codegen/cuda/lower_utils.cpp",
"torch/csrc/jit/codegen/cuda/lower_validation.cpp",
"torch/csrc/jit/codegen/cuda/lower_warp_reduce.cpp",
"torch/csrc/jit/codegen/cuda/lower2device.cpp",
"torch/csrc/jit/codegen/cuda/manager.cpp",
"torch/csrc/jit/codegen/cuda/mutator.cpp",
"torch/csrc/jit/codegen/cuda/non_divisible_split.cpp",
"torch/csrc/jit/codegen/cuda/ops/alias.cpp",
"torch/csrc/jit/codegen/cuda/ops/composite.cpp",
"torch/csrc/jit/codegen/cuda/ops/normalization.cpp",
"torch/csrc/jit/codegen/cuda/parallel_dimension_map.cpp",
"torch/csrc/jit/codegen/cuda/parallel_type_bitmap.cpp",
"torch/csrc/jit/codegen/cuda/parser.cpp",
"torch/csrc/jit/codegen/cuda/partial_split_map.cpp",
"torch/csrc/jit/codegen/cuda/partition.cpp",
"torch/csrc/jit/codegen/cuda/predicate_compute.cpp",
"torch/csrc/jit/codegen/cuda/register_interface.cpp",
"torch/csrc/jit/codegen/cuda/root_domain_map.cpp",
"torch/csrc/jit/codegen/cuda/scheduler/pointwise.cpp",
"torch/csrc/jit/codegen/cuda/scheduler/normalization.cpp",
"torch/csrc/jit/codegen/cuda/scheduler/reduction.cpp",
"torch/csrc/jit/codegen/cuda/scheduler/reduction_utils.cpp",
"torch/csrc/jit/codegen/cuda/scheduler/registry.cpp",
"torch/csrc/jit/codegen/cuda/scheduler/utils.cpp",
"torch/csrc/jit/codegen/cuda/type_inference.cpp",
"torch/csrc/jit/codegen/cuda/type_promotion.cpp",
"torch/csrc/jit/codegen/cuda/fusion_segmenter.cpp",
"torch/csrc/jit/codegen/cuda/tensor_view.cpp",
"torch/csrc/jit/codegen/cuda/transform_iter.cpp",
"torch/csrc/jit/codegen/cuda/transform_replay.cpp",
"torch/csrc/jit/codegen/cuda/transform_rfactor.cpp",
"torch/csrc/jit/codegen/cuda/transform_view.cpp",
"torch/csrc/jit/codegen/cuda/type.cpp",
"torch/csrc/jit/codegen/cuda/utils.cpp",
"torch/csrc/jit/passes/frozen_conv_add_relu_fusion_cuda.cpp",
"torch/csrc/jit/tensorexpr/cuda_codegen.cpp",
"torch/csrc/jit/runtime/register_cuda_ops.cpp",
]
# These files are the only ones that are supported on Windows.
libtorch_cuda_distributed_base_sources = [
"torch/csrc/distributed/c10d/reducer_cuda.cpp",
]
# These files are only supported on Linux (and others) but not on Windows.
libtorch_cuda_distributed_extra_sources = [
"torch/csrc/distributed/c10d/NCCLUtils.cpp",
"torch/csrc/distributed/c10d/ProcessGroupNCCL.cpp",
"torch/csrc/distributed/rpc/tensorpipe_cuda.cpp",
"torch/csrc/distributed/c10d/quantization/quantization_gpu.cu",
]
libtorch_cuda_distributed_sources = libtorch_cuda_distributed_base_sources + libtorch_cuda_distributed_extra_sources
libtorch_cuda_sources = libtorch_cuda_core_sources + libtorch_cuda_distributed_sources + [
"torch/csrc/cuda/nccl.cpp",
]
torch_cpp_srcs = [
"torch/csrc/api/src/cuda.cpp", # this just forwards stuff, no real CUDA
"torch/csrc/api/src/data/datasets/mnist.cpp",
"torch/csrc/api/src/data/samplers/distributed.cpp",
"torch/csrc/api/src/data/samplers/random.cpp",
"torch/csrc/api/src/data/samplers/sequential.cpp",
"torch/csrc/api/src/data/samplers/stream.cpp",
"torch/csrc/api/src/enum.cpp",
"torch/csrc/api/src/imethod.cpp",
"torch/csrc/api/src/jit.cpp",
"torch/csrc/api/src/serialize.cpp",
"torch/csrc/api/src/nn/init.cpp",
"torch/csrc/api/src/nn/module.cpp",
"torch/csrc/api/src/nn/modules/_functions.cpp",
"torch/csrc/api/src/nn/modules/activation.cpp",
"torch/csrc/api/src/nn/modules/adaptive.cpp",
"torch/csrc/api/src/nn/modules/batchnorm.cpp",
"torch/csrc/api/src/nn/modules/normalization.cpp",
"torch/csrc/api/src/nn/modules/instancenorm.cpp",
"torch/csrc/api/src/nn/modules/conv.cpp",
"torch/csrc/api/src/nn/modules/dropout.cpp",
"torch/csrc/api/src/nn/modules/distance.cpp",
"torch/csrc/api/src/nn/modules/embedding.cpp",
"torch/csrc/api/src/nn/modules/fold.cpp",
"torch/csrc/api/src/nn/modules/linear.cpp",
"torch/csrc/api/src/nn/modules/loss.cpp",
"torch/csrc/api/src/nn/modules/padding.cpp",
"torch/csrc/api/src/nn/modules/pixelshuffle.cpp",
"torch/csrc/api/src/nn/modules/pooling.cpp",
"torch/csrc/api/src/nn/modules/rnn.cpp",
"torch/csrc/api/src/nn/modules/upsampling.cpp",
"torch/csrc/api/src/nn/modules/transformer.cpp",
"torch/csrc/api/src/nn/modules/container/functional.cpp",
"torch/csrc/api/src/nn/options/activation.cpp",
"torch/csrc/api/src/nn/options/adaptive.cpp",
"torch/csrc/api/src/nn/options/batchnorm.cpp",
"torch/csrc/api/src/nn/options/conv.cpp",
"torch/csrc/api/src/nn/options/dropout.cpp",
"torch/csrc/api/src/nn/options/instancenorm.cpp",
"torch/csrc/api/src/nn/options/linear.cpp",
"torch/csrc/api/src/nn/options/normalization.cpp",
"torch/csrc/api/src/nn/options/embedding.cpp",
"torch/csrc/api/src/nn/options/padding.cpp",
"torch/csrc/api/src/nn/options/pooling.cpp",
"torch/csrc/api/src/nn/options/rnn.cpp",
"torch/csrc/api/src/nn/options/vision.cpp",
"torch/csrc/api/src/nn/options/transformer.cpp",
"torch/csrc/api/src/optim/adagrad.cpp",
"torch/csrc/api/src/optim/adam.cpp",
"torch/csrc/api/src/optim/adamw.cpp",
"torch/csrc/api/src/optim/lbfgs.cpp",
"torch/csrc/api/src/optim/optimizer.cpp",
"torch/csrc/api/src/optim/rmsprop.cpp",
"torch/csrc/api/src/optim/serialize.cpp",
"torch/csrc/api/src/optim/sgd.cpp",
"torch/csrc/api/src/optim/schedulers/lr_scheduler.cpp",
"torch/csrc/api/src/optim/schedulers/step_lr.cpp",
"torch/csrc/api/src/serialize/input-archive.cpp",
"torch/csrc/api/src/serialize/output-archive.cpp",
"torch/csrc/utils/crash_handler.cpp",
]
libtorch_python_cuda_core_sources = [
"torch/csrc/cuda/Event.cpp",
"torch/csrc/cuda/Module.cpp",
"torch/csrc/cuda/python_comm.cpp",
"torch/csrc/cuda/Storage.cpp",
"torch/csrc/cuda/Stream.cpp",
"torch/csrc/cuda/Graph.cpp",
"torch/csrc/cuda/serialization.cpp",
"torch/csrc/cuda/shared/cudart.cpp",
"torch/csrc/cuda/shared/nvtx.cpp",
"torch/csrc/cuda/utils.cpp",
]
libtorch_python_cuda_sources = libtorch_python_cuda_core_sources + [
"torch/csrc/cuda/python_nccl.cpp",
"torch/csrc/cuda/shared/cudnn.cpp",
"torch/csrc/cuda/Tensor.cpp",
]
libtorch_python_core_sources = [
"torch/csrc/DataLoader.cpp",
"torch/csrc/Device.cpp",
"torch/csrc/Dtype.cpp",
"torch/csrc/DynamicTypes.cpp",
"torch/csrc/Exceptions.cpp",
"torch/csrc/Generator.cpp",
"torch/csrc/Layout.cpp",
"torch/csrc/MemoryFormat.cpp",
"torch/csrc/QScheme.cpp",
"torch/csrc/Module.cpp",
"torch/csrc/python_dimname.cpp",
"torch/csrc/Size.cpp",
"torch/csrc/Storage.cpp",
"torch/csrc/Stream.cpp",
"torch/csrc/TypeInfo.cpp",
"torch/csrc/api/src/python/init.cpp",
"torch/csrc/autograd/functions/init.cpp",
"torch/csrc/autograd/init.cpp",
"torch/csrc/autograd/profiler_python.cpp",
"torch/csrc/autograd/python_anomaly_mode.cpp",
"torch/csrc/autograd/python_saved_variable_hooks.cpp",
"torch/csrc/autograd/python_mode.cpp",
"torch/csrc/autograd/python_cpp_function.cpp",
"torch/csrc/autograd/python_engine.cpp",
"torch/csrc/autograd/python_function.cpp",
"torch/csrc/autograd/python_hook.cpp",
"torch/csrc/autograd/python_legacy_variable.cpp",
"torch/csrc/autograd/python_torch_functions_manual.cpp",
"torch/csrc/autograd/python_variable.cpp",
"torch/csrc/autograd/python_variable_indexing.cpp",
"torch/csrc/jit/backends/backend_init.cpp",
"torch/csrc/jit/python/init.cpp",
"torch/csrc/jit/passes/onnx.cpp",
"torch/csrc/jit/passes/onnx/cast_all_constant_to_floating.cpp",
"torch/csrc/jit/passes/onnx/deduplicate_initializers.cpp",
"torch/csrc/jit/passes/onnx/eval_peephole.cpp",
"torch/csrc/jit/passes/onnx/constant_fold.cpp",
"torch/csrc/jit/passes/onnx/constant_map.cpp",
"torch/csrc/jit/passes/onnx/eliminate_unused_items.cpp",
"torch/csrc/jit/passes/onnx/fixup_onnx_controlflow.cpp",
"torch/csrc/jit/passes/onnx/list_model_parameters.cpp",
"torch/csrc/jit/passes/onnx/function_substitution.cpp",
"torch/csrc/jit/passes/onnx/helper.cpp",
"torch/csrc/jit/passes/onnx/peephole.cpp",
"torch/csrc/jit/passes/onnx/preprocess_for_onnx.cpp",
"torch/csrc/jit/passes/onnx/prepare_division_for_onnx.cpp",
"torch/csrc/jit/passes/onnx/scalar_type_analysis.cpp",
"torch/csrc/jit/passes/onnx/unpack_quantized_weights.cpp",
"torch/csrc/jit/passes/onnx/remove_inplace_ops_for_onnx.cpp",
"torch/csrc/jit/passes/onnx/shape_type_inference.cpp",
"torch/csrc/jit/passes/onnx/function_extraction.cpp",
"torch/csrc/jit/python/pybind_utils.cpp",
"torch/csrc/jit/passes/onnx/pattern_conversion/common.cpp",
"torch/csrc/jit/passes/onnx/pattern_conversion/pattern_encapsulation.cpp",
"torch/csrc/jit/passes/onnx/pattern_conversion/pattern_conversion.cpp",
"torch/csrc/jit/python/python_arg_flatten.cpp",
"torch/csrc/jit/python/python_custom_class.cpp",
"torch/csrc/jit/python/python_dict.cpp",
"torch/csrc/jit/python/python_interpreter.cpp",
"torch/csrc/jit/python/python_ir.cpp",
"torch/csrc/jit/python/python_list.cpp",
"torch/csrc/jit/python/python_tracer.cpp",
"torch/csrc/jit/python/script_init.cpp",
"torch/csrc/jit/frontend/concrete_module_type.cpp",
"torch/csrc/jit/frontend/tree_views.cpp",
"torch/csrc/jit/python/python_sugared_value.cpp",
"torch/csrc/jit/python/python_tree_views.cpp",
"torch/csrc/jit/runtime/static/init.cpp",
"torch/csrc/jit/tensorexpr/tensorexpr_init.cpp",
"torch/csrc/monitor/python_init.cpp",
"torch/csrc/multiprocessing/init.cpp",
"torch/csrc/onnx/init.cpp",
"torch/csrc/serialization.cpp",
"torch/csrc/tensor/python_tensor.cpp",
"torch/csrc/utils/init.cpp",
"torch/csrc/utils/throughput_benchmark.cpp",
"torch/csrc/utils.cpp",
"torch/csrc/utils/cuda_lazy_init.cpp",
"torch/csrc/utils/invalid_arguments.cpp",
"torch/csrc/utils/object_ptr.cpp",
"torch/csrc/utils/python_arg_parser.cpp",
"torch/csrc/utils/python_dispatch.cpp",
"torch/csrc/utils/structseq.cpp",
"torch/csrc/utils/tensor_apply.cpp",
"torch/csrc/utils/tensor_dtypes.cpp",
"torch/csrc/utils/tensor_layouts.cpp",
"torch/csrc/utils/tensor_memoryformats.cpp",
"torch/csrc/utils/tensor_qschemes.cpp",
"torch/csrc/utils/tensor_list.cpp",
"torch/csrc/utils/tensor_new.cpp",
"torch/csrc/utils/tensor_numpy.cpp",
"torch/csrc/utils/tensor_types.cpp",
"torch/csrc/utils/disable_torch_function.cpp",
] + lazy_tensor_core_python_sources
libtorch_python_distributed_core_sources = [
"torch/csrc/distributed/c10d/init.cpp",
"torch/csrc/distributed/c10d/python_comm_hook.cpp",
"torch/csrc/distributed/c10d/quantization/quantization.cpp",
]
libtorch_python_distributed_sources = libtorch_python_distributed_core_sources + [
"torch/csrc/distributed/autograd/init.cpp",
"torch/csrc/distributed/rpc/init.cpp",
"torch/csrc/distributed/rpc/py_rref.cpp",
"torch/csrc/distributed/rpc/python_functions.cpp",
"torch/csrc/distributed/rpc/python_rpc_handler.cpp",
"torch/csrc/distributed/rpc/request_callback_impl.cpp",
"torch/csrc/distributed/rpc/testing/init.cpp",
"torch/csrc/distributed/rpc/unpickled_python_call.cpp",
"torch/csrc/distributed/rpc/unpickled_python_remote_call.cpp",
"torch/csrc/jit/runtime/register_distributed_ops.cpp",
]
def glob_libtorch_python_sources(gencode_pattern = ":generate-code[{}]"):
    _libtorch_python_sources = [gencode_pattern.format(name) for name in [
        "autograd/generated/python_functions_0.cpp",
        "autograd/generated/python_functions_1.cpp",
        "autograd/generated/python_functions_2.cpp",
        "autograd/generated/python_functions_3.cpp",
        "autograd/generated/python_functions_4.cpp",
        "autograd/generated/python_nn_functions.cpp",
        "autograd/generated/python_fft_functions.cpp",
        "autograd/generated/python_linalg_functions.cpp",
        "autograd/generated/python_return_types.cpp",
        "autograd/generated/python_sparse_functions.cpp",
        "autograd/generated/python_special_functions.cpp",
        "autograd/generated/python_torch_functions_0.cpp",
        "autograd/generated/python_torch_functions_1.cpp",
        "autograd/generated/python_torch_functions_2.cpp",
        "autograd/generated/python_variable_methods.cpp",
    ]]

    _libtorch_python_sources.extend(libtorch_python_core_sources)
    _libtorch_python_sources.extend(libtorch_python_distributed_sources)
    return _libtorch_python_sources
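# Illustrative note (not part of the upstream file list): with the default
# gencode_pattern of ":generate-code[{}]", each generated name above is
# substituted into the pattern via str.format, e.g.
#
#   ":generate-code[{}]".format("autograd/generated/python_functions_0.cpp")
#   == ":generate-code[autograd/generated/python_functions_0.cpp]"
#
# A caller that supplies a different pattern gets its own label scheme the same way.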
aten_cpu_source_non_codegen_list = [
"aten/src/ATen/AccumulateType.cpp",
"aten/src/ATen/BatchedTensorImpl.cpp",
"aten/src/ATen/CPUGeneratorImpl.cpp",
"aten/src/ATen/Context.cpp",
"aten/src/ATen/DLConvertor.cpp",
"aten/src/ATen/EmptyTensor.cpp",
"aten/src/ATen/ExpandUtils.cpp",
"aten/src/ATen/FunctionalInverses.cpp",
"aten/src/ATen/FunctionalStorageImpl.cpp",
"aten/src/ATen/FunctionalTensorWrapper.cpp",
"aten/src/ATen/FunctionalizeFallbackKernel.cpp",
"aten/src/ATen/MemoryOverlap.cpp",
"aten/src/ATen/MapAllocator.cpp",
"aten/src/ATen/NamedTensorUtils.cpp",
"aten/src/ATen/ParallelCommon.cpp",
"aten/src/ATen/ParallelNative.cpp",
"aten/src/ATen/ParallelNativeTBB.cpp",
"aten/src/ATen/ParallelOpenMP.cpp",
"aten/src/ATen/ParallelThreadPoolNative.cpp",
"aten/src/ATen/ScalarOps.cpp",
"aten/src/ATen/SequenceNumber.cpp",
"aten/src/ATen/SparseTensorImpl.cpp",
"aten/src/ATen/SparseCsrTensorImpl.cpp",
"aten/src/ATen/SparseTensorUtils.cpp",
"aten/src/ATen/TensorGeometry.cpp",
"aten/src/ATen/TensorIndexing.cpp",
"aten/src/ATen/TensorMeta.cpp",
"aten/src/ATen/TensorNames.cpp",
"aten/src/ATen/TensorUtils.cpp",
"aten/src/ATen/ThreadLocalState.cpp",
"aten/src/ATen/FuncTorchTLS.cpp",
"aten/src/ATen/Utils.cpp",
"aten/src/ATen/Version.cpp",
"aten/src/ATen/VmapMode.cpp",
"aten/src/ATen/VmapTransforms.cpp",
"aten/src/ATen/core/BackendSelectFallbackKernel.cpp",
"aten/src/ATen/core/DeprecatedTypeProperties.cpp",
"aten/src/ATen/core/DeprecatedTypePropertiesRegistry.cpp",
"aten/src/ATen/core/Dict.cpp",
"aten/src/ATen/core/Dimname.cpp",
"aten/src/ATen/core/Formatting.cpp",
"aten/src/ATen/core/Generator.cpp",
"aten/src/ATen/core/List.cpp",
"aten/src/ATen/core/NamedTensor.cpp",
"aten/src/ATen/core/Tensor.cpp",
"aten/src/ATen/core/VariableFallbackKernel.cpp",
"aten/src/ATen/core/VariableHooksInterface.cpp",
"aten/src/ATen/core/Vitals.cpp",
"aten/src/ATen/core/boxing/KernelFunction.cpp",
"aten/src/ATen/core/custom_class.cpp",
"aten/src/ATen/core/dispatch/DispatchKeyExtractor.cpp",
"aten/src/ATen/core/dispatch/Dispatcher.cpp",
"aten/src/ATen/core/dispatch/ObservedOperators.cpp",
"aten/src/ATen/core/dispatch/OperatorEntry.cpp",
"aten/src/ATen/core/interned_strings.cpp",
"aten/src/ATen/core/ivalue.cpp",
"aten/src/ATen/core/library.cpp",
"aten/src/ATen/core/op_registration/infer_schema.cpp",
"aten/src/ATen/core/op_registration/op_registration.cpp",
"aten/src/ATen/core/operator_name.cpp",
"aten/src/ATen/core/PythonModeTLS.cpp",
"aten/src/ATen/core/register_symbols.cpp",
"aten/src/ATen/core/class_type.cpp",
"aten/src/ATen/core/type.cpp",
"aten/src/ATen/core/type_factory.cpp",
"aten/src/ATen/core/dynamic_type.cpp",
"aten/src/ATen/core/tensor_type.cpp",
"aten/src/ATen/core/union_type.cpp",
"aten/src/ATen/cpu/FlushDenormal.cpp",
"aten/src/ATen/detail/CPUGuardImpl.cpp",
"aten/src/ATen/detail/CUDAHooksInterface.cpp",
"aten/src/ATen/detail/HIPHooksInterface.cpp",
"aten/src/ATen/detail/ORTHooksInterface.cpp",
"aten/src/ATen/metal/Context.cpp",
"aten/src/ATen/native/AutogradComposite.cpp",
"aten/src/ATen/native/BatchLinearAlgebraKernel.cpp",
"aten/src/ATen/native/DispatchStub.cpp",
"aten/src/ATen/native/UpSample.cpp",
"aten/src/ATen/native/mkl/LinearAlgebra.cpp",
"aten/src/ATen/native/mkl/SparseBlasImpl.cpp",
"aten/src/ATen/native/mkl/SparseCsrLinearAlgebra.cpp",
"aten/src/ATen/native/mkl/SpectralOps.cpp",
"aten/src/ATen/native/mkldnn/BinaryOps.cpp",
"aten/src/ATen/native/mkldnn/Conv.cpp",
"aten/src/ATen/native/mkldnn/Copy.cpp",
"aten/src/ATen/native/mkldnn/Gelu.cpp",
"aten/src/ATen/native/mkldnn/IDeepRegistration.cpp",
"aten/src/ATen/native/mkldnn/Linear.cpp",
"aten/src/ATen/native/mkldnn/MKLDNNCommon.cpp",
"aten/src/ATen/native/mkldnn/MKLDNNConversions.cpp",
"aten/src/ATen/native/mkldnn/MkldnnTensorMath.cpp",
"aten/src/ATen/native/mkldnn/Normalization.cpp",
"aten/src/ATen/native/mkldnn/Pooling.cpp",
"aten/src/ATen/native/mkldnn/Relu.cpp",
"aten/src/ATen/native/mkldnn/SoftMax.cpp",
"aten/src/ATen/native/mkldnn/TensorFactories.cpp",
"aten/src/ATen/native/mkldnn/TensorShape.cpp",
"aten/src/ATen/native/mkldnn/UnaryOps.cpp",
"aten/src/ATen/native/mkldnn/Utils.cpp",
"aten/src/ATen/native/mkldnn/Matmul.cpp",
"aten/src/ATen/native/quantized/cpu/init_qnnpack.cpp",
"aten/src/ATen/record_function.cpp",
"aten/src/ATen/Dispatch.cpp",
"aten/src/ATen/SavedTensorHooks.cpp",
"aten/src/ATen/vulkan/Context.cpp",
"aten/src/ATen/nnapi/nnapi_bind.cpp",
"aten/src/ATen/nnapi/nnapi_wrapper.cpp",
"aten/src/ATen/nnapi/nnapi_model_loader.cpp",
"aten/src/ATen/native/prim_native_functions.cpp",
]
aten_cpu_source_codegen_list = [
"aten/src/ATen/native/cpu/AdaptiveAvgPoolKernel.cpp",
"aten/src/ATen/native/cpu/AdaptiveMaxPoolKernel.cpp",
]
# When building the lite interpreter in OSS, "aten/src/ATen/native/cpu/AdaptiveAvgPoolKernel.cpp" will go through
# the codegen process. The codegen version of this file, like Activation.cpp.DEFAULT.cpp, will be included
# in ${cpu_kernel_cpp} in aten/src/ATen/CMakeLists.txt. As a result, in aten/src/ATen/CMakeLists.txt,
# only aten_cpu_source_non_codegen_list needs to be added to ${all_cpu_cpp}.
aten_cpu_source_list = sorted(aten_cpu_source_non_codegen_list + aten_cpu_source_codegen_list)
# Same as ${aten_cpu_source_codegen_list}, this list will go through aten codegen, and be included in
# ${cpu_kernel_cpp} in aten/src/ATen/CMakeLists.txt.
aten_native_source_codegen_list = [
"aten/src/ATen/native/cpu/Activation.cpp",
"aten/src/ATen/native/cpu/AvgPoolKernel.cpp",
"aten/src/ATen/native/cpu/BinaryOpsKernel.cpp",
"aten/src/ATen/native/cpu/BlasKernel.cpp",
"aten/src/ATen/native/cpu/CatKernel.cpp",
"aten/src/ATen/native/cpu/ChannelShuffleKernel.cpp",
"aten/src/ATen/native/cpu/ComplexKernel.cpp",
"aten/src/ATen/native/cpu/CopyKernel.cpp",
"aten/src/ATen/native/cpu/CrossKernel.cpp",
"aten/src/ATen/native/cpu/DepthwiseConvKernel.cpp",
"aten/src/ATen/native/cpu/DistanceOpsKernel.cpp",
"aten/src/ATen/native/cpu/DistributionKernels.cpp",
"aten/src/ATen/native/cpu/FillKernel.cpp",
"aten/src/ATen/native/cpu/FunctionOfAMatrixUtilsKernel.cpp",
"aten/src/ATen/native/cpu/GridSamplerKernel.cpp",
"aten/src/ATen/native/cpu/HistogramKernel.cpp",
"aten/src/ATen/native/cpu/IndexKernel.cpp",
"aten/src/ATen/native/cpu/LerpKernel.cpp",
"aten/src/ATen/native/cpu/LinearAlgebraKernel.cpp",
"aten/src/ATen/native/cpu/MaxPooling.cpp",
"aten/src/ATen/native/cpu/MaxPoolKernel.cpp",
"aten/src/ATen/native/cpu/MaxUnpoolKernel.cpp",
"aten/src/ATen/native/cpu/MultinomialKernel.cpp",
"aten/src/ATen/native/cpu/PointwiseOpsKernel.cpp",
"aten/src/ATen/native/cpu/PowKernel.cpp",
"aten/src/ATen/native/cpu/RangeFactoriesKernel.cpp",
"aten/src/ATen/native/cpu/ReduceAllOpsKernel.cpp",
"aten/src/ATen/native/cpu/ReduceOpsKernel.cpp",
"aten/src/ATen/native/cpu/RenormKernel.cpp",
"aten/src/ATen/native/cpu/ScatterGatherKernel.cpp",
"aten/src/ATen/native/cpu/SoftMaxKernel.cpp",
"aten/src/ATen/native/cpu/SortingKernel.cpp",
"aten/src/ATen/native/cpu/StackKernel.cpp",
"aten/src/ATen/native/cpu/SumKernel.cpp",
"aten/src/ATen/native/cpu/TensorCompareKernel.cpp",
"aten/src/ATen/native/cpu/UnaryOpsKernel.cpp",
"aten/src/ATen/native/cpu/Unfold2d.cpp",
"aten/src/ATen/native/cpu/UnfoldBackwardKernel.cpp",
"aten/src/ATen/native/cpu/UpSampleKernel.cpp",
"aten/src/ATen/native/cpu/UpSampleMoreKernel.cpp",
"aten/src/ATen/native/cpu/batch_norm_kernel.cpp",
"aten/src/ATen/native/cpu/group_norm_kernel.cpp",
"aten/src/ATen/native/cpu/layer_norm_kernel.cpp",
"aten/src/ATen/native/quantized/cpu/kernels/QuantizedOpKernels.cpp",
]
# This aten native source file list will not go through aten codegen process
aten_native_source_non_codegen_list = [
"aten/src/ATen/native/ao_sparse/library.cpp",
"aten/src/ATen/native/ao_sparse/quantized/cpu/fbgemm_utils.cpp",
"aten/src/ATen/native/ao_sparse/quantized/cpu/qlinear.cpp",
"aten/src/ATen/native/ao_sparse/quantized/cpu/qlinear_dynamic.cpp",
"aten/src/ATen/native/ao_sparse/quantized/cpu/qlinear_prepack.cpp",
"aten/src/ATen/native/ao_sparse/quantized/cpu/qlinear_unpack.cpp",
"aten/src/ATen/native/quantized/cpu/fbgemm_utils.cpp",
"aten/src/ATen/native/quantized/cpu/fused_obs_fake_quant.cpp",
"aten/src/ATen/native/quantized/cpu/int_repr_quant.cpp",
"aten/src/ATen/native/quantized/cpu/make_per_tensor_quantized_tensor.cpp",
"aten/src/ATen/native/quantized/cpu/q_adaavgpool.cpp",
"aten/src/ATen/native/quantized/cpu/q_avgpool.cpp",
"aten/src/ATen/native/quantized/cpu/q_avgpool3d.cpp",
"aten/src/ATen/native/quantized/cpu/qadd.cpp",
"aten/src/ATen/native/quantized/cpu/qbatch_norm.cpp",
"aten/src/ATen/native/quantized/cpu/qchannel_shuffle.cpp",
"aten/src/ATen/native/quantized/cpu/qclamp.cpp",
"aten/src/ATen/native/quantized/cpu/qconcat.cpp",
"aten/src/ATen/native/quantized/cpu/qconv.cpp",
"aten/src/ATen/native/quantized/cpu/qconv_prepack.cpp",
"aten/src/ATen/native/quantized/cpu/qconv_unpack.cpp",
"aten/src/ATen/native/quantized/cpu/qelu.cpp",
"aten/src/ATen/native/quantized/cpu/qembeddingbag.cpp",
"aten/src/ATen/native/quantized/cpu/qembeddingbag_prepack.cpp",
"aten/src/ATen/native/quantized/cpu/qembeddingbag_unpack.cpp",
"aten/src/ATen/native/quantized/cpu/qgelu.cpp",
"aten/src/ATen/native/quantized/cpu/qhardsigmoid.cpp",
"aten/src/ATen/native/quantized/cpu/qhardswish.cpp",
"aten/src/ATen/native/quantized/cpu/qlinear.cpp",
"aten/src/ATen/native/quantized/cpu/qlinear_dynamic.cpp",
"aten/src/ATen/native/quantized/cpu/qconv_dynamic.cpp",
"aten/src/ATen/native/quantized/cpu/qlinear_prepack.cpp",
"aten/src/ATen/native/quantized/cpu/qlinear_unpack.cpp",
"aten/src/ATen/native/quantized/cpu/qmatmul.cpp",
"aten/src/ATen/native/quantized/cpu/qmul.cpp",
"aten/src/ATen/native/quantized/cpu/qnormalization.cpp",
"aten/src/ATen/native/quantized/cpu/qpool.cpp",
"aten/src/ATen/native/quantized/cpu/qreduction.cpp",
"aten/src/ATen/native/quantized/cpu/qrelu.cpp",
"aten/src/ATen/native/quantized/cpu/qsigmoid.cpp",
"aten/src/ATen/native/quantized/cpu/qsort.cpp",
"aten/src/ATen/native/quantized/cpu/qtanh.cpp",
"aten/src/ATen/native/quantized/cpu/qthreshold.cpp",
"aten/src/ATen/native/quantized/cpu/qupsample_bilinear2d.cpp",
"aten/src/ATen/native/quantized/cpu/qupsample_nearest2d.cpp",
"aten/src/ATen/native/quantized/cpu/qupsample_nearest3d.cpp",
"aten/src/ATen/native/quantized/cpu/tensor_operators.cpp",
"aten/src/ATen/native/quantized/Copy.cpp",
"aten/src/ATen/native/quantized/QTensor.cpp",
"aten/src/ATen/native/quantized/TensorCompare.cpp",
"aten/src/ATen/native/quantized/TensorFactories.cpp",
"aten/src/ATen/native/quantized/affine_quantizer.cpp",
"aten/src/ATen/native/quantized/affine_quantizer_base.cpp",
"aten/src/ATen/native/quantized/fake_quant_per_channel_affine.cpp",
"aten/src/ATen/native/quantized/fake_quant_per_tensor_affine.cpp",
"aten/src/ATen/native/quantized/library.cpp",
"aten/src/ATen/quantized/QTensorImpl.cpp",
"aten/src/ATen/quantized/Quantizer.cpp",
"aten/src/ATen/native/attention.cpp",
"aten/src/ATen/native/Activation.cpp",
"aten/src/ATen/native/AdaptiveAveragePooling.cpp",
"aten/src/ATen/native/AdaptiveAveragePooling3d.cpp",
"aten/src/ATen/native/AdaptiveMaxPooling2d.cpp",
"aten/src/ATen/native/AdaptiveMaxPooling3d.cpp",
"aten/src/ATen/native/AffineGridGenerator.cpp",
"aten/src/ATen/native/AveragePool2d.cpp",
"aten/src/ATen/native/AveragePool3d.cpp",
"aten/src/ATen/native/BatchLinearAlgebra.cpp",
"aten/src/ATen/native/Batching.cpp",
"aten/src/ATen/native/BinaryOps.cpp",
"aten/src/ATen/native/Blas.cpp",
"aten/src/ATen/native/BlasKernel.cpp",
"aten/src/ATen/native/Bucketization.cpp",
"aten/src/ATen/native/CPUBlas.cpp",
"aten/src/ATen/native/ChanelShuffle.cpp",
"aten/src/ATen/native/Col2Im.cpp",
"aten/src/ATen/native/ConstantPadNd.cpp",
"aten/src/ATen/native/Convolution.cpp",
"aten/src/ATen/native/ConvolutionMM2d.cpp",
"aten/src/ATen/native/ConvolutionMM3d.cpp",
"aten/src/ATen/native/ConvolutionTBC.cpp",
"aten/src/ATen/native/Copy.cpp",
"aten/src/ATen/native/Correlation.cpp",
"aten/src/ATen/native/CPUFallback.cpp",
"aten/src/ATen/native/Cross.cpp",
"aten/src/ATen/native/DilatedMaxPool2d.cpp",
"aten/src/ATen/native/DilatedMaxPool3d.cpp",
# Referenced by both native and ATen/Version.cpp. Does not reference other native symbols
# "aten/src/ATen/native/DispatchStub.cpp",
# "aten/src/ATen/native/quantized/cpu/init_qnnpack.cpp",
"aten/src/ATen/native/Distance.cpp",
"aten/src/ATen/native/Distributions.cpp",
"aten/src/ATen/native/Dropout.cpp",
"aten/src/ATen/native/Embedding.cpp",
"aten/src/ATen/native/EmbeddingBag.cpp",
"aten/src/ATen/native/Fill.cpp",
"aten/src/ATen/native/ForeachOpsKernels.cpp",
"aten/src/ATen/native/FractionalMaxPool2d.cpp",
"aten/src/ATen/native/FractionalMaxPool3d.cpp",
"aten/src/ATen/native/FunctionOfAMatrixUtils.cpp",
"aten/src/ATen/native/GatedLinearUnit.cpp",
"aten/src/ATen/native/GridSampler.cpp",
"aten/src/ATen/native/Histogram.cpp",
"aten/src/ATen/native/Im2Col.cpp",
"aten/src/ATen/native/IndexingUtils.cpp",
"aten/src/ATen/native/Integration.cpp",
"aten/src/ATen/native/Itertools.cpp",
"aten/src/ATen/native/LegacyBridge.cpp",
"aten/src/ATen/native/Lerp.cpp",
"aten/src/ATen/native/Linear.cpp",
"aten/src/ATen/native/LinearAlgebra.cpp",
"aten/src/ATen/native/Loss.cpp",
"aten/src/ATen/native/LossCTC.cpp",
"aten/src/ATen/native/LossMultiLabelMargin.cpp",
"aten/src/ATen/native/LossMultiMargin.cpp",
"aten/src/ATen/native/LossNLL.cpp",
"aten/src/ATen/native/LossNLL2d.cpp",
"aten/src/ATen/native/MaxPooling.cpp",
"aten/src/ATen/native/MaxUnpooling.cpp",
"aten/src/ATen/native/Memory.cpp",
"aten/src/ATen/native/MetaTensor.cpp",
"aten/src/ATen/native/NNPACK.cpp",
"aten/src/ATen/native/NaiveConvolutionTranspose2d.cpp",
"aten/src/ATen/native/NaiveConvolutionTranspose3d.cpp",
"aten/src/ATen/native/NaiveDilatedConvolution.cpp",
"aten/src/ATen/native/NamedTensor.cpp",
"aten/src/ATen/native/Normalization.cpp",
"aten/src/ATen/native/Onehot.cpp",
"aten/src/ATen/native/PackedSequence.cpp",
"aten/src/ATen/native/PixelShuffle.cpp",
"aten/src/ATen/native/PointwiseOps.cpp",
"aten/src/ATen/native/Pooling.cpp",
"aten/src/ATen/native/Pow.cpp",
"aten/src/ATen/native/QuantizedLinear.cpp",
"aten/src/ATen/native/RNN.cpp",
"aten/src/ATen/native/RangeFactories.cpp",
"aten/src/ATen/native/ReduceAllOps.cpp",
"aten/src/ATen/native/ReduceOps.cpp",
"aten/src/ATen/native/ReflectionPad.cpp",
"aten/src/ATen/native/Repeat.cpp",
"aten/src/ATen/native/ReplicationPadding.cpp",
"aten/src/ATen/native/Resize.cpp",
"aten/src/ATen/native/RowwisePrune.cpp",
"aten/src/ATen/native/SegmentReduce.cpp",
"aten/src/ATen/native/Scalar.cpp",
"aten/src/ATen/native/SobolEngineOps.cpp",
"aten/src/ATen/native/SobolEngineOpsUtils.cpp",
"aten/src/ATen/native/SoftMax.cpp",
"aten/src/ATen/native/Sorting.cpp",
"aten/src/ATen/native/SpectralOps.cpp",
"aten/src/ATen/native/SummaryOps.cpp",
"aten/src/ATen/native/TensorAdvancedIndexing.cpp",
"aten/src/ATen/native/TensorCompare.cpp",
"aten/src/ATen/native/TensorConversions.cpp",
"aten/src/ATen/native/TensorFactories.cpp",
"aten/src/ATen/native/TensorIteratorReduce.cpp",
"aten/src/ATen/native/TensorProperties.cpp",
"aten/src/ATen/native/TensorShape.cpp",
"aten/src/ATen/native/TensorTransformations.cpp",
"aten/src/ATen/native/TestOps.cpp",
"aten/src/ATen/native/TriangularOps.cpp",
"aten/src/ATen/native/TypeProperties.cpp",
"aten/src/ATen/native/UnaryOps.cpp",
"aten/src/ATen/native/Unfold2d.cpp",
"aten/src/ATen/native/Unfold3d.cpp",
"aten/src/ATen/native/UnfoldBackward.cpp",
"aten/src/ATen/native/Unique.cpp",
# Low-level functions that can be directly referenced
# "aten/src/ATen/native/UpSample.cpp",
"aten/src/ATen/native/UpSampleBicubic2d.cpp",
"aten/src/ATen/native/UpSampleBilinear2d.cpp",
"aten/src/ATen/native/UpSampleLinear1d.cpp",
"aten/src/ATen/native/UpSampleNearest1d.cpp",
"aten/src/ATen/native/UpSampleNearest2d.cpp",
"aten/src/ATen/native/UpSampleNearest3d.cpp",
"aten/src/ATen/native/UpSampleTrilinear3d.cpp",
"aten/src/ATen/native/VariableMethodStubs.cpp",
"aten/src/ATen/native/WeightNorm.cpp",
"aten/src/ATen/native/group_norm.cpp",
"aten/src/ATen/native/layer_norm.cpp",
"aten/src/ATen/native/sparse/ParamUtils.cpp",
"aten/src/ATen/native/sparse/SoftMax.cpp",
"aten/src/ATen/native/sparse/SparseBlas.cpp",
"aten/src/ATen/native/sparse/SparseBlasImpl.cpp",
"aten/src/ATen/native/sparse/SparseMatMul.cpp",
"aten/src/ATen/native/sparse/SparseTensor.cpp",
"aten/src/ATen/native/sparse/SparseCsrTensor.cpp",
"aten/src/ATen/native/sparse/SparseTensorMath.cpp",
"aten/src/ATen/native/sparse/SparseUnaryOps.cpp",
"aten/src/ATen/native/sparse/SparseCsrTensorMath.cpp",
"aten/src/ATen/native/utils/Factory.cpp",
"aten/src/ATen/native/xnnpack/Activation.cpp",
"aten/src/ATen/native/xnnpack/ChannelShuffle.cpp",
"aten/src/ATen/native/xnnpack/Convolution.cpp",
"aten/src/ATen/native/xnnpack/AveragePooling.cpp",
"aten/src/ATen/native/xnnpack/Init.cpp",
"aten/src/ATen/native/xnnpack/Linear.cpp",
"aten/src/ATen/native/xnnpack/MaxPooling.cpp",
"aten/src/ATen/native/xnnpack/OpContext.cpp",
"aten/src/ATen/native/xnnpack/RegisterOpContextClass.cpp",
"aten/src/ATen/native/xnnpack/Shim.cpp",
# Files not in native, but which depend on native symbols
# "aten/src/ATen/TensorIndexing.cpp",
"aten/src/ATen/TensorIterator.cpp",
"aten/src/ATen/nnapi/nnapi_register.cpp",
]
# 1. Files in ATen/native with a few exceptions
# TODO: move the exceptions to proper locations
# 2. The whole aten native source list includes the list with and without aten codegen process.
aten_native_source_list = sorted(aten_native_source_non_codegen_list + aten_native_source_codegen_list)
# These are cpp files which need to go in the torch_cuda_cu library
# .cu files can be found via glob
aten_cuda_cu_source_list = [
"aten/src/ATen/cuda/CUDABlas.cpp",
"aten/src/ATen/cuda/CUDASparseBlas.cpp",
"aten/src/ATen/cuda/CublasHandlePool.cpp",
"aten/src/ATen/native/cuda/Activation.cpp",
"aten/src/ATen/native/cuda/Blas.cpp",
"aten/src/ATen/native/cuda/Equal.cpp",
"aten/src/ATen/native/cuda/GridSampler.cpp",
"aten/src/ATen/native/cuda/IndexKernel.cpp",
"aten/src/ATen/native/cuda/ReduceOps.cpp",
"aten/src/ATen/native/cuda/ScanKernels.cpp",
"aten/src/ATen/native/cuda/Sort.cpp",
"aten/src/ATen/native/cuda/Sorting.cpp",
"aten/src/ATen/native/cuda/TensorModeKernel.cpp",
"aten/src/ATen/native/cuda/TensorShapeCUDA.cpp",
"aten/src/ATen/native/cuda/TensorTopK.cpp",
"aten/src/ATen/native/cuda/jit_utils.cpp",
"aten/src/ATen/native/sparse/cuda/SparseBlas.cpp",
"aten/src/ATen/native/sparse/cuda/SparseBlasImpl.cpp",
"aten/src/ATen/native/sparse/cuda/SparseBlasLegacy.cpp",
"aten/src/ATen/native/sparse/cuda/SparseCUDABlas.cpp",
]
# Files using thrust::sort_by_key need to be linked last
aten_cuda_with_sort_by_key_source_list = [
# empty_cuda is needed by torch_cuda_cpp
"aten/src/ATen/native/cuda/TensorFactories.cu",
]
aten_cuda_cu_with_sort_by_key_source_list = [
"aten/src/ATen/native/cuda/Unique.cu",
]
| 46.961933
| 134
| 0.738628
|
d8f80edeedf5a75235661a6e2fa4008b26a23ea3
| 4,141
|
py
|
Python
|
thing/tasks/planetarycolonies.py
|
Gillingham/evething
|
e00b722cf00a6a3a25e1fff3014ed3365c7ef3e4
|
[
"BSD-2-Clause"
] | 33
|
2015-02-18T00:07:57.000Z
|
2020-06-09T15:19:05.000Z
|
thing/tasks/planetarycolonies.py
|
Gillingham/evething
|
e00b722cf00a6a3a25e1fff3014ed3365c7ef3e4
|
[
"BSD-2-Clause"
] | 19
|
2015-03-09T19:51:43.000Z
|
2019-10-19T12:04:23.000Z
|
thing/tasks/planetarycolonies.py
|
Gillingham/evething
|
e00b722cf00a6a3a25e1fff3014ed3365c7ef3e4
|
[
"BSD-2-Clause"
] | 20
|
2015-02-20T17:53:17.000Z
|
2022-02-11T06:29:11.000Z
|
# ------------------------------------------------------------------------------
# Copyright (c) 2010-2014, EVEthing team
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
# OF SUCH DAMAGE.
# ------------------------------------------------------------------------------
from celery.execute import send_task
from .apitask import APITask
from thing.models import System, Character, Colony
class PlanetaryColonies(APITask):
    name = 'thing.planetary_colonies'

    def run(self, url, taskstate_id, apikey_id, character_id):
        if self.init(taskstate_id, apikey_id) is False:
            return

        # Make sure the character exists
        try:
            character = Character.objects.select_related('details').get(pk=character_id)
        except Character.DoesNotExist:
            self.log_warn('Character %s does not exist!', character_id)
            return

        params = {'characterID': character_id}
        if self.fetch_api(url, params) is False or self.root is None:
            return

        c_filter = Colony.objects.filter(character=character)
        c_map = {}
        for colony in c_filter:
            c_map[colony.planet_id] = colony

        system_ids = set()
        rows = []
        for row in self.root.findall('result/rowset/row'):
            rows.append(row)
            system_ids.add(int(row.attrib['solarSystemID']))
        system_map = System.objects.in_bulk(system_ids)

        for row in rows:
            planet_id = int(row.attrib['planetID'])
            colony = c_map.get(planet_id, None)
            if colony is not None:
                colony.last_update = self.parse_api_date(row.attrib['lastUpdate'])
                colony.level = int(row.attrib['upgradeLevel'])
                colony.pins = int(row.attrib['numberOfPins'])
                colony.save()
                del c_map[planet_id]
            else:
                colony = Colony()
                colony.character = character
                colony.planet_id = planet_id
                colony.system = system_map.get(int(row.attrib['solarSystemID']))
                colony.planet = row.attrib['planetName']
                colony.planet_type = row.attrib['planetTypeName']
                colony.last_update = self.parse_api_date(row.attrib['lastUpdate'])
                colony.level = int(row.attrib['upgradeLevel'])
                colony.pins = int(row.attrib['numberOfPins'])
                colony.save()

            if colony.id:
                send_task(
                    'thing.planetary_pins',
                    args=(apikey_id, character_id, colony.id),
                    kwargs={},
                    queue='et_medium',
                )

        # Remove old colonies that weren't present in the current API request
        for id in c_map:
            old_colony = c_map.get(id)
            old_colony.delete()

        return True
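# A minimal usage sketch (not part of the original module): the task would be
# queued through Celery in the same style as the per-colony
# 'thing.planetary_pins' dispatch above. The url and id arguments below are
# hypothetical placeholders mirroring run()'s signature, not real values.
#
#   send_task(
#       'thing.planetary_colonies',
#       args=(url, taskstate_id, apikey_id, character_id),
#       kwargs={},
#       queue='et_medium',
#   )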
| 41
| 88
| 0.616518
|